diff --git a/.cspell.yml b/.cspell.yml new file mode 100644 index 000000000..f56756a87 --- /dev/null +++ b/.cspell.yml @@ -0,0 +1,6 @@ +ignoreWords: + - childs # This spelling is used in the files command + - NodeCreater # This spelling is used in the fuse dependency + - Boddy # One of the contributors to the project - Chris Boddy + - Botto # One of the contributors to the project - Santiago Botto + - cose # dag-cose \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 831606f19..280c95af2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -15,3 +15,23 @@ LICENSE text eol=auto # Binary assets assets/init-doc/* binary core/coreunix/test_data/** binary +test/cli/migrations/testdata/** binary + +# Generated test data +test/cli/migrations/testdata/** linguist-generated=true +test/cli/autoconf/testdata/** linguist-generated=true +test/cli/fixtures/** linguist-generated=true +test/sharness/t0054-dag-car-import-export-data/** linguist-generated=true +test/sharness/t0109-gateway-web-_redirects-data/** linguist-generated=true +test/sharness/t0114-gateway-subdomains/** linguist-generated=true +test/sharness/t0115-gateway-dir-listing/** linguist-generated=true +test/sharness/t0116-gateway-cache/** linguist-generated=true +test/sharness/t0119-prometheus-data/** linguist-generated=true +test/sharness/t0165-keystore-data/** linguist-generated=true +test/sharness/t0275-cid-security-data/** linguist-generated=true +test/sharness/t0280-plugin-dag-jose-data/** linguist-generated=true +test/sharness/t0280-plugin-data/** linguist-generated=true +test/sharness/t0280-plugin-git-data/** linguist-generated=true +test/sharness/t0400-api-no-gateway/** linguist-generated=true +test/sharness/t0701-delegated-routing-reframe/** linguist-generated=true +test/sharness/t0702-delegated-routing-http/** linguist-generated=true diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b0d0d1f0d..d89f921b8 100644 --- 
a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -32,8 +32,9 @@ body: label: Installation method description: Please select your installation method options: + - dist.ipfs.tech or ipfs-update + - docker image - ipfs-desktop - - ipfs-update or dist.ipfs.tech - third-party binary - built from source - type: textarea diff --git a/.github/ISSUE_TEMPLATE/enhancement.yml b/.github/ISSUE_TEMPLATE/enhancement.yml index a0b241b55..d2f7a9205 100644 --- a/.github/ISSUE_TEMPLATE/enhancement.yml +++ b/.github/ISSUE_TEMPLATE/enhancement.yml @@ -2,6 +2,7 @@ name: Enhancement description: Suggest an improvement to an existing kubo feature. labels: - kind/enhancement + - need/triage body: - type: markdown attributes: diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml index d368588b4..77445f29f 100644 --- a/.github/ISSUE_TEMPLATE/feature.yml +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -2,6 +2,7 @@ name: Feature description: Suggest a new feature in Kubo. labels: - kind/feature + - need/triage body: - type: markdown attributes: diff --git a/.github/build-platforms.yml b/.github/build-platforms.yml new file mode 100644 index 000000000..456489e60 --- /dev/null +++ b/.github/build-platforms.yml @@ -0,0 +1,17 @@ +# Build platforms configuration for Kubo +# Matches https://github.com/ipfs/distributions/blob/master/dists/kubo/build_matrix +# plus linux-riscv64 for emerging architecture support +# +# The Go compiler handles FUSE support automatically via build tags. +# Platforms are simply listed - no need to specify FUSE capability. 
+ +platforms: + - darwin-amd64 + - darwin-arm64 + - freebsd-amd64 + - linux-amd64 + - linux-arm64 + - linux-riscv64 + - openbsd-amd64 + - windows-amd64 + - windows-arm64 \ No newline at end of file diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d0e082d65..f1acf21e0 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -29,21 +29,21 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Go - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: - go-version: 1.22.x + go-version-file: 'go.mod' # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: go - name: Autobuild - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@v4 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml deleted file mode 100644 index 433240f42..000000000 --- a/.github/workflows/docker-build.yml +++ /dev/null @@ -1,34 +0,0 @@ -# If we decide to run build-image.yml on every PR, we could deprecate this workflow. 
-name: Docker Build - -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/*.md' - push: - branches: - - 'master' - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - docker-build: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ubuntu-latest - timeout-minutes: 10 - env: - IMAGE_NAME: ipfs/kubo - WIP_IMAGE_TAG: wip - defaults: - run: - shell: bash - steps: - - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - - uses: actions/checkout@v4 - - run: docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG . - - run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version diff --git a/.github/workflows/docker-check.yml b/.github/workflows/docker-check.yml new file mode 100644 index 000000000..884155050 --- /dev/null +++ b/.github/workflows/docker-check.yml @@ -0,0 +1,62 @@ +# This workflow performs a quick Docker build check on PRs and pushes to master. +# It builds the Docker image and runs a basic smoke test to ensure the image works. 
+# This is a lightweight check - for full multi-platform builds and publishing, see docker-image.yml +name: Docker Check + +on: + workflow_dispatch: + pull_request: + paths-ignore: + - '**/*.md' + push: + branches: + - 'master' + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + +jobs: + lint: + if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v6 + - uses: hadolint/hadolint-action@v3.3.0 + with: + dockerfile: Dockerfile + failure-threshold: warning + verbose: true + format: tty + + build: + if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + timeout-minutes: 10 + env: + IMAGE_NAME: ipfs/kubo + WIP_IMAGE_TAG: wip + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v6 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Docker image with BuildKit + uses: docker/build-push-action@v6 + with: + context: . + push: false + load: true + tags: ${{ env.IMAGE_NAME }}:${{ env.WIP_IMAGE_TAG }} + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max + + - name: Test Docker image + run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index f5642fe6d..39eaf52f4 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -1,3 +1,7 @@ +# This workflow builds and publishes official Docker images to Docker Hub. +# It handles multi-platform builds (amd64, arm/v7, arm64/v8) and pushes tagged releases. +# This workflow is triggered on tags, specific branches, and can be manually dispatched. 
+# For quick build checks during development, see docker-check.yml name: Docker Push on: @@ -19,6 +23,7 @@ on: push: branches: - 'master' + - 'staging' - 'bifrost-*' tags: - 'v*' @@ -31,13 +36,14 @@ jobs: if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' name: Push Docker image to Docker Hub runs-on: ubuntu-latest - timeout-minutes: 90 + timeout-minutes: 15 env: IMAGE_NAME: ipfs/kubo - LEGACY_IMAGE_NAME: ipfs/go-ipfs + outputs: + tags: ${{ steps.tags.outputs.value }} steps: - name: Check out the repo - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -45,13 +51,11 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Cache Docker layers - uses: actions/cache@v4 + - name: Log in to Docker Hub + uses: docker/login-action@v3 with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- + username: ${{ vars.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} - name: Get tags id: tags @@ -62,12 +66,6 @@ jobs: echo "EOF" >> $GITHUB_OUTPUT shell: bash - - name: Log in to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - # We have to build each platform separately because when using multi-arch # builds, only one platform is being loaded into the cache. This would # prevent us from testing the other platforms. 
@@ -80,8 +78,10 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-amd64 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max - name: Build Docker image (linux/arm/v7) uses: docker/build-push-action@v6 @@ -92,8 +92,10 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-arm-v7 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max - name: Build Docker image (linux/arm64/v8) uses: docker/build-push-action@v6 @@ -104,14 +106,24 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-arm64-v8 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max # We test all the images on amd64 host here. This uses QEMU to emulate # the other platforms. 
- - run: docker run --rm $IMAGE_NAME:linux-amd64 --version - - run: docker run --rm $IMAGE_NAME:linux-arm-v7 --version - - run: docker run --rm $IMAGE_NAME:linux-arm64-v8 --version + # NOTE: --version should finish instantly, but sometimes + # it hangs on github CI (could be qemu issue), so we retry to remove false negatives + - name: Smoke-test linux-amd64 + run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-amd64 version --all && break || [ $i = 3 ] && exit 1; done + timeout-minutes: 1 + - name: Smoke-test linux-arm-v7 + run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-arm-v7 version --all && break || [ $i = 3 ] && exit 1; done + timeout-minutes: 1 + - name: Smoke-test linux-arm64-v8 + run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-arm64-v8 version --all && break || [ $i = 3 ] && exit 1; done + timeout-minutes: 1 # This will only push the previously built images. - if: github.event_name != 'workflow_dispatch' || github.event.inputs.push == 'true' @@ -123,12 +135,9 @@ jobs: push: true file: ./Dockerfile tags: "${{ github.event.inputs.tags || steps.tags.outputs.value }}" - cache-from: type=local,src=/tmp/.buildx-cache-new - cache-to: type=local,dest=/tmp/.buildx-cache-new - - # https://github.com/docker/build-push-action/issues/252 - # https://github.com/moby/buildkit/issues/1896 - - name: Move cache to limit growth - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: | + type=gha,mode=max + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache,mode=max diff --git a/.github/workflows/gateway-conformance.yml b/.github/workflows/gateway-conformance.yml index b1791868c..f2cd854c5 100644 --- a/.github/workflows/gateway-conformance.yml +++ b/.github/workflows/gateway-conformance.yml @@ -41,22 +41,21 @@ jobs: steps: # 1. 
Download the gateway-conformance fixtures - name: Download gateway-conformance fixtures - uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.6 + uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8 with: output: fixtures # 2. Build the kubo-gateway - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - - uses: protocol/cache-go-action@v1 - with: - name: ${{ github.job }} - name: Checkout kubo-gateway - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: path: kubo-gateway + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version-file: 'kubo-gateway/go.mod' + cache: true + cache-dependency-path: kubo-gateway/go.sum - name: Build kubo-gateway run: make build working-directory: kubo-gateway @@ -94,7 +93,7 @@ jobs: # 6. Run the gateway-conformance tests - name: Run gateway-conformance tests - uses: ipfs/gateway-conformance/.github/actions/test@v0.6 + uses: ipfs/gateway-conformance/.github/actions/test@v0.8 with: gateway-url: http://127.0.0.1:8080 subdomain-url: http://localhost:8080 @@ -110,13 +109,13 @@ jobs: run: cat output.md >> $GITHUB_STEP_SUMMARY - name: Upload HTML report if: failure() || success() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: gateway-conformance.html path: output.html - name: Upload JSON report if: failure() || success() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: gateway-conformance.json path: output.json @@ -128,22 +127,21 @@ jobs: steps: # 1. Download the gateway-conformance fixtures - name: Download gateway-conformance fixtures - uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.6 + uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8 with: output: fixtures # 2. 
Build the kubo-gateway - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - - uses: protocol/cache-go-action@v1 - with: - name: ${{ github.job }} - name: Checkout kubo-gateway - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: path: kubo-gateway + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version-file: 'kubo-gateway/go.mod' + cache: true + cache-dependency-path: kubo-gateway/go.sum - name: Build kubo-gateway run: make build working-directory: kubo-gateway @@ -201,7 +199,7 @@ jobs: # 9. Run the gateway-conformance tests over libp2p - name: Run gateway-conformance tests over libp2p - uses: ipfs/gateway-conformance/.github/actions/test@v0.6 + uses: ipfs/gateway-conformance/.github/actions/test@v0.8 with: gateway-url: http://127.0.0.1:8092 args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length' @@ -216,13 +214,13 @@ jobs: run: cat output.md >> $GITHUB_STEP_SUMMARY - name: Upload HTML report if: failure() || success() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: gateway-conformance-libp2p.html path: output.html - name: Upload JSON report if: failure() || success() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: gateway-conformance-libp2p.json path: output.json diff --git a/.github/workflows/generated-pr.yml b/.github/workflows/generated-pr.yml new file mode 100644 index 000000000..b8c5cc631 --- /dev/null +++ b/.github/workflows/generated-pr.yml @@ -0,0 +1,14 @@ +name: Close Generated PRs + +on: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + +jobs: + stale: + uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 diff --git a/.github/workflows/gobuild.yml b/.github/workflows/gobuild.yml index 93159eadd..5134f1cd1 100644 --- a/.github/workflows/gobuild.yml +++ 
b/.github/workflows/gobuild.yml @@ -21,20 +21,38 @@ jobs: env: TEST_DOCKER: 0 TEST_VERBOSE: 1 - TRAVIS: 1 GIT_PAGER: cat IPFS_CHECK_RCMGR_DEFAULTS: 1 defaults: run: shell: bash steps: - - uses: actions/setup-go@v5 + - uses: actions/checkout@v6 + - uses: actions/setup-go@v6 with: - go-version: 1.22.x - - uses: actions/checkout@v4 - - run: make cmd/ipfs-try-build - env: - TEST_FUSE: 1 - - run: make cmd/ipfs-try-build - env: - TEST_FUSE: 0 + go-version-file: 'go.mod' + cache: true + cache-dependency-path: go.sum + + - name: Build all platforms + run: | + # Read platforms from build-platforms.yml and build each one + echo "Building kubo for all platforms..." + + # Read and build each platform + grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' | while read -r platform; do + if [ -z "$platform" ]; then + continue + fi + + echo "::group::Building $platform" + GOOS=$(echo "$platform" | cut -d- -f1) + GOARCH=$(echo "$platform" | cut -d- -f2) + + echo "Building $platform" + echo " GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs" + GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs + echo "::endgroup::" + done + + echo "All platforms built successfully" \ No newline at end of file diff --git a/.github/workflows/golang-analysis.yml b/.github/workflows/golang-analysis.yml index e89034a92..77bd9c3da 100644 --- a/.github/workflows/golang-analysis.yml +++ b/.github/workflows/golang-analysis.yml @@ -22,12 +22,12 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 10 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: submodules: recursive - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: - go-version: "1.22.x" + go-version-file: 'go.mod' - name: Check that go.mod is tidy uses: protocol/multiple-go-modules@v1.4 with: diff --git a/.github/workflows/golint.yml b/.github/workflows/golint.yml index aa8b21b53..a68d0c126 100644 --- a/.github/workflows/golint.yml +++ b/.github/workflows/golint.yml @@ -22,15 +22,14 @@ jobs: 
TEST_DOCKER: 0 TEST_FUSE: 0 TEST_VERBOSE: 1 - TRAVIS: 1 GIT_PAGER: cat IPFS_CHECK_RCMGR_DEFAULTS: 1 defaults: run: shell: bash steps: - - uses: actions/setup-go@v5 + - uses: actions/checkout@v6 + - uses: actions/setup-go@v6 with: - go-version: 1.22.x - - uses: actions/checkout@v4 + go-version-file: 'go.mod' - run: make -O test_go_lint diff --git a/.github/workflows/gotest.yml b/.github/workflows/gotest.yml index 609791aba..8165eb12a 100644 --- a/.github/workflows/gotest.yml +++ b/.github/workflows/gotest.yml @@ -14,64 +14,42 @@ concurrency: cancel-in-progress: true jobs: - go-test: + # Unit tests with coverage collection (uploaded to Codecov) + unit-tests: if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }} - timeout-minutes: 20 + timeout-minutes: 15 env: + GOTRACEBACK: single # reduce noise on test timeout panics TEST_DOCKER: 0 TEST_FUSE: 0 TEST_VERBOSE: 1 - TRAVIS: 1 GIT_PAGER: cat IPFS_CHECK_RCMGR_DEFAULTS: 1 defaults: run: shell: bash steps: - - name: Set up Go - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - name: Check out Kubo - uses: actions/checkout@v4 + uses: actions/checkout@v6 + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: 'go.mod' - name: Install missing tools run: sudo apt update && sudo apt install -y zsh - - name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table - env: - # increasing parallelism beyond 2 doesn't speed up the tests much - PARALLEL: 2 + - name: Run unit tests run: | - make -j "$PARALLEL" test/unit/gotest.junit.xml && + make test_unit && [[ ! 
$(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]] - name: Upload coverage to Codecov - uses: codecov/codecov-action@6d798873df2b1b8e5846dba6fb86631229fbcb17 # v4.4.0 + uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2 if: failure() || success() with: name: unittests files: coverage/unit_tests.coverprofile - - name: Test kubo-as-a-library example - run: | - # we want to first test with the kubo version in the go.mod file - go test -v ./... - - # we also want to test the examples against the current version of kubo - # however, that version might be in a fork so we need to replace the dependency - - # backup the go.mod and go.sum files to restore them after we run the tests - cp go.mod go.mod.bak - cp go.sum go.sum.bak - - # make sure the examples run against the current version of kubo - go mod edit -replace github.com/ipfs/kubo=./../../.. - go mod tidy - - go test -v ./... - - # restore the go.mod and go.sum files to their original state - mv go.mod.bak go.mod - mv go.sum.bak go.sum - working-directory: docs/examples/kubo-as-a-library + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: false - name: Create a proper JUnit XML report uses: ipdxco/gotest-json-to-junit-xml@v1 with: @@ -79,9 +57,9 @@ jobs: output: test/unit/gotest.junit.xml if: failure() || success() - name: Archive the JUnit XML report - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: - name: unit + name: unit-tests-junit path: test/unit/gotest.junit.xml if: failure() || success() - name: Create a HTML report @@ -92,9 +70,9 @@ jobs: output: test/unit/gotest.html if: failure() || success() - name: Archive the HTML report - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: - name: html + name: unit-tests-html path: test/unit/gotest.html if: failure() || success() - name: Create a Markdown report @@ -107,3 +85,86 @@ jobs: - name: Set the summary run: cat test/unit/gotest.md >> 
$GITHUB_STEP_SUMMARY if: failure() || success() + + # End-to-end integration/regression tests from test/cli + # (Go-based replacement for legacy test/sharness shell scripts) + cli-tests: + if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' + runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }} + timeout-minutes: 15 + env: + GOTRACEBACK: single # reduce noise on test timeout panics + TEST_VERBOSE: 1 + GIT_PAGER: cat + IPFS_CHECK_RCMGR_DEFAULTS: 1 + defaults: + run: + shell: bash + steps: + - name: Check out Kubo + uses: actions/checkout@v6 + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: 'go.mod' + - name: Install missing tools + run: sudo apt update && sudo apt install -y zsh + - name: Run CLI tests + env: + IPFS_PATH: ${{ runner.temp }}/ipfs-test + run: make test_cli + - name: Create JUnit XML report + uses: ipdxco/gotest-json-to-junit-xml@v1 + with: + input: test/cli/cli-tests.json + output: test/cli/cli-tests.junit.xml + if: failure() || success() + - name: Archive JUnit XML report + uses: actions/upload-artifact@v6 + with: + name: cli-tests-junit + path: test/cli/cli-tests.junit.xml + if: failure() || success() + - name: Create HTML report + uses: ipdxco/junit-xml-to-html@v1 + with: + mode: no-frames + input: test/cli/cli-tests.junit.xml + output: test/cli/cli-tests.html + if: failure() || success() + - name: Archive HTML report + uses: actions/upload-artifact@v6 + with: + name: cli-tests-html + path: test/cli/cli-tests.html + if: failure() || success() + - name: Create Markdown report + uses: ipdxco/junit-xml-to-html@v1 + with: + mode: summary + input: test/cli/cli-tests.junit.xml + output: test/cli/cli-tests.md + if: failure() || success() + - name: Set summary + run: cat test/cli/cli-tests.md >> $GITHUB_STEP_SUMMARY + if: failure() || success() + + # Example tests (kubo-as-a-library) + example-tests: + if: github.repository == 
'ipfs/kubo' || github.event_name == 'workflow_dispatch' + runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }} + timeout-minutes: 5 + env: + GOTRACEBACK: single + defaults: + run: + shell: bash + steps: + - name: Check out Kubo + uses: actions/checkout@v6 + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: 'go.mod' + - name: Run example tests + run: make test_examples diff --git a/.github/workflows/interop.yml b/.github/workflows/interop.yml index 2967c9997..25bdba4f2 100644 --- a/.github/workflows/interop.yml +++ b/.github/workflows/interop.yml @@ -9,9 +9,6 @@ on: branches: - 'master' -env: - GO_VERSION: 1.22.x - concurrency: group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} cancel-in-progress: true @@ -29,19 +26,18 @@ jobs: TEST_DOCKER: 0 TEST_FUSE: 0 TEST_VERBOSE: 1 - TRAVIS: 1 GIT_PAGER: cat IPFS_CHECK_RCMGR_DEFAULTS: 1 defaults: run: shell: bash steps: - - uses: actions/setup-go@v5 + - uses: actions/checkout@v6 + - uses: actions/setup-go@v6 with: - go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v4 + go-version-file: 'go.mod' - run: make build - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v6 with: name: kubo path: cmd/ipfs/ipfs @@ -53,17 +49,17 @@ jobs: run: shell: bash steps: - - uses: actions/setup-node@v4 + - uses: actions/setup-node@v6 with: node-version: lts/* - - uses: actions/download-artifact@v4 + - uses: actions/download-artifact@v7 with: name: kubo path: cmd/ipfs - run: chmod +x cmd/ipfs/ipfs - run: echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT id: npm-cache-dir - - uses: actions/cache@v4 + - uses: actions/cache@v5 with: path: ${{ steps.npm-cache-dir.outputs.dir }} key: ${{ runner.os }}-${{ github.job }}-helia-${{ hashFiles('**/package-lock.json') }} @@ -82,29 +78,28 @@ jobs: LIBP2P_TCP_REUSEPORT: false LIBP2P_ALLOW_WEAK_RSA_KEYS: 1 
E2E_IPFSD_TYPE: go - TRAVIS: 1 GIT_PAGER: cat IPFS_CHECK_RCMGR_DEFAULTS: 1 defaults: run: shell: bash steps: - - uses: actions/setup-node@v4 + - uses: actions/setup-node@v6 with: - node-version: 18.14.0 - - uses: actions/download-artifact@v4 + node-version: 20.x + - uses: actions/download-artifact@v7 with: name: kubo path: cmd/ipfs - run: chmod +x cmd/ipfs/ipfs - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: repository: ipfs/ipfs-webui path: ipfs-webui - run: | echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT id: npm-cache-dir - - uses: actions/cache@v4 + - uses: actions/cache@v5 with: path: ${{ steps.npm-cache-dir.outputs.dir }} key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }} diff --git a/.github/workflows/sharness.yml b/.github/workflows/sharness.yml index 6432745bf..ac32bf3a4 100644 --- a/.github/workflows/sharness.yml +++ b/.github/workflows/sharness.yml @@ -4,10 +4,10 @@ on: workflow_dispatch: pull_request: paths-ignore: - - '**/*.md' + - "**/*.md" push: branches: - - 'master' + - "master" concurrency: group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} @@ -17,22 +17,22 @@ jobs: sharness-test: if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} - timeout-minutes: 20 + timeout-minutes: ${{ github.repository == 'ipfs/kubo' && 15 || 60 }} defaults: run: shell: bash steps: - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - name: Checkout Kubo - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: path: kubo + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version-file: 'kubo/go.mod' - name: Install missing tools run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils - - uses: actions/cache@v4 + - uses: actions/cache@v5 
with: path: test/sharness/lib/dependencies key: ${{ runner.os }}-test-generate-junit-html-${{ hashFiles('test/sharness/lib/test-generate-junit-html.sh') }} @@ -55,11 +55,13 @@ jobs: # increasing parallelism beyond 10 doesn't speed up the tests much PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }} - name: Upload coverage report - uses: codecov/codecov-action@6d798873df2b1b8e5846dba6fb86631229fbcb17 # v4.4.0 + uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2 if: failure() || success() with: name: sharness files: kubo/coverage/sharness_tests.coverprofile + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: false - name: Aggregate results run: find kubo/test/sharness/test-results -name 't*-*.sh.*.counts' | kubo/test/sharness/lib/sharness/aggregate-results.sh > kubo/test/sharness/test-results/summary.txt - name: 👉️ If this step failed, go to «Summary» (top left) → «HTML Report» → inspect the «Failures» column @@ -88,7 +90,7 @@ jobs: destination: sharness.html - name: Upload one-page HTML report if: github.repository != 'ipfs/kubo' && (failure() || success()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: sharness.html path: kubo/test/sharness/test-results/sharness.html @@ -108,7 +110,7 @@ jobs: destination: sharness-html/ - name: Upload full HTML report if: github.repository != 'ipfs/kubo' && (failure() || success()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: sharness-html path: kubo/test/sharness/test-results/sharness-html diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml new file mode 100644 index 000000000..4eda8b222 --- /dev/null +++ b/.github/workflows/spellcheck.yml @@ -0,0 +1,18 @@ +name: Spell Check + +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && 
github.sha || github.ref }} + cancel-in-progress: true + +jobs: + spellcheck: + uses: ipdxco/unified-github-workflows/.github/workflows/reusable-spellcheck.yml@v1 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 16d65d721..7c955c414 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,8 +1,9 @@ -name: Close and mark stale issue +name: Close Stale Issues on: schedule: - cron: '0 0 * * *' + workflow_dispatch: permissions: issues: write @@ -10,4 +11,4 @@ permissions: jobs: stale: - uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3 + uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 diff --git a/.github/workflows/sync-release-assets.yml b/.github/workflows/sync-release-assets.yml index 0d5c8199b..33869f11d 100644 --- a/.github/workflows/sync-release-assets.yml +++ b/.github/workflows/sync-release-assets.yml @@ -22,11 +22,11 @@ jobs: - uses: ipfs/start-ipfs-daemon-action@v1 with: args: --init --init-profile=flatfs,server --enable-gc=false - - uses: actions/setup-node@v4 + - uses: actions/setup-node@v6 with: node-version: 14 - name: Sync the latest 5 github releases - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: | const fs = require('fs').promises diff --git a/.github/workflows/test-migrations.yml b/.github/workflows/test-migrations.yml new file mode 100644 index 000000000..35fcbe729 --- /dev/null +++ b/.github/workflows/test-migrations.yml @@ -0,0 +1,85 @@ +name: Migrations + +on: + workflow_dispatch: + pull_request: + paths: + # Migration implementation files + - 'repo/fsrepo/migrations/**' + - 'test/cli/migrations/**' + # Config and repo handling + - 'repo/fsrepo/**' + # This workflow file itself + - '.github/workflows/test-migrations.yml' + push: + branches: + - 'master' + - 'release-*' + paths: + - 'repo/fsrepo/migrations/**' + - 'test/cli/migrations/**' + - 'repo/fsrepo/**' + - '.github/workflows/test-migrations.yml' + 
+concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + +jobs: + test: + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + runs-on: ${{ matrix.os }} + timeout-minutes: 20 + env: + TEST_VERBOSE: 1 + IPFS_CHECK_RCMGR_DEFAULTS: 1 + defaults: + run: + shell: bash + steps: + - name: Check out Kubo + uses: actions/checkout@v6 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: 'go.mod' + + - name: Build kubo binary + run: | + make build + echo "Built ipfs binary at $(pwd)/cmd/ipfs/" + + - name: Add kubo to PATH + run: | + echo "$(pwd)/cmd/ipfs" >> $GITHUB_PATH + + - name: Verify ipfs in PATH + run: | + which ipfs || echo "ipfs not in PATH" + ipfs version || echo "Failed to run ipfs version" + + - name: Run migration unit tests + run: | + go test ./repo/fsrepo/migrations/... + + - name: Run CLI migration tests + env: + IPFS_PATH: ${{ runner.temp }}/ipfs-test + run: | + export PATH="${{ github.workspace }}/cmd/ipfs:$PATH" + which ipfs || echo "ipfs not found in PATH" + ipfs version || echo "Failed to run ipfs version" + go test ./test/cli/migrations/... 
+ + - name: Upload test results + if: always() + uses: actions/upload-artifact@v6 + with: + name: ${{ matrix.os }}-test-results + path: | + test/**/*.log + ${{ runner.temp }}/ipfs-test/ diff --git a/.gitignore b/.gitignore index cb147456b..890870a6e 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,11 @@ go-ipfs-source.tar.gz docs/examples/go-ipfs-as-a-library/example-folder/Qm* /test/sharness/t0054-dag-car-import-export-data/*.car +# test artifacts from make test_unit / test_cli +/test/unit/gotest.json +/test/unit/gotest.junit.xml +/test/cli/cli-tests.json + # ignore build output from snapcraft /ipfs_*.snap /parts diff --git a/.hadolint.yaml b/.hadolint.yaml new file mode 100644 index 000000000..78b3d23bf --- /dev/null +++ b/.hadolint.yaml @@ -0,0 +1,13 @@ +# Hadolint configuration for Kubo Docker image +# https://github.com/hadolint/hadolint + +# Ignore specific rules +ignored: + # DL3008: Pin versions in apt-get install + # We use stable base images and prefer smaller layers over version pinning + - DL3008 + +# Trust base images from these registries +trustedRegistries: + - docker.io + - gcr.io \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index fa40e1625..6bc565d86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Kubo Changelogs +- [v0.40](docs/changelogs/v0.40.md) +- [v0.39](docs/changelogs/v0.39.md) +- [v0.38](docs/changelogs/v0.38.md) +- [v0.37](docs/changelogs/v0.37.md) +- [v0.36](docs/changelogs/v0.36.md) +- [v0.35](docs/changelogs/v0.35.md) +- [v0.34](docs/changelogs/v0.34.md) +- [v0.33](docs/changelogs/v0.33.md) +- [v0.32](docs/changelogs/v0.32.md) - [v0.31](docs/changelogs/v0.31.md) - [v0.30](docs/changelogs/v0.30.md) - [v0.29](docs/changelogs/v0.29.md) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1db5ca246..ed9001df2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,10 @@ -IPFS as a project, including go-ipfs and all of its modules, follows the [standard IPFS Community contributing 
guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md). +# Contributing to Kubo -We also adhere to the [GO IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information of how to collaborate and contribute in the Go implementation of IPFS. +**For development setup, building, and testing, see the [Developer Guide](docs/developer-guide.md).** + +IPFS as a project, including Kubo and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md). + +We also adhere to the [Go IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information on how to collaborate and contribute to the Go implementation of IPFS. We appreciate your time and attention for going over these. Please open an issue on ipfs/community if you have any questions. diff --git a/Dockerfile b/Dockerfile index 4ed07d3d4..6d43beefa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,13 +1,16 @@ -FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22 AS builder +# syntax=docker/dockerfile:1 +# Enables BuildKit with cache mounts for faster builds +FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.25 AS builder ARG TARGETOS TARGETARCH -ENV SRC_DIR /kubo +ENV SRC_DIR=/kubo -# Download packages first so they can be cached. +# Cache go module downloads between builds for faster rebuilds COPY go.mod go.sum $SRC_DIR/ -RUN cd $SRC_DIR \ - && go mod download +WORKDIR $SRC_DIR +RUN --mount=type=cache,target=/go/pkg/mod \ + go mod download COPY . $SRC_DIR @@ -18,92 +21,78 @@ ARG IPFS_PLUGINS # Allow for other targets to be built, e.g.: docker build --build-arg MAKE_TARGET="nofuse" ARG MAKE_TARGET=build -# Build the thing. -# Also: fix getting HEAD commit hash via git rev-parse. 
-RUN cd $SRC_DIR \ - && mkdir -p .git/objects \ +# Build ipfs binary with cached go modules and build cache. +# mkdir .git/objects allows git rev-parse to read commit hash for version info +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + mkdir -p .git/objects \ && GOOS=$TARGETOS GOARCH=$TARGETARCH GOFLAGS=-buildvcs=false make ${MAKE_TARGET} IPFS_PLUGINS=$IPFS_PLUGINS -# Using Debian Buster because the version of busybox we're using is based on it -# and we want to make sure the libraries we're using are compatible. That's also -# why we're running this for the target platform. -FROM debian:stable-slim AS utilities +# Extract required runtime tools from Debian. +# We use Debian instead of Alpine because we need glibc compatibility +# for the busybox base image we're using. +FROM debian:bookworm-slim AS utilities RUN set -eux; \ apt-get update; \ - apt-get install -y \ + apt-get install -y --no-install-recommends \ tini \ # Using gosu (~2MB) instead of su-exec (~20KB) because it's easier to # install on Debian. Useful links: # - https://github.com/ncopa/su-exec#why-reinvent-gosu # - https://github.com/tianon/gosu/issues/52#issuecomment-441946745 gosu \ - # This installs fusermount which we later copy over to the target image. + # fusermount enables IPFS mount commands fuse \ ca-certificates \ ; \ - rm -rf /var/lib/apt/lists/* + apt-get clean; \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# Now comes the actual target image, which aims to be as small as possible. +# Final minimal image with shell for debugging (busybox provides sh) FROM busybox:stable-glibc -# Get the ipfs binary, entrypoint script, and TLS CAs from the build container. 
-ENV SRC_DIR /kubo +# Copy ipfs binary, startup scripts, and runtime dependencies +ENV SRC_DIR=/kubo COPY --from=utilities /usr/sbin/gosu /sbin/gosu COPY --from=utilities /usr/bin/tini /sbin/tini COPY --from=utilities /bin/fusermount /usr/local/bin/fusermount COPY --from=utilities /etc/ssl/certs /etc/ssl/certs COPY --from=builder $SRC_DIR/cmd/ipfs/ipfs /usr/local/bin/ipfs -COPY --from=builder $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs +COPY --from=builder --chmod=755 $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs COPY --from=builder $SRC_DIR/bin/container_init_run /usr/local/bin/container_init_run -# Add suid bit on fusermount so it will run properly +# Set SUID for fusermount to enable FUSE mounting by non-root user RUN chmod 4755 /usr/local/bin/fusermount -# Fix permissions on start_ipfs (ignore the build machine's permissions) -RUN chmod 0755 /usr/local/bin/start_ipfs - -# Swarm TCP; should be exposed to the public -EXPOSE 4001 -# Swarm UDP; should be exposed to the public -EXPOSE 4001/udp -# Daemon API; must not be exposed publicly but to client services under you control +# Swarm P2P port (TCP/UDP) - expose publicly for peer connections +EXPOSE 4001 4001/udp +# API port - keep private, only for trusted clients EXPOSE 5001 -# Web Gateway; can be exposed publicly with a proxy, e.g. as https://ipfs.example.org +# Gateway port - can be exposed publicly via reverse proxy EXPOSE 8080 -# Swarm Websockets; must be exposed publicly when the node is listening using the websocket transport (/ipX/.../tcp/8081/ws). +# Swarm WebSockets - expose publicly for browser-based peers EXPOSE 8081 -# Create the fs-repo directory and switch to a non-privileged user. 
-ENV IPFS_PATH /data/ipfs -RUN mkdir -p $IPFS_PATH \ +# Create ipfs user (uid 1000) and required directories with proper ownership +ENV IPFS_PATH=/data/ipfs +RUN mkdir -p $IPFS_PATH /ipfs /ipns /mfs /container-init.d \ && adduser -D -h $IPFS_PATH -u 1000 -G users ipfs \ - && chown ipfs:users $IPFS_PATH + && chown ipfs:users $IPFS_PATH /ipfs /ipns /mfs /container-init.d -# Create mount points for `ipfs mount` command -RUN mkdir /ipfs /ipns \ - && chown ipfs:users /ipfs /ipns - -# Create the init scripts directory -RUN mkdir /container-init.d \ - && chown ipfs:users /container-init.d - -# Expose the fs-repo as a volume. -# start_ipfs initializes an fs-repo if none is mounted. -# Important this happens after the USER directive so permissions are correct. +# Volume for IPFS repository data persistence VOLUME $IPFS_PATH # The default logging level -ENV IPFS_LOGGING "" +ENV GOLOG_LOG_LEVEL="" -# This just makes sure that: -# 1. There's an fs-repo, and initializes one if there isn't. -# 2. The API and Gateway are accessible from outside the container. +# Entrypoint initializes IPFS repo if needed and configures networking. +# tini ensures proper signal handling and zombie process cleanup ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start_ipfs"] -# Healthcheck for the container -# QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn is the CID of empty folder +# Health check verifies IPFS daemon is responsive. 
+# Uses empty directory CID (QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn) as test HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ CMD ipfs --api=/ip4/127.0.0.1/tcp/5001 dag stat /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn || exit 1 -# Execute the daemon subcommand by default +# Default: run IPFS daemon with auto-migration enabled CMD ["daemon", "--migrate=true", "--agent-version-suffix=docker"] diff --git a/FUNDING.json b/FUNDING.json new file mode 100644 index 000000000..9085792a6 --- /dev/null +++ b/FUNDING.json @@ -0,0 +1,5 @@ +{ + "opRetro": { + "projectId": "0x7f330267969cf845a983a9d4e7b7dbcca5c700a5191269af377836d109e0bb69" + } +} diff --git a/README.md b/README.md index 30a884e96..c1eaf9748 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@


- Kubo logo + Kubo logo
- Kubo: IPFS Implementation in GO + Kubo: IPFS Implementation in Go

@@ -11,488 +11,214 @@

Official Part of IPFS Project Discourse Forum - Matrix - ci + Matrix + GitHub release - godoc reference


+

+What is Kubo? | Quick Taste | Install | Documentation | Development | Getting Help +

+ ## What is Kubo? -Kubo was the first IPFS implementation and is the most widely used one today. Implementing the *Interplanetary Filesystem* - the Web3 standard for content-addressing, interoperable with HTTP. Thus powered by IPLD's data models and the libp2p for network communication. Kubo is written in Go. +Kubo was the first [IPFS](https://docs.ipfs.tech/concepts/what-is-ipfs/) implementation and is the [most widely used one today](https://probelab.io/ipfs/topology/#chart-agent-types-avg). It takes an opinionated approach to content-addressing ([CIDs](https://docs.ipfs.tech/concepts/glossary/#cid), [DAGs](https://docs.ipfs.tech/concepts/glossary/#dag)) that maximizes interoperability: [UnixFS](https://docs.ipfs.tech/concepts/glossary/#unixfs) for files and directories, [HTTP Gateways](https://docs.ipfs.tech/concepts/glossary/#gateway) for web browsers, [Bitswap](https://docs.ipfs.tech/concepts/glossary/#bitswap) and [HTTP](https://specs.ipfs.tech/http-gateways/trustless-gateway/) for verifiable data transfer. 
-Featureset -- Runs an IPFS-Node as a network service that is part of LAN and WAN DHT -- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs` and `/ipns`) functionality for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval -- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) (`/routing/v1`) client and server implementation for [delegated routing](./docs/delegated-routing.md) lookups -- [HTTP Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`) to access and control the daemon -- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) based on (`/api/v0`) RPC API -- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) to manage the Kubo node -- [Content blocking](/docs/content-blocking.md) support for operators of public nodes +**Features:** -### Other implementations +- Runs an IPFS node as a network service (LAN [mDNS](https://github.com/libp2p/specs/blob/master/discovery/mdns.md) and WAN [Amino DHT](https://docs.ipfs.tech/concepts/glossary/#dht)) +- [Command-line interface](https://docs.ipfs.tech/reference/kubo/cli/) (`ipfs --help`) +- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) for node management +- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval +- [HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to control the daemon +- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) client and server for [delegated routing](./docs/delegated-routing.md) +- [Content blocking](./docs/content-blocking.md) for public node operators -See [List](https://docs.ipfs.tech/basics/ipfs-implementations/) +**Other IPFS implementations:** [Helia](https://github.com/ipfs/helia) (JavaScript), [more...](https://docs.ipfs.tech/concepts/ipfs-implementations/) -## What is IPFS? 
+## Quick Taste -IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas from previous systems such as Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single BitTorrent swarm, exchanging git objects. IPFS provides an interface as simple as the HTTP web, but with permanence built-in. You can also mount the world at /ipfs. +After [installing Kubo](#install), verify it works: -For more info see: https://docs.ipfs.tech/concepts/what-is-ipfs/ +```console +$ ipfs init +generating ED25519 keypair...done +peer identity: 12D3KooWGcSLQdLDBi2BvoP8WnpdHvhWPbxpGcqkf93rL2XMZK7R -Before opening an issue, consider using one of the following locations to ensure you are opening your thread in the right place: - - kubo (previously named go-ipfs) _implementation_ bugs in [this repo](https://github.com/ipfs/kubo/issues). - - Documentation issues in [ipfs/docs issues](https://github.com/ipfs/ipfs-docs/issues). - - IPFS _design_ in [ipfs/specs issues](https://github.com/ipfs/specs/issues). - - Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues). - - Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.tech). - - Or [chat with us](https://docs.ipfs.tech/community/chat/). 
+$ ipfs daemon & +Daemon is ready -[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCdjsUXJ3QawK4O5L1kqqsew?label=Subscribe%20IPFS&style=social&cacheSeconds=3600)](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [![Follow @IPFS on Twitter](https://img.shields.io/twitter/follow/IPFS?style=social&cacheSeconds=3600)](https://twitter.com/IPFS) +$ echo "hello IPFS" | ipfs add -q --cid-version 1 +bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa -## Next milestones +$ ipfs cat bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa +hello IPFS +``` -[Milestones on GitHub](https://github.com/ipfs/kubo/milestones) +Verify this CID is provided by your node to the IPFS network: - -## Table of Contents - -- [What is Kubo?](#what-is-kubo) -- [What is IPFS?](#what-is-ipfs) -- [Next milestones](#next-milestones) -- [Table of Contents](#table-of-contents) -- [Security Issues](#security-issues) -- [Minimal System Requirements](#minimal-system-requirements) -- [Install](#install) - - [Docker](#docker) - - [Official prebuilt binaries](#official-prebuilt-binaries) - - [Updating](#updating) - - [Using ipfs-update](#using-ipfs-update) - - [Downloading builds using IPFS](#downloading-builds-using-ipfs) - - [Unofficial Linux packages](#unofficial-linux-packages) - - [ArchLinux](#arch-linux) - - [Gentoo Linux](#gentoo-linux) - - [Nix](#nix) - - [Solus](#solus) - - [openSUSE](#opensuse) - - [Guix](#guix) - - [Snap](#snap) - - [Ubuntu PPA](#ubuntu-ppa) - - [Unofficial Windows packages](#unofficial-windows-packages) - - [Chocolatey](#chocolatey) - - [Scoop](#scoop) - - [Unofficial MacOS packages](#unofficial-macos-packages) - - [MacPorts](#macports) - - [Nix](#nix-macos) - - [Homebrew](#homebrew) - - [Build from Source](#build-from-source) - - [Install Go](#install-go) - - [Download and Compile IPFS](#download-and-compile-ipfs) - - [Cross Compiling](#cross-compiling) - - [Troubleshooting](#troubleshooting) -- [Getting 
Started](#getting-started) - - [Usage](#usage) - - [Some things to try](#some-things-to-try) - - [Troubleshooting](#troubleshooting-1) -- [Packages](#packages) -- [Development](#development) - - [Map of Implemented Subsystems](#map-of-implemented-subsystems) - - [CLI, HTTP-API, Architecture Diagram](#cli-http-api-architecture-diagram) - - [Testing](#testing) - - [Development Dependencies](#development-dependencies) - - [Developer Notes](#developer-notes) -- [Maintainer Info](#maintainer-info) -- [Contributing](#contributing) -- [License](#license) - -## Security Issues - -Please follow [`SECURITY.md`](SECURITY.md). - -### Minimal System Requirements - -IPFS can run on most Linux, macOS, and Windows systems. We recommend running it on a machine with at least 4 GB of RAM and 2 CPU cores (kubo is highly parallel). On systems with less memory, it may not be completely stable, and you run on your own risk. +See `ipfs add --help` for all import options. Ready for more? Follow the [command-line quick start](https://docs.ipfs.tech/how-to/command-line-quick-start/). ## Install -The canonical download instructions for IPFS are over at: https://docs.ipfs.tech/install/. It is **highly recommended** you follow those instructions if you are not interested in working on IPFS development. +Follow the [official installation guide](https://docs.ipfs.tech/install/command-line/), or choose: [prebuilt binary](#official-prebuilt-binaries) | [Docker](#docker) | [package manager](#package-managers) | [from source](#build-from-source). + +Prefer a GUI? Try [IPFS Desktop](https://docs.ipfs.tech/install/ipfs-desktop/) and/or [IPFS Companion](https://docs.ipfs.tech/install/ipfs-companion/). + +### Minimal System Requirements + +Kubo runs on most Linux, macOS, and Windows systems. For optimal performance, we recommend at least 6 GB of RAM and 2 CPU cores (more is ideal, as Kubo is highly parallel). 
+ +> [!IMPORTANT] +> Larger pinsets require additional memory, with an estimated ~1 GiB of RAM per 20 million items for reproviding to the Amino DHT. + +> [!CAUTION] +> Systems with less than the recommended memory may experience instability, frequent OOM errors or restarts, and missing data announcement (reprovider window), which can make data fully or partially inaccessible to other peers. Running Kubo on underprovisioned hardware is at your own risk. + +### Official Prebuilt Binaries + +Download from https://dist.ipfs.tech#kubo or [GitHub Releases](https://github.com/ipfs/kubo/releases/latest). ### Docker -Official images are published at https://hub.docker.com/r/ipfs/kubo/: +Official images are published at https://hub.docker.com/r/ipfs/kubo/: [![Docker Image Version (latest semver)](https://img.shields.io/docker/v/ipfs/kubo?color=blue&label=kubo%20docker%20image&logo=docker&sort=semver&style=flat-square&cacheSeconds=3600)](https://hub.docker.com/r/ipfs/kubo/) -[![Docker Image Version (latest semver)](https://img.shields.io/docker/v/ipfs/kubo?color=blue&label=kubo%20docker%20image&logo=docker&sort=semver&style=flat-square&cacheSeconds=3600)](https://hub.docker.com/r/ipfs/kubo/) +#### 🟢 Release Images -- 🟢 Releases - - `latest` and `release` tags always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest) - - `vN.N.N` points at a specific [release tag](https://github.com/ipfs/kubo/releases) - - These are production grade images. -- 🟠 We also provide experimental developer builds - - `master-latest` always points at the `HEAD` of the `master` branch - - `master-YYYY-DD-MM-GITSHA` points at a specific commit from the `master` branch - - These tags are used by developers for internal testing, not intended for end users or production use. +Use these for production deployments. 
+ +- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest) +- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases) ```console $ docker pull ipfs/kubo:latest $ docker run --rm -it --net=host ipfs/kubo:latest ``` -To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node), -pass necessary config via `-e` or by mounting scripts in the `/container-init.d`. +To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node), pass config via `-e` or mount scripts in `/container-init.d`. -Learn more at https://docs.ipfs.tech/install/run-ipfs-inside-docker/ +#### 🟠 Developer Preview Images -### Official prebuilt binaries +For internal testing, not intended for production. -The official binaries are published at https://dist.ipfs.tech#kubo: +- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) points at `HEAD` of [`master`](https://github.com/ipfs/kubo/commits/master/) +- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit -[![dist.ipfs.tech Downloads](https://img.shields.io/github/v/release/ipfs/kubo?label=dist.ipfs.tech&logo=ipfs&style=flat-square&cacheSeconds=3600)](https://dist.ipfs.tech#kubo) +#### 🔴 Internal Staging Images -From there: -- Click the blue "Download Kubo" on the right side of the page. -- Open/extract the archive. -- Move kubo (`ipfs`) to your path (`install.sh` can do it for you). +For testing arbitrary commits and experimental patches (force push to `staging` branch). 
-If you are unable to access [dist.ipfs.tech](https://dist.ipfs.tech#kubo), you can also download kubo (go-ipfs) from: -- this project's GitHub [releases](https://github.com/ipfs/kubo/releases/latest) page -- `/ipns/dist.ipfs.tech` at [dweb.link](https://dweb.link/ipns/dist.ipfs.tech#kubo) gateway - -#### Updating - -##### Using ipfs-update - -IPFS has an updating tool that can be accessed through `ipfs update`. The tool is -not installed alongside IPFS in order to keep that logic independent of the main -codebase. To install `ipfs-update` tool, [download it here](https://dist.ipfs.tech/#ipfs-update). - -##### Downloading builds using IPFS - -List the available versions of Kubo (go-ipfs) implementation: - -```console -$ ipfs cat /ipns/dist.ipfs.tech/kubo/versions -``` - -Then, to view available builds for a version from the previous command (`$VERSION`): - -```console -$ ipfs ls /ipns/dist.ipfs.tech/kubo/$VERSION -``` - -To download a given build of a version: - -```console -$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-386.tar.gz # darwin 32-bit build -$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin 64-bit build -$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd 64-bit build -$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-386.tar.gz # linux 32-bit build -$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux 64-bit build -$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm.tar.gz # linux arm build -$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows 64-bit build -``` - -### Unofficial Linux packages - - - Packaging status - - -- [ArchLinux](#arch-linux) -- [Gentoo Linux](#gentoo-linux) -- [Nix](#nix-linux) -- [Solus](#solus) -- [openSUSE](#opensuse) -- [Guix](#guix) -- [Snap](#snap) -- [Ubuntu PPA](#ubuntu-ppa) - -#### Arch Linux - -[![kubo via Community 
Repo](https://img.shields.io/archlinux/v/community/x86_64/kubo?color=1793d1&label=kubo&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://wiki.archlinux.org/title/IPFS) - -```bash -# pacman -S kubo -``` - -[![kubo-git via AUR](https://img.shields.io/static/v1?label=kubo-git&message=latest%40master&color=1793d1&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://aur.archlinux.org/packages/kubo/) - -#### Gentoo Linux - -https://wiki.gentoo.org/wiki/Kubo - -```bash -# emerge -a net-p2p/kubo -``` - -https://packages.gentoo.org/packages/net-p2p/kubo - -#### Nix - -With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo (go-ipfs) like this: - -``` -$ nix-env -i kubo -``` - -You can also install the Package by using its attribute name, which is also `kubo`. - -#### Solus - -[Package for Solus](https://dev.getsol.us/source/kubo/repository/master/) - -``` -$ sudo eopkg install kubo -``` - -You can also install it through the Solus software center. - -#### openSUSE - -[Community Package for go-ipfs](https://software.opensuse.org/package/go-ipfs) - -#### Guix - -[Community Package for go-ipfs](https://packages.guix.gnu.org/packages/go-ipfs/0.11.0/) is no out-of-date. - -#### Snap - -No longer supported, see rationale in [kubo#8688](https://github.com/ipfs/kubo/issues/8688). - -#### Ubuntu PPA - -[PPA homepage](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) on Launchpad. - -##### Latest Ubuntu (>= 20.04 LTS) -```sh -sudo add-apt-repository ppa:twdragon/ipfs -sudo apt update -sudo apt install ipfs-kubo -``` - -##### Any Ubuntu version - -```sh -sudo su -echo 'deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <> main' >> /etc/apt/sources.list.d/ipfs -echo 'deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <> main' >> /etc/apt/sources.list.d/ipfs -exit -sudo apt update -sudo apt install ipfs-kubo -``` -where `<>` is the codename of your Ubuntu distribution (for example, `jammy` for 22.04 LTS). 
During the first installation the package maintenance script may automatically ask you about which networking profile, CPU accounting model, and/or existing node configuration file you want to use. - -**NOTE**: this method also may work with any compatible Debian-based distro which has `libc6` inside, and APT as a package manager. - -### Unofficial Windows packages - -- [Chocolatey](#chocolatey) -- [Scoop](#scoop) - -#### Chocolatey - -No longer supported, see rationale in [kubo#9341](https://github.com/ipfs/kubo/issues/9341). - -#### Scoop - -Scoop provides kubo as `kubo` in its 'extras' bucket. - -```Powershell -PS> scoop bucket add extras -PS> scoop install kubo -``` - -### Unofficial macOS packages - -- [MacPorts](#macports) -- [Nix](#nix-macos) -- [Homebrew](#homebrew) - -#### MacPorts - -The package [ipfs](https://ports.macports.org/port/ipfs) currently points to kubo (go-ipfs) and is being maintained. - -``` -$ sudo port install ipfs -``` - -#### Nix - -In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/): - -``` -$ nix-env -i kubo -``` - -You can also install the Package by using its attribute name, which is also `kubo`. - -#### Homebrew - -A Homebrew formula [ipfs](https://formulae.brew.sh/formula/ipfs) is maintained too. 
- -``` -$ brew install --formula ipfs -``` +- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) points at `HEAD` of [`staging`](https://github.com/ipfs/kubo/commits/staging/) +- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit ### Build from Source ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600) -kubo's build system requires Go and some standard POSIX build tools: - -* GNU make -* Git -* GCC (or some other go compatible C Compiler) (optional) - -To build without GCC, build with `CGO_ENABLED=0` (e.g., `make build CGO_ENABLED=0`). - -#### Install Go - -![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600) - -If you need to update: [Download latest version of Go](https://golang.org/dl/). - -You'll need to add Go's bin directories to your `$PATH` environment variable e.g., by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`: - -``` -export PATH=$PATH:/usr/local/go/bin -export PATH=$PATH:$GOPATH/bin +```bash +git clone https://github.com/ipfs/kubo.git +cd kubo +make build # creates cmd/ipfs/ipfs +make install # installs to $GOPATH/bin/ipfs ``` -(If you run into trouble, see the [Go install instructions](https://golang.org/doc/install)). +See the [Developer Guide](docs/developer-guide.md) for details, Windows instructions, and troubleshooting. -#### Download and Compile IPFS +### Package Managers -``` -$ git clone https://github.com/ipfs/kubo.git +Kubo is available in community-maintained packages across many operating systems, Linux distributions, and package managers. 
See [Repology](https://repology.org/project/kubo/versions) for the full list: [![Packaging status](https://repology.org/badge/tiny-repos/kubo.svg)](https://repology.org/project/kubo/versions) -$ cd kubo -$ make install -``` +> [!WARNING] +> These packages are maintained by third-party volunteers. The IPFS Project and Kubo maintainers are not responsible for their contents or supply chain security. For increased security, [build from source](#build-from-source). -Alternatively, you can run `make build` to build the go-ipfs binary (storing it in `cmd/ipfs/ipfs`) without installing it. +#### Linux -**NOTE:** If you get an error along the lines of "fatal error: stdlib.h: No such file or directory", you're missing a C compiler. Either re-run `make` with `CGO_ENABLED=0` or install GCC. +| Distribution | Install | Version | +|--------------|---------|---------| +| Ubuntu | [PPA](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs): `sudo apt install ipfs-kubo` | [![PPA: twdragon](https://img.shields.io/badge/PPA-twdragon-E95420?logo=ubuntu)](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) | +| Arch | `pacman -S kubo` | [![Arch package](https://repology.org/badge/version-for-repo/arch/kubo.svg)](https://archlinux.org/packages/extra/x86_64/kubo/) | +| Fedora | [COPR](https://copr.fedorainfracloud.org/coprs/taw/ipfs/): `dnf install kubo` | [![COPR: taw](https://img.shields.io/badge/COPR-taw-51A2DA?logo=fedora)](https://copr.fedorainfracloud.org/coprs/taw/ipfs/) | +| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) | +| Gentoo | `emerge -a net-p2p/kubo` | [![Gentoo package](https://repology.org/badge/version-for-repo/gentoo/kubo.svg)](https://packages.gentoo.org/packages/net-p2p/kubo) | +| openSUSE | `zypper install kubo` | [![openSUSE 
Tumbleweed](https://repology.org/badge/version-for-repo/opensuse_tumbleweed/kubo.svg)](https://software.opensuse.org/package/kubo) | +| Solus | `sudo eopkg install kubo` | [![Solus package](https://repology.org/badge/version-for-repo/solus/kubo.svg)](https://packages.getsol.us/shannon/k/kubo/) | +| Guix | `guix install kubo` | [![Guix package](https://repology.org/badge/version-for-repo/gnuguix/kubo.svg)](https://packages.guix.gnu.org/packages/kubo/) | +| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | | -##### Cross Compiling +~~Snap~~ no longer supported ([#8688](https://github.com/ipfs/kubo/issues/8688)) -Compiling for a different platform is as simple as running: +#### macOS -``` -make build GOOS=myTargetOS GOARCH=myTargetArchitecture -``` +| Manager | Install | Version | +|---------|---------|---------| +| Homebrew | `brew install ipfs` | [![Homebrew](https://repology.org/badge/version-for-repo/homebrew/kubo.svg)](https://formulae.brew.sh/formula/ipfs) | +| MacPorts | `sudo port install ipfs` | [![MacPorts](https://repology.org/badge/version-for-repo/macports/kubo.svg)](https://ports.macports.org/port/ipfs/) | +| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) | +| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | | -#### Troubleshooting +#### Windows -- Separate [instructions are available for building on Windows](docs/windows.md). -- `git` is required in order for `go get` to fetch all dependencies. -- Package managers often contain out-of-date `golang` packages. - Ensure that `go version` reports at least 1.10. See above for how to install go. -- If you are interested in development, please install the development -dependencies as well. -- Shell command completions can be generated with one of the `ipfs commands completion` subcommands. 
Read [docs/command-completion.md](docs/command-completion.md) to learn more. -- See the [misc folder](https://github.com/ipfs/kubo/tree/master/misc) for how to connect IPFS to systemd or whatever init system your distro uses. +| Manager | Install | Version | +|---------|---------|---------| +| Scoop | `scoop install kubo` | [![Scoop](https://repology.org/badge/version-for-repo/scoop/kubo.svg)](https://scoop.sh/#/apps?q=kubo) | +| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | | -## Getting Started +~~Chocolatey~~ no longer supported ([#9341](https://github.com/ipfs/kubo/issues/9341)) -### Usage +## Documentation -[![docs: Command-line quick start](https://img.shields.io/static/v1?label=docs&message=Command-line%20quick%20start&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/how-to/command-line-quick-start/) -[![docs: Command-line reference](https://img.shields.io/static/v1?label=docs&message=Command-line%20reference&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/reference/kubo/cli/) - -To start using IPFS, you must first initialize IPFS's config files on your -system, this is done with `ipfs init`. See `ipfs init --help` for information on -the optional arguments it takes. After initialization is complete, you can use -`ipfs mount`, `ipfs add` and any of the other commands to explore! - -### Some things to try - -Basic proof of 'ipfs working' locally: - - echo "hello world" > hello - ipfs add hello - # This should output a hash string that looks something like: - # QmT78zSuBmuS4z925WZfrqQ1qHaJ56DQaTfyMUF7F8ff5o - ipfs cat - -### HTTP/RPC clients - -For programmatic interaction with Kubo, see our [list of HTTP/RPC clients](docs/http-rpc-clients.md). 
- -### Troubleshooting - -If you have previously installed IPFS before and you are running into problems getting a newer version to work, try deleting (or backing up somewhere else) your IPFS config directory (~/.ipfs by default) and rerunning `ipfs init`. This will reinitialize the config file to its defaults and clear out the local datastore of any bad entries. - -Please direct general questions and help requests to our [forums](https://discuss.ipfs.tech). - -If you believe you've found a bug, check the [issues list](https://github.com/ipfs/kubo/issues) and, if you don't see your problem there, either come talk to us on [Matrix chat](https://docs.ipfs.tech/community/chat/), or file an issue of your own! - -## Packages - -See [IPFS in GO](https://docs.ipfs.tech/reference/go/api/) documentation. +| Topic | Description | +|-------|-------------| +| [Configuration](docs/config.md) | All config options reference | +| [Environment variables](docs/environment-variables.md) | Runtime settings via env vars | +| [Experimental features](docs/experimental-features.md) | Opt-in features in development | +| [HTTP Gateway](docs/gateway.md) | Path, subdomain, and trustless gateway setup | +| [HTTP RPC clients](docs/http-rpc-clients.md) | Client libraries for Go, JS | +| [Delegated routing](docs/delegated-routing.md) | Multi-router and HTTP routing | +| [Metrics & monitoring](docs/metrics.md) | Prometheus metrics | +| [Content blocking](docs/content-blocking.md) | Denylist for public nodes | +| [Customizing](docs/customizing.md) | Unsure whether to use Plugins, Boxo, or a fork? | +| [Debug guide](docs/debug-guide.md) | CPU profiles, memory analysis, tracing | +| [Changelogs](docs/changelogs/) | Release notes for each version | +| [All documentation](https://github.com/ipfs/kubo/tree/master/docs) | Full list of docs | ## Development -Some places to get you started on the codebase: +See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow.
-- Main file: [./cmd/ipfs/main.go](https://github.com/ipfs/kubo/blob/master/cmd/ipfs/main.go) -- CLI Commands: [./core/commands/](https://github.com/ipfs/kubo/tree/master/core/commands) -- Bitswap (the data trading engine): [go-bitswap](https://github.com/ipfs/go-bitswap) -- libp2p - - libp2p: https://github.com/libp2p/go-libp2p - - DHT: https://github.com/libp2p/go-libp2p-kad-dht -- [IPFS : The `Add` command demystified](https://github.com/ipfs/kubo/tree/master/docs/add-code-flow.md) +## Getting Help -### Map of Implemented Subsystems -**WIP**: This is a high-level architecture diagram of the various sub-systems of this specific implementation. To be updated with how they interact. Anyone who has suggestions is welcome to comment [here](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit) on how we can improve this! - +- [IPFS Forum](https://discuss.ipfs.tech) - community support, questions, and discussion +- [Community](https://docs.ipfs.tech/community/) - chat, events, and working groups +- [GitHub Issues](https://github.com/ipfs/kubo/issues) - bug reports for Kubo specifically +- [IPFS Docs Issues](https://github.com/ipfs/ipfs-docs/issues) - documentation issues -### CLI, HTTP-API, Architecture Diagram - -![](./docs/cli-http-api-core-diagram.png) - -> [Origin](https://github.com/ipfs/pm/pull/678#discussion_r210410924) - -Description: Dotted means "likely going away". The "Legacy" parts are thin wrappers around some commands to translate between the new system and the old system. The grayed-out parts on the "daemon" diagram are there to show that the code is all the same, it's just that we turn some pieces on and some pieces off depending on whether we're running on the client or the server. - -### Testing - -``` -make test -``` - -### Development Dependencies - -If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf). 
- -### Developer Notes - -Find more documentation for developers on [docs](./docs) - -## Maintainer Info - -Kubo is maintained by [Shipyard](https://ipshipyard.com/). - -* This repository is part of [Shipyard's GO Triage triage](https://ipshipyard.notion.site/IPFS-Go-Triage-Boxo-Kubo-Rainbow-0ddee6b7f28d412da7dabe4f9107c29a). -* [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778) +## Security Issues +See [`SECURITY.md`](SECURITY.md). ## Contributing [![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) -We ❤️ all [our contributors](docs/AUTHORS); this project wouldn’t be what it is without you! If you want to help out, please see [CONTRIBUTING.md](CONTRIBUTING.md). +We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md). -This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). +This repository follows the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). -Members of IPFS community provide Kubo support on [discussion forum category here](https://discuss.ipfs.tech/c/help/help-kubo/23). +## Maintainer Info -Need help with IPFS itself? Learn where to get help and support at https://ipfs.tech/help. + + +> [!NOTE] +> Kubo is maintained by the [Shipyard](https://ipshipyard.com/) team. 
+> +> [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778) ## License -This project is dual-licensed under Apache 2.0 and MIT terms: +Dual-licensed under Apache 2.0 and MIT: -- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/ipfs/kubo/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](https://github.com/ipfs/kubo/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT) +- [LICENSE-APACHE](LICENSE-APACHE) +- [LICENSE-MIT](LICENSE-MIT) diff --git a/Rules.mk b/Rules.mk index ef88bee0f..b04e3d73e 100644 --- a/Rules.mk +++ b/Rules.mk @@ -107,8 +107,8 @@ uninstall: .PHONY: uninstall supported: - @echo "Currently supported platforms:" - @for p in ${SUPPORTED_PLATFORMS}; do echo $$p; done + @echo "Currently supported platforms (from .github/build-platforms.yml):" + @grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' || (echo "Error: .github/build-platforms.yml not found"; exit 1) .PHONY: supported help: @@ -134,14 +134,14 @@ help: @echo '' @echo 'TESTING TARGETS:' @echo '' - @echo ' test - Run all tests' - @echo ' test_short - Run short go tests and short sharness tests' - @echo ' test_go_short - Run short go tests' - @echo ' test_go_test - Run all go tests' - @echo ' test_go_expensive - Run all go tests and compile on all platforms' - @echo ' test_go_race - Run go tests with the race detector enabled' - @echo ' test_go_lint - Run the `golangci-lint` vetting tool' + @echo ' test - Run all tests (test_go_fmt, test_unit, test_cli, test_sharness)' + @echo ' test_short - Run fast tests (test_go_fmt, test_unit)' + @echo ' test_unit - Run unit tests with coverage (excludes test/cli)' + @echo ' test_cli - Run CLI integration tests (requires built binary)' + @echo ' test_go_fmt - Check Go source formatting' + @echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml' + @echo ' test_go_lint - Run golangci-lint' @echo ' 
test_sharness - Run sharness tests' - @echo ' coverage - Collects coverage info from unit tests and sharness' + @echo ' coverage - Collect coverage info from unit tests and sharness' @echo .PHONY: help diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 5f2907d00..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,49 +0,0 @@ -# Notes: -# - Minimal appveyor.yml file is an empty file. All sections are optional. -# - Indent each level of configuration with 2 spaces. Do not use tabs! -# - All section names are case-sensitive. -# - Section names should be unique on each level. - -version: "{build}" - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\ipfs\go-ipfs - -environment: - GOPATH: c:\gopath - TEST_VERBOSE: 1 - #TEST_NO_FUSE: 1 - #TEST_SUITE: test_sharness - #GOFLAGS: -tags nofuse - global: - BASH: C:\cygwin\bin\bash - matrix: - - GOARCH: amd64 - GOVERSION: 1.5.1 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" - -install: - # Enable make - #- SET PATH=c:\MinGW\bin;%PATH% - #- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe - - go version - - go env - -# Cygwin build script -# -# NOTES: -# -# The stdin/stdout file descriptor appears not to be valid for the Appveyor -# build which causes failures as certain functions attempt to redirect -# default file handles. Ensure a dummy file descriptor is opened with 'exec'. -# -build_script: - - '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0&2 "fatal: %s\n" "$@" - exit 1 -} - -# Get arguments - -test "$#" -eq "1" || die "This program must be passed exactly 1 arguments" "Usage: $USAGE" - -GO_MIN_VERSION="$1" - -UPGRADE_MSG="Please take a look at https://golang.org/doc/install to install or upgrade go." - -# Get path to the directory containing this file -# If $0 has no slashes, uses "./" -PREFIX=$(expr "$0" : "\(.*\/\)") || PREFIX='./' -# Include the 'check_at_least_version' function -. 
${PREFIX}check_version - -# Check that the go binary exists and is in the path - -GOCC=${GOCC="go"} - -type ${GOCC} >/dev/null 2>&1 || die_upgrade "go is not installed or not in the PATH!" - -# Check the go binary version - -VERS_STR=$(${GOCC} version 2>&1) || die "'go version' failed with output: $VERS_STR" - -GO_CUR_VERSION=$(expr "$VERS_STR" : ".*go version.* go\([^[:space:]]*\) .*") || die "Invalid 'go version' output: $VERS_STR" - -check_at_least_version "$GO_MIN_VERSION" "$GO_CUR_VERSION" "${GOCC}" diff --git a/bin/check_version b/bin/check_version deleted file mode 100755 index 25007002c..000000000 --- a/bin/check_version +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/sh - -if test "x$UPGRADE_MSG" = "x"; then - printf >&2 "fatal: Please set '"'$UPGRADE_MSG'"' before sourcing this script\n" - exit 1 -fi - -die_upgrade() { - printf >&2 "fatal: %s\n" "$@" - printf >&2 "=> %s\n" "$UPGRADE_MSG" - exit 1 -} - -major_number() { - vers="$1" - - # Hack around 'expr' exiting with code 1 when it outputs 0 - case "$vers" in - 0) echo "0" ;; - 0.*) echo "0" ;; - *) expr "$vers" : "\([^.]*\).*" || return 1 - esac -} - -check_at_least_version() { - MIN_VERS="$1" - CUR_VERS="$2" - PROG_NAME="$3" - - # Get major, minor and fix numbers for each version - MIN_MAJ=$(major_number "$MIN_VERS") || die "No major version number in '$MIN_VERS' for '$PROG_NAME'" - CUR_MAJ=$(major_number "$CUR_VERS") || die "No major version number in '$CUR_VERS' for '$PROG_NAME'" - - # We expect a version to be of form X.X.X - # if the second dot doesn't match, we consider it a prerelease - - if MIN_MIN=$(expr "$MIN_VERS" : "[^.]*\.\([0-9][0-9]*\)"); then - # this captured digit is necessary, since expr returns code 1 if the output is empty - if expr "$MIN_VERS" : "[^.]*\.[0-9]*\([0-9]\.\|[0-9]\$\)" >/dev/null; then - MIN_PRERELEASE="0" - else - MIN_PRERELEASE="1" - fi - MIN_FIX=$(expr "$MIN_VERS" : "[^.]*\.[0-9][0-9]*[^0-9][^0-9]*\([0-9][0-9]*\)") || MIN_FIX="0" - else - MIN_MIN="0" - MIN_PRERELEASE="0" - 
MIN_FIX="0" - fi - if CUR_MIN=$(expr "$CUR_VERS" : "[^.]*\.\([0-9][0-9]*\)"); then - # this captured digit is necessary, since expr returns code 1 if the output is empty - if expr "$CUR_VERS" : "[^.]*\.[0-9]*\([0-9]\.\|[0-9]\$\)" >/dev/null; then - CUR_PRERELEASE="0" - else - CUR_PRERELEASE="1" - fi - CUR_FIX=$(expr "$CUR_VERS" : "[^.]*\.[0-9][0-9]*[^0-9][^0-9]*\([0-9][0-9]*\)") || CUR_FIX="0" - else - CUR_MIN="0" - CUR_PRERELEASE="0" - CUR_FIX="0" - fi - - # Compare versions - VERS_LEAST="$PROG_NAME version '$CUR_VERS' should be at least '$MIN_VERS'" - test "$CUR_MAJ" -lt "$MIN_MAJ" && die_upgrade "$VERS_LEAST" - test "$CUR_MAJ" -gt "$MIN_MAJ" || { - test "$CUR_MIN" -lt "$MIN_MIN" && die_upgrade "$VERS_LEAST" - test "$CUR_MIN" -gt "$MIN_MIN" || { - test "$CUR_PRERELEASE" -gt "$MIN_PRERELEASE" && die_upgrade "$VERS_LEAST" - test "$CUR_PRERELEASE" -lt "$MIN_PRERELEASE" || { - test "$CUR_FIX" -lt "$MIN_FIX" && die_upgrade "$VERS_LEAST" - true - } - } - } -} diff --git a/bin/container_daemon b/bin/container_daemon index 9651ad55d..7e7c4eddc 100755 --- a/bin/container_daemon +++ b/bin/container_daemon @@ -50,6 +50,6 @@ else unset IPFS_SWARM_KEY_FILE fi -find /container-init.d -maxdepth 1 -type f -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run +find /container-init.d -maxdepth 1 \( -type f -o -type l \) -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run exec ipfs "$@" diff --git a/bin/get-docker-tags.sh b/bin/get-docker-tags.sh index e54da6482..f28ce7234 100755 --- a/bin/get-docker-tags.sh +++ b/bin/get-docker-tags.sh @@ -18,7 +18,7 @@ set -euo pipefail if [[ $# -lt 1 ]] ; then echo 'At least 1 arg required.' 
echo 'Usage:' - echo './push-docker-tags.sh [git commit sha1] [git branch name] [git tag name]' + echo './get-docker-tags.sh [git commit sha1] [git branch name] [git tag name]' exit 1 fi @@ -29,12 +29,10 @@ GIT_BRANCH=${3:-$(git symbolic-ref -q --short HEAD || echo "unknown")} GIT_TAG=${4:-$(git describe --tags --exact-match 2> /dev/null || echo "")} IMAGE_NAME=${IMAGE_NAME:-ipfs/kubo} -LEGACY_IMAGE_NAME=${LEGACY_IMAGE_NAME:-ipfs/go-ipfs} echoImageName () { local IMAGE_TAG=$1 echo "$IMAGE_NAME:$IMAGE_TAG" - echo "$LEGACY_IMAGE_NAME:$IMAGE_TAG" } if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then @@ -43,16 +41,16 @@ if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then echoImageName "$GIT_TAG" echoImageName "latest" - echoImageName "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981 + echoImageName "release" # see: https://github.com/ipfs/kubo/issues/3999#issuecomment-742228981 elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then # sanitize the branch name since docker tags have stricter char limits than git branch names branch=$(echo "$GIT_BRANCH" | tr '/' '-' | tr --delete --complement '[:alnum:]-') echoImageName "${branch}-${BUILD_NUM}-${GIT_SHA1_SHORT}" -elif [ "$GIT_BRANCH" = "master" ]; then - echoImageName "master-${BUILD_NUM}-${GIT_SHA1_SHORT}" - echoImageName "master-latest" +elif [ "$GIT_BRANCH" = "master" ] || [ "$GIT_BRANCH" = "staging" ]; then + echoImageName "${GIT_BRANCH}-${BUILD_NUM}-${GIT_SHA1_SHORT}" + echoImageName "${GIT_BRANCH}-latest" else echo "Nothing to do. No docker tag defined for branch: $GIT_BRANCH, tag: $GIT_TAG" diff --git a/bin/mkreleaselog b/bin/mkreleaselog index 2ff6c0e89..f535f2f6d 100755 --- a/bin/mkreleaselog +++ b/bin/mkreleaselog @@ -1,10 +1,19 @@ -#!/bin/zsh +#!/bin/bash # # Invocation: mkreleaselog [FIRST_REF [LAST_REF]] +# +# Generates release notes with contributor statistics, deduplicating by GitHub handle. 
+# GitHub handles are resolved from: +# 1. GitHub noreply emails (user@users.noreply.github.com) +# 2. Merge commit messages (Merge pull request #N from user/branch) +# 3. GitHub API via gh CLI (for squash merges) +# +# Results are cached in ~/.cache/mkreleaselog/github-handles.json set -euo pipefail export GO111MODULE=on -export GOPATH="$(go env GOPATH)" +GOPATH="$(go env GOPATH)" +export GOPATH # List of PCRE regular expressions to match "included" modules. INCLUDE_MODULES=( @@ -15,10 +24,15 @@ INCLUDE_MODULES=( "^github.com/multiformats/" "^github.com/filecoin-project/" "^github.com/ipfs-shipyard/" + "^github.com/ipshipyard/" + "^github.com/probe-lab/" # Authors of personal modules used by go-ipfs that should be mentioned in the # release notes. "^github.com/whyrusleeping/" + "^github.com/gammazero/" + "^github.com/Jorropo/" + "^github.com/guillaumemichel/" "^github.com/Kubuxu/" "^github.com/jbenet/" "^github.com/Stebalien/" @@ -48,15 +62,348 @@ IGNORE_FILES=( ) ########################################################################################## +# GitHub Handle Resolution Infrastructure +########################################################################################## + +# Cache location following XDG spec +GITHUB_CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/mkreleaselog" +GITHUB_CACHE_FILE="$GITHUB_CACHE_DIR/github-handles.json" + +# Timeout for gh CLI commands (seconds) +GH_TIMEOUT=10 + +# Associative array for email -> github handle mapping (runtime cache) +declare -A EMAIL_TO_GITHUB + +# Check if gh CLI is available and authenticated +gh_available() { + command -v gh >/dev/null 2>&1 && gh auth status >/dev/null 2>&1 +} + +# Load cached email -> github handle mappings from disk +load_github_cache() { + EMAIL_TO_GITHUB=() + + if [[ ! -f "$GITHUB_CACHE_FILE" ]]; then + return 0 + fi + + # Validate JSON before loading + if ! jq -e '.' 
"$GITHUB_CACHE_FILE" >/dev/null 2>&1; then + msg "Warning: corrupted cache file, ignoring" + return 0 + fi + + local email handle + while IFS=$'\t' read -r email handle; do + # Validate handle format (alphanumeric, hyphens, max 39 chars) + if [[ -n "$email" && -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then + EMAIL_TO_GITHUB["$email"]="$handle" + fi + done < <(jq -r 'to_entries[] | "\(.key)\t\(.value)"' "$GITHUB_CACHE_FILE" 2>/dev/null) + + msg "Loaded ${#EMAIL_TO_GITHUB[@]} cached GitHub handle mappings" +} + +# Save email -> github handle mappings to disk (atomic write) +save_github_cache() { + if [[ ${#EMAIL_TO_GITHUB[@]} -eq 0 ]]; then + return 0 + fi + + mkdir -p "$GITHUB_CACHE_DIR" + + local tmp_file + tmp_file="$(mktemp "$GITHUB_CACHE_DIR/cache.XXXXXX")" || return 1 + + # Build JSON from associative array + { + echo "{" + local first=true + local key + for key in "${!EMAIL_TO_GITHUB[@]}"; do + if [[ "$first" == "true" ]]; then + first=false + else + echo "," + fi + # Escape special characters in email for JSON + printf ' %s: %s' "$(jq -n --arg e "$key" '$e')" "$(jq -n --arg h "${EMAIL_TO_GITHUB[$key]}" '$h')" + done + echo + echo "}" + } > "$tmp_file" + + # Validate before replacing + if jq -e '.' 
"$tmp_file" >/dev/null 2>&1; then + mv "$tmp_file" "$GITHUB_CACHE_FILE" + msg "Saved ${#EMAIL_TO_GITHUB[@]} GitHub handle mappings to cache" + else + rm -f "$tmp_file" + msg "Warning: failed to save cache (invalid JSON)" + fi +} + +# Extract GitHub handle from email if it's a GitHub noreply address +# Handles: user@users.noreply.github.com and 12345678+user@users.noreply.github.com +extract_handle_from_noreply() { + local email="$1" + + if [[ "$email" =~ ^([0-9]+\+)?([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)@users\.noreply\.github\.com$ ]]; then + echo "${BASH_REMATCH[2]}" + return 0 + fi + return 1 +} + +# Extract GitHub handle from merge commit subject +# Handles: "Merge pull request #123 from username/branch" +extract_handle_from_merge_commit() { + local subject="$1" + + if [[ "$subject" =~ ^Merge\ pull\ request\ \#[0-9]+\ from\ ([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)/.*$ ]]; then + echo "${BASH_REMATCH[1]}" + return 0 + fi + return 1 +} + +# Extract PR number from commit subject +# Handles: "Subject (#123)" and "Merge pull request #123 from" +extract_pr_number() { + local subject="$1" + + if [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then + echo "${BASH_REMATCH[1]}" + return 0 + elif [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then + echo "${BASH_REMATCH[1]}" + return 0 + fi + return 1 +} + +# Query GitHub API for PR author (with timeout and error handling) +query_pr_author() { + local gh_repo="$1" # e.g., "ipfs/kubo" + local pr_num="$2" + + if ! 
gh_available; then + return 1 + fi + + local handle + handle="$(timeout "$GH_TIMEOUT" gh pr view "$pr_num" --repo "$gh_repo" --json author -q '.author.login' 2>/dev/null)" || return 1 + + # Validate handle format + if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then + echo "$handle" + return 0 + fi + return 1 +} + +# Query GitHub API for commit author (fallback when no PR available) +query_commit_author() { + local gh_repo="$1" # e.g., "ipfs/kubo" + local commit_sha="$2" + + if ! gh_available; then + return 1 + fi + + local handle + handle="$(timeout "$GH_TIMEOUT" gh api "/repos/$gh_repo/commits/$commit_sha" --jq '.author.login // empty' 2>/dev/null)" || return 1 + + # Validate handle format + if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then + echo "$handle" + return 0 + fi + return 1 +} + +# Resolve email to GitHub handle using all available methods +# Args: email, commit_hash (optional), repo_dir (optional), gh_repo (optional) +resolve_github_handle() { + local email="$1" + local commit="${2:-}" + local repo_dir="${3:-}" + local gh_repo="${4:-}" + + # Skip empty emails + [[ -z "$email" ]] && return 1 + + # Check runtime cache first + if [[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]]; then + echo "${EMAIL_TO_GITHUB[$email]}" + return 0 + fi + + local handle="" + + # Method 1: Extract from noreply email + if handle="$(extract_handle_from_noreply "$email")"; then + EMAIL_TO_GITHUB["$email"]="$handle" + echo "$handle" + return 0 + fi + + # Method 2: Look at commit message for merge commit pattern + if [[ -n "$commit" && -n "$repo_dir" ]]; then + local subject + subject="$(git -C "$repo_dir" log -1 --format='%s' "$commit" 2>/dev/null)" || true + + if [[ -n "$subject" ]]; then + if handle="$(extract_handle_from_merge_commit "$subject")"; then + EMAIL_TO_GITHUB["$email"]="$handle" + echo "$handle" + return 0 + fi + + # Method 3: Query GitHub API for PR author + if [[ -n "$gh_repo" ]]; then + local 
pr_num + if pr_num="$(extract_pr_number "$subject")"; then + if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then + EMAIL_TO_GITHUB["$email"]="$handle" + echo "$handle" + return 0 + fi + fi + fi + fi + fi + + return 1 +} + +# Build GitHub handle mappings for all commits in a range +# This does a single pass to collect PR numbers, then batch queries them +build_github_mappings() { + local module="$1" + local start="$2" + local end="${3:-HEAD}" + local repo + repo="$(strip_version "$module")" + local dir + local gh_repo="" + + if [[ "$module" == "github.com/ipfs/kubo" ]]; then + dir="$ROOT_DIR" + else + dir="$GOPATH/src/$repo" + fi + + # Extract gh_repo for API calls (e.g., "ipfs/kubo" from "github.com/ipfs/kubo") + if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then + gh_repo="${BASH_REMATCH[1]}" + fi + + msg "Building GitHub handle mappings for $module..." + + # Collect all unique emails and their commit context + declare -A email_commits=() + local hash email subject + + while IFS=$'\t' read -r hash email subject; do + [[ -z "$email" ]] && continue + + # Skip if already resolved + [[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]] && continue + + # Try to resolve without API first + local handle="" + + # Method 1: noreply email + if handle="$(extract_handle_from_noreply "$email")"; then + EMAIL_TO_GITHUB["$email"]="$handle" + continue + fi + + # Method 2: merge commit message + if handle="$(extract_handle_from_merge_commit "$subject")"; then + EMAIL_TO_GITHUB["$email"]="$handle" + continue + fi + + # Store for potential API lookup + if [[ -z "${email_commits[$email]:-}" ]]; then + email_commits["$email"]="$hash" + fi + done < <(git -C "$dir" log --format='tformat:%H%x09%aE%x09%s' --no-merges "$start..$end" 2>/dev/null) + + # API batch lookup for remaining emails (if gh is available) + if gh_available && [[ -n "$gh_repo" && ${#email_commits[@]} -gt 0 ]]; then + msg "Querying GitHub API for ${#email_commits[@]} unknown contributors..." 
+ local key + for key in "${!email_commits[@]}"; do + # Skip if already resolved + [[ -n "${EMAIL_TO_GITHUB[$key]:-}" ]] && continue + + local commit_hash="${email_commits[$key]}" + local subj handle + subj="$(git -C "$dir" log -1 --format='%s' "$commit_hash" 2>/dev/null)" || true + + # Try PR author lookup first (cheaper API call) + local pr_num + if pr_num="$(extract_pr_number "$subj")"; then + if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then + EMAIL_TO_GITHUB["$key"]="$handle" + continue + fi + fi + + # Fallback: commit author API (works for any commit) + if handle="$(query_commit_author "$gh_repo" "$commit_hash")"; then + EMAIL_TO_GITHUB["$key"]="$handle" + fi + done + fi +} + +########################################################################################## +# Original infrastructure with modifications +########################################################################################## + +build_include_regex() { + local result="" + local mod + for mod in "${INCLUDE_MODULES[@]}"; do + if [[ -n "$result" ]]; then + result="$result|$mod" + else + result="$mod" + fi + done + echo "($result)" +} + +build_exclude_regex() { + local result="" + local mod + for mod in "${EXCLUDE_MODULES[@]}"; do + if [[ -n "$result" ]]; then + result="$result|$mod" + else + result="$mod" + fi + done + if [[ -n "$result" ]]; then + echo "($result)" + else + echo '$^' # match nothing + fi +} if [[ ${#INCLUDE_MODULES[@]} -gt 0 ]]; then - INCLUDE_REGEX="(${$(printf "|%s" "${INCLUDE_MODULES[@]}"):1})" + INCLUDE_REGEX="$(build_include_regex)" else INCLUDE_REGEX="" # "match anything" fi if [[ ${#EXCLUDE_MODULES[@]} -gt 0 ]]; then - EXCLUDE_REGEX="(${$(printf "|%s" "${EXCLUDE_MODULES[@]}"):1})" + EXCLUDE_REGEX="$(build_exclude_regex)" else EXCLUDE_REGEX='$^' # "match nothing" fi @@ -71,15 +418,28 @@ NL=$'\n' ROOT_DIR="$(git rev-parse --show-toplevel)" -alias jq="jq --unbuffered" - msg() { echo "$*" >&2 } statlog() { local module="$1" - local 
rpath="$GOPATH/src/$(strip_version "$module")" + local rpath + local gh_repo="" + + if [[ "$module" == "github.com/ipfs/kubo" ]]; then + rpath="$ROOT_DIR" + else + rpath="$GOPATH/src/$(strip_version "$module")" + fi + + # Extract gh_repo for API calls + local repo + repo="$(strip_version "$module")" + if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then + gh_repo="${BASH_REMATCH[1]}" + fi + local start="${2:-}" local end="${3:-HEAD}" local mailmap_file="$rpath/.mailmap" @@ -88,18 +448,21 @@ statlog() { fi local stack=() - git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}" | while read -r line; do + local line + while read -r line; do if [[ -n "$line" ]]; then stack+=("$line") continue fi + local changes read -r changes - changed=0 - insertions=0 - deletions=0 - while read count event; do + local changed=0 + local insertions=0 + local deletions=0 + local count event + while read -r count event; do if [[ "$event" =~ ^file ]]; then changed=$count elif [[ "$event" =~ ^insertion ]]; then @@ -112,27 +475,32 @@ statlog() { fi done<<<"${changes//,/$NL}" + local author for author in "${stack[@]}"; do + local hash name email IFS=$'\t' read -r hash name email <<<"$author" + + # Resolve GitHub handle + local github_handle="" + github_handle="$(resolve_github_handle "$email" "$hash" "$rpath" "$gh_repo")" || true + jq -n \ --arg "hash" "$hash" \ --arg "name" "$name" \ --arg "email" "$email" \ + --arg "github" "$github_handle" \ --argjson "changed" "$changed" \ --argjson "insertions" "$insertions" \ --argjson "deletions" "$deletions" \ - '{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}' + '{Commit: $hash, Author: $name, Email: $email, GitHub: $github, Files: $changed, Insertions: $insertions, Deletions: $deletions}' done stack=() - done + done < <(git -C "$rpath" -c mailmap.file="$mailmap_file" log 
--use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}") } # Returns a stream of deps changed between $1 and $2. dep_changes() { - { - <"$1" - <"$2" - } | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)' + cat "$1" "$2" | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)' } # resolve_commits resolves a git ref for each version. @@ -160,36 +528,37 @@ ignored_commit() { # Generate a release log for a range of commits in a single repo. release_log() { - setopt local_options BASH_REMATCH - local module="$1" local start="$2" local end="${3:-HEAD}" - local repo="$(strip_version "$1")" - local dir="$GOPATH/src/$repo" + local repo + repo="$(strip_version "$1")" + local dir + if [[ "$module" == "github.com/ipfs/kubo" ]]; then + dir="$ROOT_DIR" + else + dir="$GOPATH/src/$repo" + fi - local commit pr - git -C "$dir" log \ - --format='tformat:%H %s' \ - --first-parent \ - "$start..$end" | - while read commit subject; do - # Skip commits that only touch ignored files. - if ignored_commit "$dir" "$commit"; then - continue - fi + local commit subject + while read -r commit subject; do + # Skip commits that only touch ignored files. 
+ if ignored_commit "$dir" "$commit"; then + continue + fi - if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then - local prnum="${BASH_REMATCH[2]}" - local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)" - printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")" - elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then - local prnum="${BASH_REMATCH[2]}" - printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")" - else - printf -- "- %s\n" "$subject" - fi - done + if [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then + local prnum="${BASH_REMATCH[1]}" + local desc + desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)" + printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")" + elif [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then + local prnum="${BASH_REMATCH[1]}" + printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")" + else + printf -- "- %s\n" "$subject" + fi + done < <(git -C "$dir" log --format='tformat:%H %s' --first-parent "$start..$end") } indent() { @@ -201,10 +570,16 @@ mod_deps() { } ensure() { - local repo="$(strip_version "$1")" + local repo + repo="$(strip_version "$1")" local commit="$2" - local rpath="$GOPATH/src/$repo" - if [[ ! -d "$rpath" ]]; then + local rpath + if [[ "$1" == "github.com/ipfs/kubo" ]]; then + rpath="$ROOT_DIR" + else + rpath="$GOPATH/src/$repo" + fi + if [[ "$1" != "github.com/ipfs/kubo" ]] && [[ ! -d "$rpath" ]]; then msg "Cloning $repo..." git clone "http://$repo" "$rpath" >&2 fi @@ -217,14 +592,27 @@ ensure() { git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1 } +# Summarize stats, grouping by GitHub handle (with fallback to email for dedup) statsummary() { - jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' | - jq '. 
+ {Lines: (.Deletions + .Insertions)}' + jq -s ' + # Group by GitHub handle if available, otherwise by email + group_by(if .GitHub != "" then .GitHub else .Email end)[] | + { + # Use first non-empty GitHub handle, or fall back to Author name + Author: .[0].Author, + GitHub: (map(select(.GitHub != "")) | .[0].GitHub // ""), + Email: .[0].Email, + Commits: (. | length), + Insertions: (map(.Insertions) | add), + Deletions: (map(.Deletions) | add), + Files: (map(.Files) | add) + } + ' | jq '. + {Lines: (.Deletions + .Insertions)}' } strip_version() { local repo="$1" - if [[ "$repo" =~ '.*/v[0-9]+$' ]]; then + if [[ "$repo" =~ .*/v[0-9]+$ ]]; then repo="$(dirname "$repo")" fi echo "$repo" @@ -233,19 +621,24 @@ strip_version() { recursive_release_log() { local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}" local end="${2:-$(git rev-parse HEAD)}" - local repo_root="$(git rev-parse --show-toplevel)" - local module="$(go list -m)" - local dir="$(go list -m -f '{{.Dir}}')" + local repo_root + repo_root="$(git rev-parse --show-toplevel)" + local module + module="$(go list -m)" + local dir + dir="$(go list -m -f '{{.Dir}}')" - if [[ "${GOPATH}/${module}" -ef "${dir}" ]]; then - echo "This script requires the target module and all dependencies to live in a GOPATH." - return 1 - fi + # Load cached GitHub handle mappings + load_github_cache + + # Kubo can be run from any directory, dependencies still use GOPATH ( local result=0 - local workspace="$(mktemp -d)" - trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT + local workspace + workspace="$(mktemp -d)" + # shellcheck disable=SC2064 + trap "rm -rf '$workspace'" INT TERM EXIT cd "$workspace" echo "Computing old deps..." >&2 @@ -260,6 +653,9 @@ recursive_release_log() { printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2 + # Pre-build GitHub mappings for main module + build_github_mappings "$module" "$start" "$end" + echo "### 📝 Changelog" echo echo "
Full Changelog" @@ -270,24 +666,26 @@ recursive_release_log() { statlog "$module" "$start" "$end" > statlog.json - dep_changes old_deps.json new_deps.json | + local dep_module new new_ref old old_ref + while read -r dep_module new new_ref old old_ref; do + if ! ensure "$dep_module" "$new_ref"; then + result=1 + local changelog="failed to fetch repo" + else + # Pre-build GitHub mappings for dependency + build_github_mappings "$dep_module" "$old_ref" "$new_ref" + statlog "$dep_module" "$old_ref" "$new_ref" >> statlog.json + local changelog + changelog="$(release_log "$dep_module" "$old_ref" "$new_ref")" + fi + if [[ -n "$changelog" ]]; then + printf -- "- %s (%s -> %s):\n" "$dep_module" "$old" "$new" + echo "$changelog" | indent + fi + done < <(dep_changes old_deps.json new_deps.json | jq --arg inc "$INCLUDE_REGEX" --arg exc "$EXCLUDE_REGEX" \ 'select(.Path | test($inc)) | select(.Path | test($exc) | not)' | - # Compute changelogs - jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' | - while read module new new_ref old old_ref; do - if ! ensure "$module" "$new_ref"; then - result=1 - local changelog="failed to fetch repo" - else - statlog "$module" "$old_ref" "$new_ref" >> statlog.json - local changelog="$(release_log "$module" "$old_ref" "$new_ref")" - fi - if [[ -n "$changelog" ]]; then - printf -- "- %s (%s -> %s):\n" "$module" "$old" "$new" - echo "$changelog" | indent - fi - done + jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"') echo echo "
" @@ -299,8 +697,18 @@ recursive_release_log() { echo "|-------------|---------|---------|---------------|" statsummary Provide.Strategy, Reprovider.Interval -> Provide.DHT.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide") + } + // Check for deprecated "flat" strategy (should have been migrated to "all") + if cfg.Provide.Strategy.WithDefault("") == "flat" { + log.Fatal("Provide.Strategy='flat' is no longer supported. Use 'all' instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy") + } + if cfg.Experimental.StrategicProviding { + log.Fatal("Experimental.StrategicProviding was removed. Remove it from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing") + } + // Check for invalid MaxWorkers=0 with SweepEnabled + if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) && + cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers) == 0 { + log.Fatal("Invalid configuration: Provide.DHT.MaxWorkers cannot be 0 when Provide.DHT.SweepEnabled=true. Set Provide.DHT.MaxWorkers to a positive value (e.g., 16) to control resource usage. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxworkers") + } + if routingOption == routingOptionDelegatedKwd { + // Delegated routing is read-only mode - content providing must be disabled + if cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) { + log.Fatal("Routing.Type=delegated does not support content providing. Set Provide.Enabled=false in your config.") + } + } printLibp2pPorts(node) @@ -502,6 +566,9 @@ take effect. 
} }() + // Clear any cached offline node and set the online daemon node + // This ensures HTTP RPC server uses the online node, not any cached offline node + cctx.ClearCachedNode() cctx.ConstructNode = func() (*core.IpfsNode, error) { return node, nil } @@ -512,7 +579,20 @@ take effect. if err != nil { return err } - node.Process.AddChild(goprocess.WithTeardown(cctx.Plugins.Close)) + + pluginErrc := make(chan error, 1) + select { + case <-node.Context().Done(): + close(pluginErrc) + default: + context.AfterFunc(node.Context(), func() { + err := cctx.Plugins.Close() + if err != nil { + pluginErrc <- fmt.Errorf("closing plugins: %w", err) + } + close(pluginErrc) + }) + } // construct api endpoint - every time apiErrc, err := serveHTTPApi(req, cctx) @@ -529,6 +609,11 @@ take effect. if err := mountFuse(req, cctx); err != nil { return err } + defer func() { + if _err != nil { + nodeMount.Unmount(node) + } + }() } // repo blockstore GC - if --enable-gc flag is present @@ -537,9 +622,9 @@ take effect. return err } - // Add any files downloaded by migration. - if cacheMigrations || pinMigrations { - err = addMigrations(cctx.Context(), node, fetcher, pinMigrations) + // Add any files downloaded by external migrations (embedded migrations don't download files) + if externalMigrationFetcher != nil && (cacheMigrations || pinMigrations) { + err = addMigrations(cctx.Context(), node, externalMigrationFetcher, pinMigrations) if err != nil { fmt.Fprintln(os.Stderr, "Could not add migration to IPFS:", err) } @@ -548,10 +633,10 @@ take effect. os.RemoveAll(migrations.DownloadDirectory) migrations.DownloadDirectory = "" } - if fetcher != nil { + if externalMigrationFetcher != nil { // If there is an error closing the IpfsFetcher, then print error, but // do not fail because of it. - err = fetcher.Close() + err = externalMigrationFetcher.Close() if err != nil { log.Errorf("error closing IPFS fetcher: %s", err) } @@ -601,19 +686,32 @@ take effect. 
}() if !offline { - // Warn users who were victims of 'lowprofile' footgun (https://github.com/ipfs/kubo/pull/10524) - if cfg.Experimental.StrategicProviding { + // Warn users when provide systems are disabled + if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) { fmt.Print(` -⚠️ Reprovide system is disabled due to 'Experimental.StrategicProviding=true' + +⚠️ Provide and Reprovide systems are disabled due to 'Provide.Enabled=false' ⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering -⚠️ If this is not intentional, call 'ipfs config profile apply announce-on' +⚠️ If this is not intentional, call 'ipfs config profile apply announce-on' or set Provide.Enabled=true' `) - } else if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 { + } else if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 { fmt.Print(` -⚠️ Reprovider system is disabled due to 'Reprovider.Interval=0' -⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering -⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h' + +⚠️ Providing to the DHT is disabled due to 'Provide.DHT.Interval=0' +⚠️ Local CIDs will not be provided to Amino DHT, making them impossible to retrieve without manual peering +⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Provide.DHT.Interval=22h' + +`) + } + + // Inform user about Routing.AcceleratedDHTClient when enabled + if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) { + fmt.Print(` + +ℹ️ Routing.AcceleratedDHTClient is enabled for faster content discovery +ℹ️ and DHT provides. Routing table is initializing. IPFS is ready to use, +ℹ️ but performance will improve over time as more peers are discovered `) } @@ -661,16 +759,26 @@ take effect. 
log.Fatal("Support for IPFS_REUSEPORT was removed. Use LIBP2P_TCP_REUSEPORT instead.") } + unmountErrc := make(chan error) + context.AfterFunc(node.Context(), func() { + <-node.Context().Done() + nodeMount.Unmount(node) + close(unmountErrc) + }) + // collect long-running errors and block for shutdown // TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown - var errs error - for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc) { + var errs []error + for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc, pluginErrc, unmountErrc) { if err != nil { - errs = multierror.Append(errs, err) + errs = append(errs, err) } } + if len(errs) != 0 { + return errors.Join(errs...) + } - return errs + return nil } // serveHTTPApi collects options, creates listener, prints status message and starts serving requests. @@ -723,10 +831,18 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error for _, listener := range listeners { // we might have listened to /tcp/0 - let's see what we are listing on fmt.Printf("RPC API server listening on %s\n", listener.Multiaddr()) - // Browsers require TCP. + // Browsers require TCP with explicit host. 
switch listener.Addr().Network() { case "tcp", "tcp4", "tcp6": - fmt.Printf("WebUI: http://%s/webui\n", listener.Addr()) + rpc := listener.Addr().String() + // replace catch-all with explicit localhost URL that works in browsers + // https://github.com/ipfs/kubo/issues/10515 + if strings.Contains(rpc, "0.0.0.0:") { + rpc = strings.Replace(rpc, "0.0.0.0:", "127.0.0.1:", 1) + } else if strings.Contains(rpc, "[::]:") { + rpc = strings.Replace(rpc, "[::]:", "[::1]:", 1) + } + fmt.Printf("WebUI: http://%s/webui\n", rpc) } } @@ -767,23 +883,38 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err) } + // Buffer channel to prevent deadlock when multiple servers write errors simultaneously + errc := make(chan error, len(listeners)) + var wg sync.WaitGroup + + // Start all servers and wait for them to be ready before writing api file. + // This prevents race conditions where external tools (like systemd path units) + // see the file and try to connect before servers can accept connections. if len(listeners) > 0 { - // Only add an api file if the API is running. + readyChannels := make([]chan struct{}, len(listeners)) + for i, lis := range listeners { + readyChannels[i] = make(chan struct{}) + ready := readyChannels[i] + wg.Go(func() { + errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...) 
+ }) + } + + // Wait for all listeners to be ready or any to fail + for _, ready := range readyChannels { + select { + case <-ready: + // This listener is ready + case err := <-errc: + return nil, fmt.Errorf("serveHTTPApi: %w", err) + } + } + if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil { return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err) } } - errc := make(chan error) - var wg sync.WaitGroup - for _, apiLis := range listeners { - wg.Add(1) - go func(lis manet.Listener) { - defer wg.Done() - errc <- corehttp.Serve(node, manet.NetListener(lis), opts...) - }(apiLis) - } - go func() { wg.Wait() close(errc) @@ -796,9 +927,9 @@ func rewriteMaddrToUseLocalhostIfItsAny(maddr ma.Multiaddr) ma.Multiaddr { first, rest := ma.SplitFirst(maddr) switch { - case first.Equal(manet.IP4Unspecified): + case first.Equal(&manet.IP4Unspecified[0]): return manet.IP4Loopback.Encapsulate(rest) - case first.Equal(manet.IP6Unspecified): + case first.Equal(&manet.IP6Unspecified[0]): return manet.IP6Loopback.Encapsulate(rest) default: return maddr // not ip @@ -812,6 +943,12 @@ func printLibp2pPorts(node *core.IpfsNode) { return } + if node.PeerHost == nil { + log.Error("PeerHost is nil - this should not happen and likely indicates an FX dependency injection issue or race condition") + fmt.Println("Swarm not properly initialized - node PeerHost is nil.") + return + } + ifaceAddrs, err := node.PeerHost.Network().InterfaceListenAddresses() if err != nil { log.Errorf("failed to read listening addresses: %s", err) @@ -936,26 +1073,42 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e return nil, fmt.Errorf("serveHTTPGateway: ConstructNode() failed: %s", err) } + // Buffer channel to prevent deadlock when multiple servers write errors simultaneously + errc := make(chan error, len(listeners)) + var wg sync.WaitGroup + + // Start all servers and wait for them to be ready before writing 
gateway file. + // This prevents race conditions where external tools (like systemd path units) + // see the file and try to connect before servers can accept connections. if len(listeners) > 0 { + readyChannels := make([]chan struct{}, len(listeners)) + for i, lis := range listeners { + readyChannels[i] = make(chan struct{}) + ready := readyChannels[i] + wg.Go(func() { + errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...) + }) + } + + // Wait for all listeners to be ready or any to fail + for _, ready := range readyChannels { + select { + case <-ready: + // This listener is ready + case err := <-errc: + return nil, fmt.Errorf("serveHTTPGateway: %w", err) + } + } + addr, err := manet.ToNetAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())) if err != nil { - return nil, fmt.Errorf("serveHTTPGateway: manet.ToIP() failed: %w", err) + return nil, fmt.Errorf("serveHTTPGateway: manet.ToNetAddr() failed: %w", err) } if err := node.Repo.SetGatewayAddr(addr); err != nil { return nil, fmt.Errorf("serveHTTPGateway: SetGatewayAddr() failed: %w", err) } } - errc := make(chan error) - var wg sync.WaitGroup - for _, lis := range listeners { - wg.Add(1) - go func(lis manet.Listener) { - defer wg.Done() - errc <- corehttp.Serve(node, manet.NetListener(lis), opts...) 
- }(lis) - } - go func() { wg.Wait() close(errc) @@ -993,6 +1146,10 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error return nil, err } + if node.PeerHost == nil { + return nil, fmt.Errorf("cannot create libp2p gateway: node PeerHost is nil (this should not happen and likely indicates an FX dependency injection issue or race condition)") + } + h := p2phttp.Host{ StreamHost: node.PeerHost, } @@ -1003,14 +1160,13 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error errc := make(chan error, 1) go func() { - defer close(errc) errc <- h.Serve() + close(errc) }() - go func() { - <-node.Process.Closing() + context.AfterFunc(node.Context(), func() { h.Close() - }() + }) return errc, nil } @@ -1026,23 +1182,58 @@ func mountFuse(req *cmds.Request, cctx *oldcmds.Context) error { if !found { fsdir = cfg.Mounts.IPFS } + if err := checkFusePath("Mounts.IPFS", fsdir); err != nil { + return err + } nsdir, found := req.Options[ipnsMountKwd].(string) if !found { nsdir = cfg.Mounts.IPNS } + if err := checkFusePath("Mounts.IPNS", nsdir); err != nil { + return err + } + + mfsdir, found := req.Options[mfsMountKwd].(string) + if !found { + mfsdir = cfg.Mounts.MFS + } + if err := checkFusePath("Mounts.MFS", mfsdir); err != nil { + return err + } node, err := cctx.ConstructNode() if err != nil { return fmt.Errorf("mountFuse: ConstructNode() failed: %s", err) } - err = nodeMount.Mount(node, fsdir, nsdir) + err = nodeMount.Mount(node, fsdir, nsdir, mfsdir) if err != nil { return err } fmt.Printf("IPFS mounted at: %s\n", fsdir) fmt.Printf("IPNS mounted at: %s\n", nsdir) + fmt.Printf("MFS mounted at: %s\n", mfsdir) + return nil +} + +func checkFusePath(name, path string) error { + if path == "" { + return fmt.Errorf("%s path cannot be empty", name) + } + + fileInfo, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s path (%q) does not exist: %w", name, path, err) + } + return 
fmt.Errorf("error while inspecting %s path (%q): %w", name, path, err) + } + + if !fileInfo.IsDir() { + return fmt.Errorf("%s path (%q) is not a directory", name, path) + } + return nil } @@ -1060,14 +1251,14 @@ func maybeRunGC(req *cmds.Request, node *core.IpfsNode) (<-chan error, error) { return errc, nil } -// merge does fan-in of multiple read-only error channels -// taken from http://blog.golang.org/pipelines +// merge does fan-in of multiple read-only error channels. func merge(cs ...<-chan error) <-chan error { var wg sync.WaitGroup out := make(chan error) - // Start an output goroutine for each input channel in cs. output - // copies values from c to out until c is closed, then calls wg.Done. + // Start a goroutine for each input channel in cs, that copies values from + // the input channel to the output channel until the input channel is + // closed. output := func(c <-chan error) { for n := range c { out <- n @@ -1081,8 +1272,8 @@ func merge(cs ...<-chan error) <-chan error { } } - // Start a goroutine to close out once all the output goroutines are - // done. This must start after the wg.Add call. + // Start a goroutine to close out once all the output goroutines, and other + // things to wait on, are done. 
go func() { wg.Wait() close(out) @@ -1153,8 +1344,6 @@ Visit https://github.com/ipfs/kubo/releases or https://dist.ipfs.tech/#kubo and select { case <-ctx.Done(): return - case <-nd.Process.Closing(): - return case <-ticker.C: continue } diff --git a/cmd/ipfs/kubo/daemon_linux.go b/cmd/ipfs/kubo/daemon_linux.go index b612738a2..2335dd2b9 100644 --- a/cmd/ipfs/kubo/daemon_linux.go +++ b/cmd/ipfs/kubo/daemon_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package kubo diff --git a/cmd/ipfs/kubo/daemon_other.go b/cmd/ipfs/kubo/daemon_other.go index c5b24053d..6fbc30259 100644 --- a/cmd/ipfs/kubo/daemon_other.go +++ b/cmd/ipfs/kubo/daemon_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package kubo diff --git a/cmd/ipfs/kubo/init.go b/cmd/ipfs/kubo/init.go index 986fe90c8..063120148 100644 --- a/cmd/ipfs/kubo/init.go +++ b/cmd/ipfs/kubo/init.go @@ -88,11 +88,11 @@ environment variable: if it.Err() != nil { return it.Err() } - return fmt.Errorf("file argument was nil") + return errors.New("file argument was nil") } file := files.FileFromEntry(it) if file == nil { - return fmt.Errorf("expected a regular file") + return errors.New("expected a regular file") } conf = &config.Config{} diff --git a/cmd/ipfs/kubo/pinmfs.go b/cmd/ipfs/kubo/pinmfs.go index c9187145c..a210f1b63 100644 --- a/cmd/ipfs/kubo/pinmfs.go +++ b/cmd/ipfs/kubo/pinmfs.go @@ -6,16 +6,14 @@ import ( "os" "time" - "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - pinclient "github.com/ipfs/boxo/pinning/remote/client" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" - config "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core" + "github.com/libp2p/go-libp2p/core/host" + peer "github.com/libp2p/go-libp2p/core/peer" ) // mfslog is the logger for remote mfs pinning. 
@@ -90,34 +88,46 @@ func pinMFSOnChange(cctx pinMFSContext, configPollInterval time.Duration, node p case <-cctx.Context().Done(): return case <-tmo.C: - tmo.Reset(configPollInterval) - } + // reread the config, which may have changed in the meantime + cfg, err := cctx.GetConfig() + if err != nil { + mfslog.Errorf("pinning reading config (%v)", err) + continue + } + mfslog.Debugf("pinning loop is awake, %d remote services", len(cfg.Pinning.RemoteServices)) - // reread the config, which may have changed in the meantime - cfg, err := cctx.GetConfig() - if err != nil { - mfslog.Errorf("pinning reading config (%v)", err) - continue + // pin to all remote services in parallel + pinAllMFS(cctx.Context(), node, cfg, lastPins) } - mfslog.Debugf("pinning loop is awake, %d remote services", len(cfg.Pinning.RemoteServices)) - - // get the most recent MFS root cid - rootNode, err := node.RootNode() - if err != nil { - mfslog.Errorf("pinning reading MFS root (%v)", err) - continue - } - - // pin to all remote services in parallel - pinAllMFS(cctx.Context(), node, cfg, rootNode.Cid(), lastPins) + // pinAllMFS may take long. Reset interval only when we are done doing it + // so that we are not pinning constantly. + tmo.Reset(configPollInterval) } } // pinAllMFS pins on all remote services in parallel to overcome DoS attacks. -func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, rootCid cid.Cid, lastPins map[string]lastPin) { +func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, lastPins map[string]lastPin) { ch := make(chan lastPin) var started int + // Bail out to mitigate issue below when not needing to do anything. + if len(cfg.Pinning.RemoteServices) == 0 { + return + } + + // get the most recent MFS root cid. + // Warning! This can be super expensive. 
+ // See https://github.com/ipfs/boxo/pull/751 + // and https://github.com/ipfs/kubo/issues/8694 + // Reading an MFS-directory nodes can take minutes due to + // ever growing cache being synced to unixfs. + rootNode, err := node.RootNode() + if err != nil { + mfslog.Errorf("pinning reading MFS root (%v)", err) + return + } + rootCid := rootNode.Cid() + for svcName, svcConfig := range cfg.Pinning.RemoteServices { if ctx.Err() != nil { break @@ -183,7 +193,7 @@ func pinMFS(ctx context.Context, node pinMFSNode, cid cid.Cid, svcName string, s // check if MFS pin exists (across all possible states) and inspect its CID pinStatuses := []pinclient.Status{pinclient.StatusQueued, pinclient.StatusPinning, pinclient.StatusPinned, pinclient.StatusFailed} - lsPinCh, lsErrCh := c.Ls(ctx, pinclient.PinOpts.FilterName(pinName), pinclient.PinOpts.FilterStatus(pinStatuses...)) + lsPinCh, lsErrCh := c.GoLs(ctx, pinclient.PinOpts.FilterName(pinName), pinclient.PinOpts.FilterStatus(pinStatuses...)) existingRequestID := "" // is there any pre-existing MFS pin with pinName (for any CID)? pinning := false // is CID for current MFS already being pinned? 
pinTime := time.Now().UTC() diff --git a/cmd/ipfs/kubo/pinmfs_test.go b/cmd/ipfs/kubo/pinmfs_test.go index 750be9c98..6b171cd63 100644 --- a/cmd/ipfs/kubo/pinmfs_test.go +++ b/cmd/ipfs/kubo/pinmfs_test.go @@ -94,11 +94,24 @@ func TestPinMFSRootNodeError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*testConfigPollInterval) defer cancel() + // need at least one config to trigger + cfg := &config.Config{ + Pinning: config.Pinning{ + RemoteServices: map[string]config.RemotePinningService{ + "A": { + Policies: config.RemotePinningServicePolicies{ + MFS: config.RemotePinningServiceMFSPolicy{ + Enable: false, + }, + }, + }, + }, + }, + } + cctx := &testPinMFSContext{ ctx: ctx, - cfg: &config.Config{ - Pinning: config.Pinning{}, - }, + cfg: cfg, err: nil, } node := &testPinMFSNode{ diff --git a/cmd/ipfs/kubo/start.go b/cmd/ipfs/kubo/start.go index 474045e71..b5aff3bc3 100644 --- a/cmd/ipfs/kubo/start.go +++ b/cmd/ipfs/kubo/start.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "net/http" "os" @@ -16,12 +17,11 @@ import ( "time" "github.com/blang/semver/v4" - "github.com/google/uuid" u "github.com/ipfs/boxo/util" cmds "github.com/ipfs/go-ipfs-cmds" "github.com/ipfs/go-ipfs-cmds/cli" cmdhttp "github.com/ipfs/go-ipfs-cmds/http" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ipfs "github.com/ipfs/kubo" "github.com/ipfs/kubo/client/rpc/auth" "github.com/ipfs/kubo/cmd/ipfs/util" @@ -34,6 +34,7 @@ import ( "github.com/ipfs/kubo/repo" "github.com/ipfs/kubo/repo/fsrepo" "github.com/ipfs/kubo/tracing" + "github.com/libp2p/go-libp2p/gologshim" ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" manet "github.com/multiformats/go-multiaddr/net" @@ -51,6 +52,17 @@ var ( tracer trace.Tracer ) +func init() { + // Set go-log's slog handler as the application-wide default. + // This ensures all slog-based logging uses go-log's formatting. 
+ slog.SetDefault(slog.New(logging.SlogHandler())) + + // Wire go-log's slog bridge to go-libp2p's gologshim. + // This provides go-libp2p loggers with the "logger" attribute + // for per-subsystem level control (e.g., `ipfs log level libp2p-swarm debug`). + gologshim.SetDefaultHandler(logging.SlogHandler()) +} + // declared as a var for testing purposes. var dnsResolver = madns.DefaultResolver @@ -89,16 +101,6 @@ func printErr(err error) int { return 1 } -func newUUID(key string) logging.Metadata { - ids := "#UUID-ERROR#" - if id, err := uuid.NewRandom(); err == nil { - ids = id.String() - } - return logging.Metadata{ - key: ids, - } -} - func BuildDefaultEnv(ctx context.Context, req *cmds.Request) (cmds.Environment, error) { return BuildEnv(nil)(ctx, req) } @@ -157,8 +159,7 @@ func BuildEnv(pl PluginPreloader) func(ctx context.Context, req *cmds.Request) ( // - output the response // - if anything fails, print error, maybe with help. func Start(buildEnv func(ctx context.Context, req *cmds.Request) (cmds.Environment, error)) (exitCode int) { - ctx := logging.ContextWithLoggable(context.Background(), newUUID("session")) - + ctx := context.Background() tp, err := tracing.NewTracerProvider(ctx) if err != nil { return printErr(err) @@ -226,7 +227,10 @@ func insideGUI() bool { func checkDebug(req *cmds.Request) { // check if user wants to debug. option OR env var. 
debug, _ := req.Options["debug"].(bool) - if debug || os.Getenv("IPFS_LOGGING") == "debug" { + ipfsLogLevel, _ := logging.Parse(os.Getenv("IPFS_LOGGING")) // IPFS_LOGGING is deprecated + goLogLevel, _ := logging.Parse(os.Getenv("GOLOG_LOG_LEVEL")) + + if debug || goLogLevel == logging.LevelDebug || ipfsLogLevel == logging.LevelDebug { u.Debug = true logging.SetDebugLogging() } @@ -330,6 +334,11 @@ func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) { switch network { case "tcp", "tcp4", "tcp6": tpt = http.DefaultTransport + // RPC over HTTPS requires explicit schema in the address passed to cmdhttp.NewClient + httpAddr := apiAddr.String() + if !strings.HasPrefix(host, "http:") && !strings.HasPrefix(host, "https:") && (strings.Contains(httpAddr, "/https") || strings.Contains(httpAddr, "/tls/http")) { + host = "https://" + host + } case "unix": path := host host = "unix" diff --git a/cmd/ipfs/runmain_test.go b/cmd/ipfs/runmain_test.go index a37ec194c..56a647f8a 100644 --- a/cmd/ipfs/runmain_test.go +++ b/cmd/ipfs/runmain_test.go @@ -1,5 +1,4 @@ //go:build testrunmain -// +build testrunmain package main_test diff --git a/cmd/ipfs/util/signal.go b/cmd/ipfs/util/signal.go index 2cfd0d5bd..51c9d5acb 100644 --- a/cmd/ipfs/util/signal.go +++ b/cmd/ipfs/util/signal.go @@ -1,5 +1,4 @@ //go:build !wasm -// +build !wasm package util @@ -64,13 +63,7 @@ func SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) { switch count { case 1: fmt.Println() // Prevent un-terminated ^C character in terminal - - ih.wg.Add(1) - go func() { - defer ih.wg.Done() - cancelFunc() - }() - + cancelFunc() default: fmt.Println("Received another interrupt before graceful shutdown, terminating...") os.Exit(-1) diff --git a/cmd/ipfs/util/ui.go b/cmd/ipfs/util/ui.go index cf8ad5067..f39f1e171 100644 --- a/cmd/ipfs/util/ui.go +++ b/cmd/ipfs/util/ui.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package util diff --git a/cmd/ipfs/util/ulimit.go 
b/cmd/ipfs/util/ulimit.go index 188444d67..9f58007c9 100644 --- a/cmd/ipfs/util/ulimit.go +++ b/cmd/ipfs/util/ulimit.go @@ -6,7 +6,7 @@ import ( "strconv" "syscall" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("ulimit") diff --git a/cmd/ipfs/util/ulimit_freebsd.go b/cmd/ipfs/util/ulimit_freebsd.go index 27b31349b..358bccfe3 100644 --- a/cmd/ipfs/util/ulimit_freebsd.go +++ b/cmd/ipfs/util/ulimit_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package util diff --git a/cmd/ipfs/util/ulimit_test.go b/cmd/ipfs/util/ulimit_test.go index bef480fff..33b077776 100644 --- a/cmd/ipfs/util/ulimit_test.go +++ b/cmd/ipfs/util/ulimit_test.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 -// +build !windows,!plan9 package util diff --git a/cmd/ipfs/util/ulimit_unix.go b/cmd/ipfs/util/ulimit_unix.go index d3b0ec43c..b223de0ff 100644 --- a/cmd/ipfs/util/ulimit_unix.go +++ b/cmd/ipfs/util/ulimit_unix.go @@ -1,5 +1,4 @@ //go:build darwin || linux || netbsd || openbsd -// +build darwin linux netbsd openbsd package util diff --git a/cmd/ipfs/util/ulimit_windows.go b/cmd/ipfs/util/ulimit_windows.go index 5dbfd26f7..cd1447365 100644 --- a/cmd/ipfs/util/ulimit_windows.go +++ b/cmd/ipfs/util/ulimit_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package util diff --git a/cmd/ipfswatch/ipfswatch_test.go b/cmd/ipfswatch/ipfswatch_test.go index 20397afef..ac68e96cc 100644 --- a/cmd/ipfswatch/ipfswatch_test.go +++ b/cmd/ipfswatch/ipfswatch_test.go @@ -1,16 +1,15 @@ //go:build !plan9 -// +build !plan9 package main import ( "testing" - "github.com/ipfs/kubo/thirdparty/assert" + "github.com/stretchr/testify/require" ) func TestIsHidden(t *testing.T) { - assert.True(IsHidden("bar/.git"), t, "dirs beginning with . should be recognized as hidden") - assert.False(IsHidden("."), t, ". 
for current dir should not be considered hidden") - assert.False(IsHidden("bar/baz"), t, "normal dirs should not be hidden") + require.True(t, IsHidden("bar/.git"), "dirs beginning with . should be recognized as hidden") + require.False(t, IsHidden("."), ". for current dir should not be considered hidden") + require.False(t, IsHidden("bar/baz"), "normal dirs should not be hidden") } diff --git a/cmd/ipfswatch/main.go b/cmd/ipfswatch/main.go index 0f0283fb8..3ba5dd3d9 100644 --- a/cmd/ipfswatch/main.go +++ b/cmd/ipfswatch/main.go @@ -1,5 +1,4 @@ //go:build !plan9 -// +build !plan9 package main @@ -10,26 +9,40 @@ import ( "os" "os/signal" "path/filepath" + "slices" "syscall" commands "github.com/ipfs/kubo/commands" + "github.com/ipfs/kubo/config" core "github.com/ipfs/kubo/core" coreapi "github.com/ipfs/kubo/core/coreapi" corehttp "github.com/ipfs/kubo/core/corehttp" + "github.com/ipfs/kubo/misc/fsutil" + "github.com/ipfs/kubo/plugin" + pluginbadgerds "github.com/ipfs/kubo/plugin/plugins/badgerds" + pluginflatfs "github.com/ipfs/kubo/plugin/plugins/flatfs" + pluginlevelds "github.com/ipfs/kubo/plugin/plugins/levelds" + pluginpebbleds "github.com/ipfs/kubo/plugin/plugins/pebbleds" fsrepo "github.com/ipfs/kubo/repo/fsrepo" fsnotify "github.com/fsnotify/fsnotify" "github.com/ipfs/boxo/files" - process "github.com/jbenet/goprocess" - homedir "github.com/mitchellh/go-homedir" ) var ( http = flag.Bool("http", false, "expose IPFS HTTP API") - repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use") + repoPath *string watchPath = flag.String("path", ".", "the path to watch") ) +func init() { + ipfsPath, err := config.PathRoot() + if err != nil { + ipfsPath = os.Getenv(config.EnvDir) + } + repoPath = flag.String("repo", ipfsPath, "repo path to use") +} + func main() { flag.Parse() @@ -53,11 +66,22 @@ func main() { } } +func loadDatastorePlugins(plugins []plugin.Plugin) error { + for _, pl := range plugins { + if pl, ok := pl.(plugin.PluginDatastore); ok { + 
err := fsrepo.AddDatastoreConfigHandler(pl.DatastoreTypeName(), pl.DatastoreConfigParser()) + if err != nil { + return err + } + } + } + return nil +} + func run(ipfsPath, watchPath string) error { - proc := process.WithParent(process.Background()) log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath) - ipfsPath, err := homedir.Expand(ipfsPath) + ipfsPath, err := fsutil.ExpandHome(ipfsPath) if err != nil { return err } @@ -71,6 +95,15 @@ func run(ipfsPath, watchPath string) error { return err } + if err = loadDatastorePlugins(slices.Concat( + pluginbadgerds.Plugins, + pluginflatfs.Plugins, + pluginlevelds.Plugins, + pluginpebbleds.Plugins, + )); err != nil { + return err + } + r, err := fsrepo.Open(ipfsPath) if err != nil { // TODO handle case: daemon running @@ -99,11 +132,11 @@ func run(ipfsPath, watchPath string) error { corehttp.WebUIOption, corehttp.CommandsOption(cmdCtx(node, ipfsPath)), } - proc.Go(func(p process.Process) { + go func() { if err := corehttp.ListenAndServe(node, addr, opts...); err != nil { return } - }) + }() } interrupts := make(chan os.Signal, 1) @@ -137,7 +170,7 @@ func run(ipfsPath, watchPath string) error { } } } - proc.Go(func(p process.Process) { + go func() { file, err := os.Open(e.Name) if err != nil { log.Println(err) @@ -162,7 +195,7 @@ func run(ipfsPath, watchPath string) error { log.Println(err) } log.Printf("added %s... 
key: %s", e.Name, k) - }) + }() } case err := <-watcher.Errors: log.Println(err) diff --git a/commands/context.go b/commands/context.go index cc95d55f4..c8893ae17 100644 --- a/commands/context.go +++ b/commands/context.go @@ -11,7 +11,7 @@ import ( loader "github.com/ipfs/kubo/plugin/loader" cmds "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" config "github.com/ipfs/kubo/config" coreiface "github.com/ipfs/kubo/core/coreiface" options "github.com/ipfs/kubo/core/coreiface/options" @@ -53,6 +53,23 @@ func (c *Context) GetNode() (*core.IpfsNode, error) { return c.node, err } +// ClearCachedNode clears any cached node, forcing GetNode to construct a new one. +// +// This method is critical for mitigating racy FX dependency injection behavior +// that can occur during daemon startup. The daemon may create multiple IpfsNode +// instances during initialization - first an offline node during early init, then +// the proper online daemon node. Without clearing the cache, HTTP RPC handlers may +// end up using the first (offline) cached node instead of the intended online daemon node. +// +// This behavior was likely present forever in go-ipfs, but recent changes made it more +// prominent and forced us to proactively mitigate FX shortcomings. The daemon calls +// this method immediately before setting its ConstructNode function to ensure that +// subsequent GetNode() calls use the correct online daemon node rather than any +// stale cached offline node from initialization. +func (c *Context) ClearCachedNode() { + c.node = nil +} + // GetAPI returns CoreAPI instance backed by ipfs node. // It may construct the node with the provided function. 
func (c *Context) GetAPI() (coreiface.CoreAPI, error) { diff --git a/config/autoconf.go b/config/autoconf.go new file mode 100644 index 000000000..2f1d41b26 --- /dev/null +++ b/config/autoconf.go @@ -0,0 +1,319 @@ +package config + +import ( + "maps" + "math/rand" + "strings" + + "github.com/ipfs/boxo/autoconf" + logging "github.com/ipfs/go-log/v2" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +var log = logging.Logger("config") + +// AutoConf contains the configuration for the autoconf subsystem +type AutoConf struct { + // URL is the HTTP(S) URL to fetch the autoconf.json from + // Default: see boxo/autoconf.MainnetAutoConfURL + URL *OptionalString `json:",omitempty"` + + // Enabled determines whether to use autoconf + // Default: true + Enabled Flag `json:",omitempty"` + + // RefreshInterval is how often to refresh autoconf data + // Default: 24h + RefreshInterval *OptionalDuration `json:",omitempty"` + + // TLSInsecureSkipVerify allows skipping TLS verification (for testing only) + // Default: false + TLSInsecureSkipVerify Flag `json:",omitempty"` +} + +const ( + // AutoPlaceholder is the string used as a placeholder for autoconf values + AutoPlaceholder = "auto" + + // DefaultAutoConfEnabled is the default value for AutoConf.Enabled + DefaultAutoConfEnabled = true + + // DefaultAutoConfURL is the default URL for fetching autoconf + DefaultAutoConfURL = autoconf.MainnetAutoConfURL + + // DefaultAutoConfRefreshInterval is the default interval for refreshing autoconf data + DefaultAutoConfRefreshInterval = autoconf.DefaultRefreshInterval + + // AutoConf client configuration constants + DefaultAutoConfCacheSize = autoconf.DefaultCacheSize + DefaultAutoConfTimeout = autoconf.DefaultTimeout +) + +// getNativeSystems returns the list of systems that should be used natively based on routing type +func getNativeSystems(routingType string) []string { + switch routingType { + case "dht", "dhtclient", "dhtserver": + return []string{autoconf.SystemAminoDHT} // Only 
native DHT + case "auto", "autoclient": + return []string{autoconf.SystemAminoDHT} // Native DHT, delegated others + case "delegated": + return []string{} // Everything delegated + case "none": + return []string{} // No native systems + default: + return []string{} // Custom mode + } +} + +// selectRandomResolver picks a random resolver from a list for load balancing +func selectRandomResolver(resolvers []string) string { + if len(resolvers) == 0 { + return "" + } + return resolvers[rand.Intn(len(resolvers))] +} + +// DNSResolversWithAutoConf returns DNS resolvers with "auto" values replaced by autoconf values +func (c *Config) DNSResolversWithAutoConf() map[string]string { + if c.DNS.Resolvers == nil { + return nil + } + + resolved := make(map[string]string) + autoConf := c.getAutoConf() + autoExpanded := 0 + + // Process each configured resolver + for domain, resolver := range c.DNS.Resolvers { + if resolver == AutoPlaceholder { + // Try to resolve from autoconf + if autoConf != nil && autoConf.DNSResolvers != nil { + if resolvers, exists := autoConf.DNSResolvers[domain]; exists && len(resolvers) > 0 { + resolved[domain] = selectRandomResolver(resolvers) + autoExpanded++ + } + } + // If autoConf is disabled or domain not found, skip this "auto" resolver + } else { + // Keep custom resolver as-is + resolved[domain] = resolver + } + } + + // Add default resolvers from autoconf that aren't already configured + if autoConf != nil && autoConf.DNSResolvers != nil { + for domain, resolvers := range autoConf.DNSResolvers { + if _, exists := resolved[domain]; !exists && len(resolvers) > 0 { + resolved[domain] = selectRandomResolver(resolvers) + } + } + } + + // Log expansion statistics + if autoExpanded > 0 { + log.Debugf("expanded %d 'auto' DNS.Resolvers from autoconf", autoExpanded) + } + + return resolved +} + +// expandAutoConfSlice is a generic helper for expanding "auto" placeholders in string slices +// It handles the common pattern of: iterate through slice, 
expand "auto" once, keep custom values +func expandAutoConfSlice(sourceSlice []string, autoConfData []string) []string { + var resolved []string + autoExpanded := false + + for _, item := range sourceSlice { + if item == AutoPlaceholder { + // Replace with autoconf data (only once) + if autoConfData != nil && !autoExpanded { + resolved = append(resolved, autoConfData...) + autoExpanded = true + } + // If autoConfData is nil or already expanded, skip redundant "auto" entries silently + } else { + // Keep custom item + resolved = append(resolved, item) + } + } + + return resolved +} + +// BootstrapWithAutoConf returns bootstrap config with "auto" values replaced by autoconf values +func (c *Config) BootstrapWithAutoConf() []string { + autoConf := c.getAutoConf() + var autoConfData []string + + if autoConf != nil { + routingType := c.Routing.Type.WithDefault(DefaultRoutingType) + nativeSystems := getNativeSystems(routingType) + autoConfData = autoConf.GetBootstrapPeers(nativeSystems...) + log.Debugf("BootstrapWithAutoConf: processing with routing type: %s", routingType) + } else { + log.Debugf("BootstrapWithAutoConf: autoConf disabled, using original config") + } + + result := expandAutoConfSlice(c.Bootstrap, autoConfData) + log.Debugf("BootstrapWithAutoConf: final result contains %d peers", len(result)) + return result +} + +// getAutoConf is a helper to get autoconf data with fallbacks +func (c *Config) getAutoConf() *autoconf.Config { + if !c.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) { + log.Debugf("getAutoConf: AutoConf disabled, returning nil") + return nil + } + + // Create or get cached client with config + client, err := GetAutoConfClient(c) + if err != nil { + log.Debugf("getAutoConf: client creation failed - %v", err) + return nil + } + + // Use GetCached to avoid network I/O during config operations + // This ensures config retrieval doesn't block on network operations + result := client.GetCached() + + log.Debugf("getAutoConf: returning autoconf 
data") + return result +} + +// BootstrapPeersWithAutoConf returns bootstrap peers with "auto" values replaced by autoconf values +// and parsed into peer.AddrInfo structures +func (c *Config) BootstrapPeersWithAutoConf() ([]peer.AddrInfo, error) { + bootstrapStrings := c.BootstrapWithAutoConf() + return ParseBootstrapPeers(bootstrapStrings) +} + +// DelegatedRoutersWithAutoConf returns delegated router URLs without trailing slashes +func (c *Config) DelegatedRoutersWithAutoConf() []string { + autoConf := c.getAutoConf() + + // Use autoconf to expand the endpoints with supported paths for read operations + routingType := c.Routing.Type.WithDefault(DefaultRoutingType) + nativeSystems := getNativeSystems(routingType) + return autoconf.ExpandDelegatedEndpoints( + c.Routing.DelegatedRouters, + autoConf, + nativeSystems, + // Kubo supports all read paths + autoconf.RoutingV1ProvidersPath, + autoconf.RoutingV1PeersPath, + autoconf.RoutingV1IPNSPath, + ) +} + +// DelegatedPublishersWithAutoConf returns delegated publisher URLs without trailing slashes +func (c *Config) DelegatedPublishersWithAutoConf() []string { + autoConf := c.getAutoConf() + + // Use autoconf to expand the endpoints with IPNS write path + routingType := c.Routing.Type.WithDefault(DefaultRoutingType) + nativeSystems := getNativeSystems(routingType) + return autoconf.ExpandDelegatedEndpoints( + c.Ipns.DelegatedPublishers, + autoConf, + nativeSystems, + autoconf.RoutingV1IPNSPath, // Only IPNS operations (for write) + ) +} + +// expandConfigField expands a specific config field with autoconf values +// Handles both top-level fields ("Bootstrap") and nested fields ("DNS.Resolvers") +func (c *Config) expandConfigField(expandedCfg map[string]any, fieldPath string) { + // Check if this field supports autoconf expansion + expandFunc, supported := supportedAutoConfFields[fieldPath] + if !supported { + return + } + + // Handle top-level fields (no dot in path) + if !strings.Contains(fieldPath, ".") { + if _, 
exists := expandedCfg[fieldPath]; exists { + expandedCfg[fieldPath] = expandFunc(c) + } + return + } + + // Handle nested fields (section.field format) + parts := strings.SplitN(fieldPath, ".", 2) + if len(parts) != 2 { + return + } + + sectionName, fieldName := parts[0], parts[1] + if section, exists := expandedCfg[sectionName]; exists { + if sectionMap, ok := section.(map[string]any); ok { + if _, exists := sectionMap[fieldName]; exists { + sectionMap[fieldName] = expandFunc(c) + expandedCfg[sectionName] = sectionMap + } + } + } +} + +// ExpandAutoConfValues expands "auto" placeholders in config with their actual values using the same methods as the daemon +func (c *Config) ExpandAutoConfValues(cfg map[string]any) (map[string]any, error) { + // Create a deep copy of the config map to avoid modifying the original + expandedCfg := maps.Clone(cfg) + + // Use the same expansion methods that the daemon uses - ensures runtime consistency + // Unified expansion for all supported autoconf fields + c.expandConfigField(expandedCfg, "Bootstrap") + c.expandConfigField(expandedCfg, "DNS.Resolvers") + c.expandConfigField(expandedCfg, "Routing.DelegatedRouters") + c.expandConfigField(expandedCfg, "Ipns.DelegatedPublishers") + + return expandedCfg, nil +} + +// supportedAutoConfFields maps field keys to their expansion functions +var supportedAutoConfFields = map[string]func(*Config) any{ + "Bootstrap": func(c *Config) any { + expanded := c.BootstrapWithAutoConf() + return stringSliceToInterfaceSlice(expanded) + }, + "DNS.Resolvers": func(c *Config) any { + expanded := c.DNSResolversWithAutoConf() + return stringMapToInterfaceMap(expanded) + }, + "Routing.DelegatedRouters": func(c *Config) any { + expanded := c.DelegatedRoutersWithAutoConf() + return stringSliceToInterfaceSlice(expanded) + }, + "Ipns.DelegatedPublishers": func(c *Config) any { + expanded := c.DelegatedPublishersWithAutoConf() + return stringSliceToInterfaceSlice(expanded) + }, +} + +// ExpandConfigField expands 
auto values for a specific config field using the same methods as the daemon +func (c *Config) ExpandConfigField(key string, value any) any { + if expandFunc, supported := supportedAutoConfFields[key]; supported { + return expandFunc(c) + } + + // Return original value if no expansion needed (not a field that supports auto values) + return value +} + +// Helper functions for type conversion between string types and any types for JSON compatibility + +func stringSliceToInterfaceSlice(slice []string) []any { + result := make([]any, len(slice)) + for i, v := range slice { + result[i] = v + } + return result +} + +func stringMapToInterfaceMap(m map[string]string) map[string]any { + result := make(map[string]any) + for k, v := range m { + result[k] = v + } + return result +} diff --git a/config/autoconf_client.go b/config/autoconf_client.go new file mode 100644 index 000000000..1775fc445 --- /dev/null +++ b/config/autoconf_client.go @@ -0,0 +1,136 @@ +package config + +import ( + "fmt" + "path/filepath" + "sync" + + "github.com/ipfs/boxo/autoconf" + logging "github.com/ipfs/go-log/v2" + version "github.com/ipfs/kubo" +) + +var autoconfLog = logging.Logger("autoconf") + +// Singleton state for autoconf client +var ( + clientOnce sync.Once + clientCache *autoconf.Client + clientErr error +) + +// GetAutoConfClient returns a cached autoconf client or creates a new one. +// This is thread-safe and uses a singleton pattern. 
+func GetAutoConfClient(cfg *Config) (*autoconf.Client, error) { + clientOnce.Do(func() { + clientCache, clientErr = newAutoConfClient(cfg) + }) + return clientCache, clientErr +} + +// newAutoConfClient creates a new autoconf client with the given config +func newAutoConfClient(cfg *Config) (*autoconf.Client, error) { + // Get repo path for cache directory + repoPath, err := PathRoot() + if err != nil { + return nil, fmt.Errorf("failed to get repo path: %w", err) + } + + // Prepare refresh interval with nil check + refreshInterval := cfg.AutoConf.RefreshInterval + if refreshInterval == nil { + refreshInterval = &OptionalDuration{} + } + + // Use default URL if not specified + url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL) + + // Build client options + options := []autoconf.Option{ + autoconf.WithCacheDir(filepath.Join(repoPath, "autoconf")), + autoconf.WithUserAgent(version.GetUserAgentVersion()), + autoconf.WithCacheSize(DefaultAutoConfCacheSize), + autoconf.WithTimeout(DefaultAutoConfTimeout), + autoconf.WithRefreshInterval(refreshInterval.WithDefault(DefaultAutoConfRefreshInterval)), + autoconf.WithFallback(autoconf.GetMainnetFallbackConfig), + autoconf.WithURL(url), + } + + return autoconf.NewClient(options...) +} + +// ValidateAutoConfWithRepo validates that autoconf setup is correct at daemon startup with repo access +func ValidateAutoConfWithRepo(cfg *Config, swarmKeyExists bool) error { + if !cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) { + // AutoConf is disabled, check for "auto" values and warn + return validateAutoConfDisabled(cfg) + } + + // Check for private network with default mainnet URL + url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL) + if swarmKeyExists && url == DefaultAutoConfURL { + return fmt.Errorf("AutoConf cannot use the default mainnet URL (%s) on a private network (swarm.key or LIBP2P_FORCE_PNET detected). 
Either disable AutoConf by setting AutoConf.Enabled=false, or configure AutoConf.URL to point to a configuration service specific to your private swarm", DefaultAutoConfURL) + } + + // Further validation will happen lazily when config is accessed + return nil +} + +// validateAutoConfDisabled checks for "auto" values when AutoConf is disabled and logs errors +func validateAutoConfDisabled(cfg *Config) error { + hasAutoValues := false + var errors []string + + // Check Bootstrap + for _, peer := range cfg.Bootstrap { + if peer == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false") + break + } + } + + // Check DNS.Resolvers + if cfg.DNS.Resolvers != nil { + for _, resolver := range cfg.DNS.Resolvers { + if resolver == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "DNS.Resolvers contains 'auto' but AutoConf.Enabled=false") + break + } + } + } + + // Check Routing.DelegatedRouters + for _, router := range cfg.Routing.DelegatedRouters { + if router == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false") + break + } + } + + // Check Ipns.DelegatedPublishers + for _, publisher := range cfg.Ipns.DelegatedPublishers { + if publisher == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false") + break + } + } + + // Log all errors + for _, errMsg := range errors { + autoconfLog.Error(errMsg) + } + + // If only auto values exist and no static ones, fail to start + if hasAutoValues { + if len(cfg.Bootstrap) == 1 && cfg.Bootstrap[0] == AutoPlaceholder { + autoconfLog.Error("Kubo cannot start with only 'auto' Bootstrap values when AutoConf.Enabled=false") + return fmt.Errorf("no usable bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false) but 'auto' placeholder is used in Bootstrap config. 
Either set AutoConf.Enabled=true to enable automatic configuration, or replace 'auto' with specific Bootstrap peer addresses") + } + } + + return nil +} diff --git a/config/autoconf_test.go b/config/autoconf_test.go new file mode 100644 index 000000000..f4d447dc5 --- /dev/null +++ b/config/autoconf_test.go @@ -0,0 +1,92 @@ +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfDefaults(t *testing.T) { + // Test that AutoConf has the correct default values + cfg := &Config{ + AutoConf: AutoConf{ + URL: NewOptionalString(DefaultAutoConfURL), + Enabled: True, + }, + } + + assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)) + assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled)) + + // Test default refresh interval + if cfg.AutoConf.RefreshInterval == nil { + // This is expected - nil means use default + duration := (*OptionalDuration)(nil).WithDefault(DefaultAutoConfRefreshInterval) + assert.Equal(t, DefaultAutoConfRefreshInterval, duration) + } +} + +func TestAutoConfProfile(t *testing.T) { + cfg := &Config{ + Bootstrap: []string{"some", "existing", "peers"}, + DNS: DNS{ + Resolvers: map[string]string{ + "eth.": "https://example.com", + }, + }, + Routing: Routing{ + DelegatedRouters: []string{"https://existing.router"}, + }, + Ipns: Ipns{ + DelegatedPublishers: []string{"https://existing.publisher"}, + }, + AutoConf: AutoConf{ + Enabled: False, + }, + } + + // Apply autoconf profile + profile, ok := Profiles["autoconf-on"] + require.True(t, ok, "autoconf-on profile not found") + + err := profile.Transform(cfg) + require.NoError(t, err) + + // Check that values were set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap) + assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."]) + assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters) + assert.Equal(t, []string{AutoPlaceholder}, 
cfg.Ipns.DelegatedPublishers) + + // Check that AutoConf was enabled + assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled)) + + // Check that URL was set + assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)) +} + +func TestInitWithAutoValues(t *testing.T) { + identity := Identity{ + PeerID: "QmTest", + } + + cfg, err := InitWithIdentity(identity) + require.NoError(t, err) + + // Check that Bootstrap is set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap) + + // Check that DNS resolver is set to "auto" + assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."]) + + // Check that DelegatedRouters is set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters) + + // Check that DelegatedPublishers is set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Ipns.DelegatedPublishers) + + // Check that AutoConf is enabled with correct URL + assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled)) + assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)) +} diff --git a/config/autotls.go b/config/autotls.go new file mode 100644 index 000000000..805a9ded6 --- /dev/null +++ b/config/autotls.go @@ -0,0 +1,46 @@ +package config + +import ( + "time" + + p2pforge "github.com/ipshipyard/p2p-forge/client" +) + +// AutoTLS includes optional configuration of p2p-forge client of service +// for obtaining a domain and TLS certificate to improve connectivity for web +// browser clients. More: https://github.com/ipshipyard/p2p-forge#readme +type AutoTLS struct { + // Enables the p2p-forge feature and all related features. 
+ Enabled Flag `json:",omitempty"` + + // Optional, controls if Kubo should add /tls/sni/.../ws listener to every /tcp port if no explicit /ws is defined in Addresses.Swarm + AutoWSS Flag `json:",omitempty"` + + // Optional override of the parent domain that will be used + DomainSuffix *OptionalString `json:",omitempty"` + + // Optional override of HTTP API that acts as ACME DNS-01 Challenge broker + RegistrationEndpoint *OptionalString `json:",omitempty"` + + // Optional Authorization token, used with private/test instances of p2p-forge + RegistrationToken *OptionalString `json:",omitempty"` + + // Optional registration delay used when AutoTLS.Enabled is not explicitly set to true in config + RegistrationDelay *OptionalDuration `json:",omitempty"` + + // Optional override of CA ACME API used by p2p-forge system + CAEndpoint *OptionalString `json:",omitempty"` + + // Optional, controls if features like AutoWSS should generate shorter /dnsX instead of /ipX/../sni/.. + ShortAddrs Flag `json:",omitempty"` +} + +const ( + DefaultAutoTLSEnabled = true // with DefaultAutoTLSRegistrationDelay, unless explicitly enabled in config + DefaultDomainSuffix = p2pforge.DefaultForgeDomain + DefaultRegistrationEndpoint = p2pforge.DefaultForgeEndpoint + DefaultCAEndpoint = p2pforge.DefaultCAEndpoint + DefaultAutoWSS = true // requires AutoTLS.Enabled + DefaultAutoTLSShortAddrs = true // requires AutoTLS.Enabled + DefaultAutoTLSRegistrationDelay = 1 * time.Hour +) diff --git a/config/bitswap.go b/config/bitswap.go new file mode 100644 index 000000000..5adcdef9c --- /dev/null +++ b/config/bitswap.go @@ -0,0 +1,15 @@ +package config + +// Bitswap holds Bitswap configuration options +type Bitswap struct { + // Libp2pEnabled controls if the node initializes bitswap over libp2p (enabled by default) + // (This can be disabled if HTTPRetrieval.Enabled is set to true) + Libp2pEnabled Flag `json:",omitempty"` + // ServerEnabled controls if the node responds to WANTs (depends on Libp2pEnabled, 
enabled by default) + ServerEnabled Flag `json:",omitempty"` +} + +const ( + DefaultBitswapLibp2pEnabled = true + DefaultBitswapServerEnabled = true +) diff --git a/config/bootstrap_peers.go b/config/bootstrap_peers.go index 1671d9f81..54670b4c9 100644 --- a/config/bootstrap_peers.go +++ b/config/bootstrap_peers.go @@ -2,27 +2,11 @@ package config import ( "errors" - "fmt" peer "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" ) -// DefaultBootstrapAddresses are the hardcoded bootstrap addresses -// for IPFS. they are nodes run by the IPFS team. docs on these later. -// As with all p2p networks, bootstrap is an important security concern. -// -// NOTE: This is here -- and not inside cmd/ipfs/init.go -- because of an -// import dependency issue. TODO: move this into a config/default/ package. -var DefaultBootstrapAddresses = []string{ - "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", - "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", - "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", - "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", - "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io - "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io -} - // ErrInvalidPeerAddr signals an address is not a valid peer address. var ErrInvalidPeerAddr = errors.New("invalid peer address") @@ -30,23 +14,11 @@ func (c *Config) BootstrapPeers() ([]peer.AddrInfo, error) { return ParseBootstrapPeers(c.Bootstrap) } -// DefaultBootstrapPeers returns the (parsed) set of default bootstrap peers. -// if it fails, it returns a meaningful error for the user. -// This is here (and not inside cmd/ipfs/init) because of module dependency problems. 
-func DefaultBootstrapPeers() ([]peer.AddrInfo, error) { - ps, err := ParseBootstrapPeers(DefaultBootstrapAddresses) - if err != nil { - return nil, fmt.Errorf(`failed to parse hardcoded bootstrap peers: %w -This is a problem with the ipfs codebase. Please report it to the dev team`, err) - } - return ps, nil -} - func (c *Config) SetBootstrapPeers(bps []peer.AddrInfo) { c.Bootstrap = BootstrapPeerStrings(bps) } -// ParseBootstrapPeer parses a bootstrap list into a list of AddrInfos. +// ParseBootstrapPeers parses a bootstrap list into a list of AddrInfos. func ParseBootstrapPeers(addrs []string) ([]peer.AddrInfo, error) { maddrs := make([]ma.Multiaddr, len(addrs)) for i, addr := range addrs { diff --git a/config/bootstrap_peers_test.go b/config/bootstrap_peers_test.go index eeea9b5fd..f07f2f24a 100644 --- a/config/bootstrap_peers_test.go +++ b/config/bootstrap_peers_test.go @@ -1,24 +1,28 @@ package config import ( - "sort" "testing" + + "github.com/ipfs/boxo/autoconf" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestBoostrapPeerStrings(t *testing.T) { - parsed, err := ParseBootstrapPeers(DefaultBootstrapAddresses) - if err != nil { - t.Fatal(err) - } +func TestBootstrapPeerStrings(t *testing.T) { + // Test round-trip: string -> parse -> format -> string + // This ensures that parsing and formatting are inverse operations - formatted := BootstrapPeerStrings(parsed) - sort.Strings(formatted) - expected := append([]string{}, DefaultBootstrapAddresses...) 
- sort.Strings(expected) + // Start with the default bootstrap peer multiaddr strings + originalStrings := autoconf.FallbackBootstrapPeers - for i, s := range formatted { - if expected[i] != s { - t.Fatalf("expected %s, %s", expected[i], s) - } - } + // Parse multiaddr strings into structured peer data + parsed, err := ParseBootstrapPeers(originalStrings) + require.NoError(t, err, "parsing bootstrap peers should succeed") + + // Format the parsed data back into multiaddr strings + formattedStrings := BootstrapPeerStrings(parsed) + + // Verify round-trip: we should get back exactly what we started with + assert.ElementsMatch(t, originalStrings, formattedStrings, + "round-trip through parse/format should preserve all bootstrap peers") } diff --git a/config/config.go b/config/config.go index 71365eb0b..045ca784b 100644 --- a/config/config.go +++ b/config/config.go @@ -7,9 +7,10 @@ import ( "fmt" "os" "path/filepath" + "reflect" "strings" - "github.com/mitchellh/go-homedir" + "github.com/ipfs/kubo/misc/fsutil" ) // Config is used to load ipfs config files. @@ -26,20 +27,27 @@ type Config struct { API API // local node's API settings Swarm SwarmConfig AutoNAT AutoNATConfig + AutoTLS AutoTLS Pubsub PubsubConfig Peering Peering DNS DNS - Migration Migration - Provider Provider - Reprovider Reprovider - Experimental Experiments - Plugins Plugins - Pinning Pinning - Import Import - Version Version + Migration Migration + AutoConf AutoConf + + Provide Provide // Merged Provider and Reprovider configuration + Provider Provider // Deprecated: use Provide. Will be removed in a future release. + Reprovider Reprovider // Deprecated: use Provide. Will be removed in a future release. 
+ HTTPRetrieval HTTPRetrieval + Experimental Experiments + Plugins Plugins + Pinning Pinning + Import Import + Version Version Internal Internal // experimental/unstable options + + Bitswap Bitswap `json:",omitempty"` } const ( @@ -58,7 +66,7 @@ func PathRoot() (string, error) { dir := os.Getenv(EnvDir) var err error if len(dir) == 0 { - dir, err = homedir.Expand(DefaultPathRoot) + dir, err = fsutil.ExpandHome(DefaultPathRoot) } return dir, err } @@ -136,6 +144,71 @@ func ToMap(conf *Config) (map[string]interface{}, error) { return m, nil } +// Convert config to a map, without using encoding/json, since +// zero/empty/'omitempty' fields are excluded by encoding/json during +// marshaling. +func ReflectToMap(conf interface{}) interface{} { + v := reflect.ValueOf(conf) + if !v.IsValid() { + return nil + } + + // Handle pointer type + if v.Kind() == reflect.Ptr { + if v.IsNil() { + // Create a zero value of the pointer's element type + elemType := v.Type().Elem() + zero := reflect.Zero(elemType) + return ReflectToMap(zero.Interface()) + } + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + result := make(map[string]interface{}) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + // Only include exported fields + if field.CanInterface() { + result[t.Field(i).Name] = ReflectToMap(field.Interface()) + } + } + return result + + case reflect.Map: + result := make(map[string]interface{}) + iter := v.MapRange() + for iter.Next() { + key := iter.Key() + // Convert map keys to strings for consistency + keyStr := fmt.Sprint(ReflectToMap(key.Interface())) + result[keyStr] = ReflectToMap(iter.Value().Interface()) + } + // Add a sample to differentiate between a map and a struct on validation. 
+ sample := reflect.Zero(v.Type().Elem()) + if sample.CanInterface() { + result["*"] = ReflectToMap(sample.Interface()) + } + return result + + case reflect.Slice, reflect.Array: + result := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + result[i] = ReflectToMap(v.Index(i).Interface()) + } + return result + + default: + // For basic types (int, string, etc.), just return the value + if v.CanInterface() { + return v.Interface() + } + return nil + } +} + // Clone copies the config. Use when updating. func (c *Config) Clone() (*Config, error) { var newConfig Config @@ -151,3 +224,38 @@ func (c *Config) Clone() (*Config, error) { return &newConfig, nil } + +// Check if the provided key is present in the structure. +func CheckKey(key string) error { + conf := Config{} + + // Convert an empty config to a map without JSON. + cursor := ReflectToMap(&conf) + + // Parse the key and verify its presence in the map. + var ok bool + var mapCursor map[string]interface{} + + parts := strings.Split(key, ".") + for i, part := range parts { + mapCursor, ok = cursor.(map[string]interface{}) + if !ok { + if cursor == nil { + return nil + } + path := strings.Join(parts[:i], ".") + return fmt.Errorf("%s key is not a map", path) + } + + cursor, ok = mapCursor[part] + if !ok { + // If the config section is a map, validate against the default entry. 
+ if cursor, ok = mapCursor["*"]; ok { + continue + } + path := strings.Join(parts[:i+1], ".") + return fmt.Errorf("%s not found", path) + } + } + return nil +} diff --git a/config/config_test.go b/config/config_test.go index dead06f8a..b1637bcef 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -27,3 +27,145 @@ func TestClone(t *testing.T) { t.Fatal("HTTP headers not preserved") } } + +func TestReflectToMap(t *testing.T) { + // Helper function to create a test config with various field types + reflectedConfig := ReflectToMap(new(Config)) + + mapConfig, ok := reflectedConfig.(map[string]interface{}) + if !ok { + t.Fatal("Config didn't convert to map") + } + + reflectedIdentity, ok := mapConfig["Identity"] + if !ok { + t.Fatal("Identity field not found") + } + + mapIdentity, ok := reflectedIdentity.(map[string]interface{}) + if !ok { + t.Fatal("Identity field didn't convert to map") + } + + // Test string field reflection + reflectedPeerID, ok := mapIdentity["PeerID"] + if !ok { + t.Fatal("PeerID field not found in Identity") + } + if _, ok := reflectedPeerID.(string); !ok { + t.Fatal("PeerID field didn't convert to string") + } + + // Test omitempty json string field + reflectedPrivKey, ok := mapIdentity["PrivKey"] + if !ok { + t.Fatal("PrivKey omitempty field not found in Identity") + } + if _, ok := reflectedPrivKey.(string); !ok { + t.Fatal("PrivKey omitempty field didn't convert to string") + } + + // Test slices field + reflectedBootstrap, ok := mapConfig["Bootstrap"] + if !ok { + t.Fatal("Bootstrap field not found in config") + } + bootstrap, ok := reflectedBootstrap.([]interface{}) + if !ok { + t.Fatal("Bootstrap field didn't convert to []string") + } + if len(bootstrap) != 0 { + t.Fatal("Bootstrap len is incorrect") + } + + reflectedDatastore, ok := mapConfig["Datastore"] + if !ok { + t.Fatal("Datastore field not found in config") + } + datastore, ok := reflectedDatastore.(map[string]interface{}) + if !ok { + t.Fatal("Datastore field didn't 
convert to map") + } + storageGCWatermark, ok := datastore["StorageGCWatermark"] + if !ok { + t.Fatal("StorageGCWatermark field not found in Datastore") + } + // Test int field + if _, ok := storageGCWatermark.(int64); !ok { + t.Fatal("StorageGCWatermark field didn't convert to int64") + } + noSync, ok := datastore["NoSync"] + if !ok { + t.Fatal("NoSync field not found in Datastore") + } + // Test bool field + if _, ok := noSync.(bool); !ok { + t.Fatal("NoSync field didn't convert to bool") + } + + reflectedDNS, ok := mapConfig["DNS"] + if !ok { + t.Fatal("DNS field not found in config") + } + DNS, ok := reflectedDNS.(map[string]interface{}) + if !ok { + t.Fatal("DNS field didn't convert to map") + } + reflectedResolvers, ok := DNS["Resolvers"] + if !ok { + t.Fatal("Resolvers field not found in DNS") + } + // Test map field + if _, ok := reflectedResolvers.(map[string]interface{}); !ok { + t.Fatal("Resolvers field didn't convert to map") + } + + // Test pointer field + if _, ok := DNS["MaxCacheTTL"].(map[string]interface{}); !ok { + // Since OptionalDuration only field is private, we cannot test it + t.Fatal("MaxCacheTTL field didn't convert to map") + } +} + +// Test validation of options set through "ipfs config" +func TestCheckKey(t *testing.T) { + err := CheckKey("Foo.Bar") + if err == nil { + t.Fatal("Foo.Bar isn't a valid key in the config") + } + + err = CheckKey("Provide.Strategy") + if err != nil { + t.Fatalf("%s: %s", err, "Provide.Strategy is a valid key in the config") + } + + err = CheckKey("Provide.DHT.MaxWorkers") + if err != nil { + t.Fatalf("%s: %s", err, "Provide.DHT.MaxWorkers is a valid key in the config") + } + + err = CheckKey("Provide.DHT.Interval") + if err != nil { + t.Fatalf("%s: %s", err, "Provide.DHT.Interval is a valid key in the config") + } + + err = CheckKey("Provide.Foo") + if err == nil { + t.Fatal("Provide.Foo isn't a valid key in the config") + } + + err = CheckKey("Gateway.PublicGateways.Foo.Paths") + if err != nil { + 
t.Fatalf("%s: %s", err, "Gateway.PublicGateways.Foo.Paths is a valid key in the config") + } + + err = CheckKey("Gateway.PublicGateways.Foo.Bar") + if err == nil { + t.Fatal("Gateway.PublicGateways.Foo.Bar isn't a valid key in the config") + } + + err = CheckKey("Plugins.Plugins.peerlog.Config.Enabled") + if err != nil { + t.Fatalf("%s: %s", err, "Plugins.Plugins.peerlog.Config.Enabled is a valid key in the config") + } +} diff --git a/config/datastore.go b/config/datastore.go index 1a5994a17..665e03647 100644 --- a/config/datastore.go +++ b/config/datastore.go @@ -4,8 +4,21 @@ import ( "encoding/json" ) -// DefaultDataStoreDirectory is the directory to store all the local IPFS data. -const DefaultDataStoreDirectory = "datastore" +const ( + // DefaultDataStoreDirectory is the directory to store all the local IPFS data. + DefaultDataStoreDirectory = "datastore" + + // DefaultBlockKeyCacheSize is the size for the blockstore two-queue + // cache which caches block keys and sizes. + DefaultBlockKeyCacheSize = 64 << 10 + + // DefaultWriteThrough specifies whether to use a "write-through" + // Blockstore and Blockservice. This means that they will write + // without performing any reads to check if the incoming blocks are + // already present in the datastore. Enable for datastores with fast + // writes and slower reads. + DefaultWriteThrough bool = true +) // Datastore tracks the configuration of the datastore. 
type Datastore struct { @@ -21,8 +34,10 @@ type Datastore struct { Spec map[string]interface{} - HashOnRead bool - BloomFilterSize int + HashOnRead bool + BloomFilterSize int + BlockKeyCacheSize OptionalInteger `json:",omitempty"` + WriteThrough Flag `json:",omitempty"` } // DataStorePath returns the default data store path given a configuration root diff --git a/config/dns.go b/config/dns.go index 8e1fc85a5..0b269675f 100644 --- a/config/dns.go +++ b/config/dns.go @@ -10,7 +10,7 @@ type DNS struct { // // Example: // - Custom resolver for ENS: `eth.` → `https://dns.eth.limo/dns-query` - // - Override the default OS resolver: `.` → `https://doh.applied-privacy.net/query` + // - Override the default OS resolver: `.` → `https://1.1.1.1/dns-query` Resolvers map[string]string // MaxCacheTTL is the maximum duration DNS entries are valid in the cache. MaxCacheTTL *OptionalDuration `json:",omitempty"` diff --git a/config/experiments.go b/config/experiments.go index fab1f953c..6c43ac04f 100644 --- a/config/experiments.go +++ b/config/experiments.go @@ -6,7 +6,7 @@ type Experiments struct { ShardingEnabled bool `json:",omitempty"` // deprecated by autosharding: https://github.com/ipfs/kubo/pull/8527 Libp2pStreamMounting bool P2pHttpProxy bool //nolint - StrategicProviding bool + StrategicProviding bool `json:",omitempty"` // removed, use Provider.Enabled instead OptimisticProvide bool OptimisticProvideJobsPoolSize int GatewayOverLibp2p bool `json:",omitempty"` diff --git a/config/gateway.go b/config/gateway.go index 35af598b4..3495caede 100644 --- a/config/gateway.go +++ b/config/gateway.go @@ -1,10 +1,20 @@ package config +import ( + "github.com/ipfs/boxo/gateway" +) + const ( DefaultInlineDNSLink = false DefaultDeserializedResponses = true DefaultDisableHTMLErrors = false - DefaultExposeRoutingAPI = false + DefaultExposeRoutingAPI = true + DefaultDiagnosticServiceURL = "https://check.ipfs.network" + + // Gateway limit defaults from boxo + DefaultRetrievalTimeout = 
gateway.DefaultRetrievalTimeout + DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests + DefaultMaxRangeRequestFileSize = 0 // 0 means no limit ) type GatewaySpec struct { @@ -73,4 +83,33 @@ type Gateway struct { // ExposeRoutingAPI configures the gateway port to expose // routing system as HTTP API at /routing/v1 (https://specs.ipfs.tech/routing/http-routing-v1/). ExposeRoutingAPI Flag + + // RetrievalTimeout enforces a maximum duration for content retrieval: + // - Time to first byte: If the gateway cannot start writing the response within + // this duration (e.g., stuck searching for providers), a 504 Gateway Timeout + // is returned. + // - Time between writes: After the first byte, the timeout resets each time new + // bytes are written to the client. If the gateway cannot write additional data + // within this duration after the last successful write, the response is terminated. + // This helps free resources when the gateway gets stuck looking for providers + // or cannot retrieve the requested content. + // A value of 0 disables this timeout. + RetrievalTimeout *OptionalDuration `json:",omitempty"` + + // MaxConcurrentRequests limits concurrent HTTP requests handled by the gateway. + // Requests beyond this limit receive 429 Too Many Requests with Retry-After header. + // A value of 0 disables the limit. + MaxConcurrentRequests *OptionalInteger `json:",omitempty"` + + // MaxRangeRequestFileSize limits the maximum file size for HTTP range requests. + // Range requests for files larger than this limit return 501 Not Implemented. + // This protects against CDN issues with large file range requests and prevents + // excessive bandwidth consumption. A value of 0 disables the limit. + MaxRangeRequestFileSize *OptionalBytes `json:",omitempty"` + + // DiagnosticServiceURL is the URL for a service to diagnose CID retrievability issues. 
+ // When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID" + // button will be shown that links to this service with the CID appended as ?cid=. + // Set to empty string to disable the button. + DiagnosticServiceURL *OptionalString `json:",omitempty"` } diff --git a/config/http_retrieval.go b/config/http_retrieval.go new file mode 100644 index 000000000..b7e9dbd5d --- /dev/null +++ b/config/http_retrieval.go @@ -0,0 +1,19 @@ +package config + +// HTTPRetrieval is the configuration object for HTTP Retrieval settings. +// Implicit defaults can be found in core/node/bitswap.go +type HTTPRetrieval struct { + Enabled Flag `json:",omitempty"` + Allowlist []string `json:",omitempty"` + Denylist []string `json:",omitempty"` + NumWorkers *OptionalInteger `json:",omitempty"` + MaxBlockSize *OptionalString `json:",omitempty"` + TLSInsecureSkipVerify Flag `json:",omitempty"` +} + +const ( + DefaultHTTPRetrievalEnabled = true + DefaultHTTPRetrievalNumWorkers = 16 + DefaultHTTPRetrievalTLSInsecureSkipVerify = false // only for testing with self-signed HTTPS certs + DefaultHTTPRetrievalMaxBlockSize = "2MiB" // matching bitswap: https://specs.ipfs.tech/bitswap-protocol/#block-sizes +) diff --git a/config/import.go b/config/import.go index 10af4edfa..d595199c8 100644 --- a/config/import.go +++ b/config/import.go @@ -1,17 +1,184 @@ package config +import ( + "fmt" + "strconv" + "strings" + + "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" + "github.com/ipfs/boxo/ipld/unixfs/io" + "github.com/ipfs/boxo/verifcid" + mh "github.com/multiformats/go-multihash" +) + const ( DefaultCidVersion = 0 DefaultUnixFSRawLeaves = false DefaultUnixFSChunker = "size-262144" DefaultHashFunction = "sha2-256" + DefaultFastProvideRoot = true + DefaultFastProvideWait = false + + DefaultUnixFSHAMTDirectorySizeThreshold = 262144 // 256KiB - https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26 + + // 
DefaultBatchMaxNodes controls the maximum number of nodes in a + write-batch. The total size of the batch is limited by + BatchMaxNodes and BatchMaxSize. + DefaultBatchMaxNodes = 128 + // DefaultBatchMaxSize controls the maximum size of a single + write-batch. The total size of the batch is limited by + BatchMaxNodes and BatchMaxSize. + DefaultBatchMaxSize = 100 << 20 // 100MiB +) + +var ( + DefaultUnixFSFileMaxLinks = int64(helpers.DefaultLinksPerBlock) + DefaultUnixFSDirectoryMaxLinks = int64(0) + DefaultUnixFSHAMTDirectoryMaxFanout = int64(io.DefaultShardWidth) ) // Import configures the default options for ingesting data. This affects commands // that ingest data, such as 'ipfs add', 'ipfs dag put', 'ipfs block put', 'ipfs files write'. type Import struct { - CidVersion OptionalInteger - UnixFSRawLeaves Flag - UnixFSChunker OptionalString - HashFunction OptionalString + CidVersion OptionalInteger + UnixFSRawLeaves Flag + UnixFSChunker OptionalString + HashFunction OptionalString + UnixFSFileMaxLinks OptionalInteger + UnixFSDirectoryMaxLinks OptionalInteger + UnixFSHAMTDirectoryMaxFanout OptionalInteger + UnixFSHAMTDirectorySizeThreshold OptionalBytes + BatchMaxNodes OptionalInteger + BatchMaxSize OptionalInteger + FastProvideRoot Flag + FastProvideWait Flag +} + +// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements. 
+// See: https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters +func ValidateImportConfig(cfg *Import) error { + // Validate CidVersion + if !cfg.CidVersion.IsDefault() { + cidVer := cfg.CidVersion.WithDefault(DefaultCidVersion) + if cidVer != 0 && cidVer != 1 { + return fmt.Errorf("Import.CidVersion must be 0 or 1, got %d", cidVer) + } + } + + // Validate UnixFSFileMaxLinks + if !cfg.UnixFSFileMaxLinks.IsDefault() { + maxLinks := cfg.UnixFSFileMaxLinks.WithDefault(DefaultUnixFSFileMaxLinks) + if maxLinks <= 0 { + return fmt.Errorf("Import.UnixFSFileMaxLinks must be positive, got %d", maxLinks) + } + } + + // Validate UnixFSDirectoryMaxLinks + if !cfg.UnixFSDirectoryMaxLinks.IsDefault() { + maxLinks := cfg.UnixFSDirectoryMaxLinks.WithDefault(DefaultUnixFSDirectoryMaxLinks) + if maxLinks < 0 { + return fmt.Errorf("Import.UnixFSDirectoryMaxLinks must be non-negative, got %d", maxLinks) + } + } + + // Validate UnixFSHAMTDirectoryMaxFanout if set + if !cfg.UnixFSHAMTDirectoryMaxFanout.IsDefault() { + fanout := cfg.UnixFSHAMTDirectoryMaxFanout.WithDefault(DefaultUnixFSHAMTDirectoryMaxFanout) + + // Check all requirements: fanout < 8 covers both non-positive and non-multiple of 8 + // Combined with power of 2 check and max limit, this ensures valid values: 8, 16, 32, 64, 128, 256, 512, 1024 + if fanout < 8 || !isPowerOfTwo(fanout) || fanout > 1024 { + return fmt.Errorf("Import.UnixFSHAMTDirectoryMaxFanout must be a positive power of 2, multiple of 8, and not exceed 1024 (got %d)", fanout) + } + } + + // Validate BatchMaxNodes + if !cfg.BatchMaxNodes.IsDefault() { + maxNodes := cfg.BatchMaxNodes.WithDefault(DefaultBatchMaxNodes) + if maxNodes <= 0 { + return fmt.Errorf("Import.BatchMaxNodes must be positive, got %d", maxNodes) + } + } + + // Validate BatchMaxSize + if !cfg.BatchMaxSize.IsDefault() { + maxSize := cfg.BatchMaxSize.WithDefault(DefaultBatchMaxSize) + if maxSize <= 0 { + return fmt.Errorf("Import.BatchMaxSize must be positive, got %d", maxSize) + } + } 
+ + // Validate UnixFSChunker format + if !cfg.UnixFSChunker.IsDefault() { + chunker := cfg.UnixFSChunker.WithDefault(DefaultUnixFSChunker) + if !isValidChunker(chunker) { + return fmt.Errorf("Import.UnixFSChunker invalid format: %q (expected \"size-<bytes>\", \"rabin-<min>-<avg>-<max>\", or \"buzhash\")", chunker) + } + } + + // Validate HashFunction + if !cfg.HashFunction.IsDefault() { + hashFunc := cfg.HashFunction.WithDefault(DefaultHashFunction) + hashCode, ok := mh.Names[strings.ToLower(hashFunc)] + if !ok { + return fmt.Errorf("Import.HashFunction unrecognized: %q", hashFunc) + } + // Check if the hash is allowed by verifcid + if !verifcid.DefaultAllowlist.IsAllowed(hashCode) { + return fmt.Errorf("Import.HashFunction %q is not allowed for use in IPFS", hashFunc) + } + } + + return nil +} + +// isPowerOfTwo checks if a number is a power of 2 +func isPowerOfTwo(n int64) bool { + return n > 0 && (n&(n-1)) == 0 +} + +// isValidChunker validates chunker format +func isValidChunker(chunker string) bool { + if chunker == "buzhash" { + return true + } + + // Check for size-<bytes> format + if strings.HasPrefix(chunker, "size-") { + sizeStr := strings.TrimPrefix(chunker, "size-") + if sizeStr == "" { + return false + } + // Check if it's a valid positive integer (no negative sign allowed) + if sizeStr[0] == '-' { + return false + } + size, err := strconv.Atoi(sizeStr) + // Size must be positive (not zero) + return err == nil && size > 0 + } + + // Check for rabin-<min>-<avg>-<max> format + if strings.HasPrefix(chunker, "rabin-") { + parts := strings.Split(chunker, "-") + if len(parts) != 4 { + return false + } + + // Parse and validate min, avg, max values + values := make([]int, 3) + for i := 0; i < 3; i++ { + val, err := strconv.Atoi(parts[i+1]) + if err != nil { + return false + } + values[i] = val + } + + // Validate ordering: min <= avg <= max + min, avg, max := values[0], values[1], values[2] + return min <= avg && avg <= max + } + + return false } diff --git a/config/import_test.go 
b/config/import_test.go new file mode 100644 index 000000000..f045b9751 --- /dev/null +++ b/config/import_test.go @@ -0,0 +1,408 @@ +package config + +import ( + "strings" + "testing" + + mh "github.com/multiformats/go-multihash" +) + +func TestValidateImportConfig_HAMTFanout(t *testing.T) { + tests := []struct { + name string + fanout int64 + wantErr bool + errMsg string + }{ + // Valid values - powers of 2, multiples of 8, and <= 1024 + {name: "valid 8", fanout: 8, wantErr: false}, + {name: "valid 16", fanout: 16, wantErr: false}, + {name: "valid 32", fanout: 32, wantErr: false}, + {name: "valid 64", fanout: 64, wantErr: false}, + {name: "valid 128", fanout: 128, wantErr: false}, + {name: "valid 256", fanout: 256, wantErr: false}, + {name: "valid 512", fanout: 512, wantErr: false}, + {name: "valid 1024", fanout: 1024, wantErr: false}, + + // Invalid values - not powers of 2 + {name: "invalid 7", fanout: 7, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 15", fanout: 15, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 100", fanout: 100, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 257", fanout: 257, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 1000", fanout: 1000, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + + // Invalid values - powers of 2 but not multiples of 8 + {name: "invalid 1", fanout: 1, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 2", fanout: 2, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 4", fanout: 4, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + + // Invalid values - 
exceeds 1024 + {name: "invalid 2048", fanout: 2048, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid 4096", fanout: 4096, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + + // Invalid values - negative or zero + {name: "invalid 0", fanout: 0, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid -8", fanout: -8, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + {name: "invalid -256", fanout: -256, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Import{ + UnixFSHAMTDirectoryMaxFanout: *NewOptionalInteger(tt.fanout), + } + + err := ValidateImportConfig(cfg) + + if tt.wantErr { + if err == nil { + t.Errorf("ValidateImportConfig() expected error for fanout=%d, got nil", tt.fanout) + } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg) + } + } else { + if err != nil { + t.Errorf("ValidateImportConfig() unexpected error for fanout=%d: %v", tt.fanout, err) + } + } + }) + } +} + +func TestValidateImportConfig_CidVersion(t *testing.T) { + tests := []struct { + name string + cidVer int64 + wantErr bool + errMsg string + }{ + {name: "valid 0", cidVer: 0, wantErr: false}, + {name: "valid 1", cidVer: 1, wantErr: false}, + {name: "invalid 2", cidVer: 2, wantErr: true, errMsg: "must be 0 or 1"}, + {name: "invalid -1", cidVer: -1, wantErr: true, errMsg: "must be 0 or 1"}, + {name: "invalid 100", cidVer: 100, wantErr: true, errMsg: "must be 0 or 1"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Import{ + CidVersion: *NewOptionalInteger(tt.cidVer), + } + + err := ValidateImportConfig(cfg) + + if tt.wantErr 
{ + if err == nil { + t.Errorf("ValidateImportConfig() expected error for cidVer=%d, got nil", tt.cidVer) + } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg) + } + } else { + if err != nil { + t.Errorf("ValidateImportConfig() unexpected error for cidVer=%d: %v", tt.cidVer, err) + } + } + }) + } +} + +func TestValidateImportConfig_UnixFSFileMaxLinks(t *testing.T) { + tests := []struct { + name string + maxLinks int64 + wantErr bool + errMsg string + }{ + {name: "valid 1", maxLinks: 1, wantErr: false}, + {name: "valid 174", maxLinks: 174, wantErr: false}, + {name: "valid 1000", maxLinks: 1000, wantErr: false}, + {name: "invalid 0", maxLinks: 0, wantErr: true, errMsg: "must be positive"}, + {name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be positive"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Import{ + UnixFSFileMaxLinks: *NewOptionalInteger(tt.maxLinks), + } + + err := ValidateImportConfig(cfg) + + if tt.wantErr { + if err == nil { + t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks) + } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg) + } + } else { + if err != nil { + t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err) + } + } + }) + } +} + +func TestValidateImportConfig_UnixFSDirectoryMaxLinks(t *testing.T) { + tests := []struct { + name string + maxLinks int64 + wantErr bool + errMsg string + }{ + {name: "valid 0", maxLinks: 0, wantErr: false}, // 0 means no limit + {name: "valid 1", maxLinks: 1, wantErr: false}, + {name: "valid 1000", maxLinks: 1000, wantErr: false}, + {name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be non-negative"}, + {name: "invalid -100", maxLinks: -100, wantErr: true, 
errMsg: "must be non-negative"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Import{ + UnixFSDirectoryMaxLinks: *NewOptionalInteger(tt.maxLinks), + } + + err := ValidateImportConfig(cfg) + + if tt.wantErr { + if err == nil { + t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks) + } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg) + } + } else { + if err != nil { + t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err) + } + } + }) + } +} + +func TestValidateImportConfig_BatchMax(t *testing.T) { + tests := []struct { + name string + maxNodes int64 + maxSize int64 + wantErr bool + errMsg string + }{ + {name: "valid nodes 1", maxNodes: 1, maxSize: -999, wantErr: false}, + {name: "valid nodes 128", maxNodes: 128, maxSize: -999, wantErr: false}, + {name: "valid size 1", maxNodes: -999, maxSize: 1, wantErr: false}, + {name: "valid size 20MB", maxNodes: -999, maxSize: 20 << 20, wantErr: false}, + {name: "invalid nodes 0", maxNodes: 0, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"}, + {name: "invalid nodes -1", maxNodes: -1, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"}, + {name: "invalid size 0", maxNodes: -999, maxSize: 0, wantErr: true, errMsg: "BatchMaxSize must be positive"}, + {name: "invalid size -1", maxNodes: -999, maxSize: -1, wantErr: true, errMsg: "BatchMaxSize must be positive"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Import{} + if tt.maxNodes != -999 { + cfg.BatchMaxNodes = *NewOptionalInteger(tt.maxNodes) + } + if tt.maxSize != -999 { + cfg.BatchMaxSize = *NewOptionalInteger(tt.maxSize) + } + + err := ValidateImportConfig(cfg) + + if tt.wantErr { + if err == nil { + t.Errorf("ValidateImportConfig() expected error, got nil") + } else if tt.errMsg 
!= "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg) + } + } else { + if err != nil { + t.Errorf("ValidateImportConfig() unexpected error: %v", err) + } + } + }) + } +} + +func TestValidateImportConfig_UnixFSChunker(t *testing.T) { + tests := []struct { + name string + chunker string + wantErr bool + errMsg string + }{ + {name: "valid size-262144", chunker: "size-262144", wantErr: false}, + {name: "valid size-1", chunker: "size-1", wantErr: false}, + {name: "valid size-1048576", chunker: "size-1048576", wantErr: false}, + {name: "valid rabin", chunker: "rabin-128-256-512", wantErr: false}, + {name: "valid rabin min", chunker: "rabin-16-32-64", wantErr: false}, + {name: "valid buzhash", chunker: "buzhash", wantErr: false}, + {name: "invalid size-", chunker: "size-", wantErr: true, errMsg: "invalid format"}, + {name: "invalid size-abc", chunker: "size-abc", wantErr: true, errMsg: "invalid format"}, + {name: "invalid rabin-", chunker: "rabin-", wantErr: true, errMsg: "invalid format"}, + {name: "invalid rabin-128", chunker: "rabin-128", wantErr: true, errMsg: "invalid format"}, + {name: "invalid rabin-128-256", chunker: "rabin-128-256", wantErr: true, errMsg: "invalid format"}, + {name: "invalid rabin-a-b-c", chunker: "rabin-a-b-c", wantErr: true, errMsg: "invalid format"}, + {name: "invalid unknown", chunker: "unknown", wantErr: true, errMsg: "invalid format"}, + {name: "invalid empty", chunker: "", wantErr: true, errMsg: "invalid format"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Import{ + UnixFSChunker: *NewOptionalString(tt.chunker), + } + + err := ValidateImportConfig(cfg) + + if tt.wantErr { + if err == nil { + t.Errorf("ValidateImportConfig() expected error for chunker=%s, got nil", tt.chunker) + } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("ValidateImportConfig() error = %v, want error 
containing %q", err, tt.errMsg) + } + } else { + if err != nil { + t.Errorf("ValidateImportConfig() unexpected error for chunker=%s: %v", tt.chunker, err) + } + } + }) + } +} + +func TestValidateImportConfig_HashFunction(t *testing.T) { + tests := []struct { + name string + hashFunc string + wantErr bool + errMsg string + }{ + {name: "valid sha2-256", hashFunc: "sha2-256", wantErr: false}, + {name: "valid sha2-512", hashFunc: "sha2-512", wantErr: false}, + {name: "valid sha3-256", hashFunc: "sha3-256", wantErr: false}, + {name: "valid blake2b-256", hashFunc: "blake2b-256", wantErr: false}, + {name: "valid blake3", hashFunc: "blake3", wantErr: false}, + {name: "invalid unknown", hashFunc: "unknown-hash", wantErr: true, errMsg: "unrecognized"}, + {name: "invalid empty", hashFunc: "", wantErr: true, errMsg: "unrecognized"}, + } + + // Check for hashes that exist but are not allowed + // MD5 should exist but not be allowed + if code, ok := mh.Names["md5"]; ok { + tests = append(tests, struct { + name string + hashFunc string + wantErr bool + errMsg string + }{name: "md5 not allowed", hashFunc: "md5", wantErr: true, errMsg: "not allowed"}) + _ = code // use the variable + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Import{ + HashFunction: *NewOptionalString(tt.hashFunc), + } + + err := ValidateImportConfig(cfg) + + if tt.wantErr { + if err == nil { + t.Errorf("ValidateImportConfig() expected error for hashFunc=%s, got nil", tt.hashFunc) + } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg) + } + } else { + if err != nil { + t.Errorf("ValidateImportConfig() unexpected error for hashFunc=%s: %v", tt.hashFunc, err) + } + } + }) + } +} + +func TestValidateImportConfig_DefaultValue(t *testing.T) { + // Test that default (unset) value doesn't trigger validation + cfg := &Import{} + + err := ValidateImportConfig(cfg) + if err != 
nil { + t.Errorf("ValidateImportConfig() unexpected error for default config: %v", err) + } +} + +func TestIsValidChunker(t *testing.T) { + tests := []struct { + chunker string + want bool + }{ + {"buzhash", true}, + {"size-262144", true}, + {"size-1", true}, + {"size-0", false}, // 0 is not valid - must be positive + {"size-9999999", true}, + {"rabin-128-256-512", true}, + {"rabin-16-32-64", true}, + {"rabin-1-2-3", true}, + {"rabin-512-256-128", false}, // Invalid ordering: min > avg > max + {"rabin-256-128-512", false}, // Invalid ordering: min > avg + {"rabin-128-512-256", false}, // Invalid ordering: avg > max + + {"", false}, + {"size-", false}, + {"size-abc", false}, + {"size--1", false}, + {"rabin-", false}, + {"rabin-128", false}, + {"rabin-128-256", false}, + {"rabin-128-256-512-1024", false}, + {"rabin-a-b-c", false}, + {"unknown", false}, + {"buzzhash", false}, // typo + } + + for _, tt := range tests { + t.Run(tt.chunker, func(t *testing.T) { + if got := isValidChunker(tt.chunker); got != tt.want { + t.Errorf("isValidChunker(%q) = %v, want %v", tt.chunker, got, tt.want) + } + }) + } +} + +func TestIsPowerOfTwo(t *testing.T) { + tests := []struct { + n int64 + want bool + }{ + {0, false}, + {1, true}, + {2, true}, + {3, false}, + {4, true}, + {5, false}, + {6, false}, + {7, false}, + {8, true}, + {16, true}, + {32, true}, + {64, true}, + {100, false}, + {128, true}, + {256, true}, + {512, true}, + {1024, true}, + {2048, true}, + {-1, false}, + {-8, false}, + } + + for _, tt := range tests { + t.Run("", func(t *testing.T) { + if got := isPowerOfTwo(tt.n); got != tt.want { + t.Errorf("isPowerOfTwo(%d) = %v, want %v", tt.n, got, tt.want) + } + }) + } +} diff --git a/config/init.go b/config/init.go index 4a86aa518..0aeffef5f 100644 --- a/config/init.go +++ b/config/init.go @@ -7,6 +7,7 @@ import ( "io" "time" + "github.com/cockroachdb/pebble/v2" "github.com/ipfs/kubo/core/coreiface/options" "github.com/libp2p/go-libp2p/core/crypto" 
"github.com/libp2p/go-libp2p/core/peer" @@ -22,11 +23,6 @@ func Init(out io.Writer, nBitsForKeypair int) (*Config, error) { } func InitWithIdentity(identity Identity) (*Config, error) { - bootstrapPeers, err := DefaultBootstrapPeers() - if err != nil { - return nil, err - } - datastore := DefaultDatastoreConfig() conf := &Config{ @@ -39,7 +35,7 @@ func InitWithIdentity(identity Identity) (*Config, error) { Addresses: addressesConfig(), Datastore: datastore, - Bootstrap: BootstrapPeerStrings(bootstrapPeers), + Bootstrap: []string{AutoPlaceholder}, Identity: identity, Discovery: Discovery{ MDNS: MDNS{ @@ -47,20 +43,16 @@ func InitWithIdentity(identity Identity) (*Config, error) { }, }, - Routing: Routing{ - Type: nil, - Methods: nil, - Routers: nil, - }, - // setup the node mount points. Mounts: Mounts{ IPFS: "/ipfs", IPNS: "/ipns", + MFS: "/mfs", }, Ipns: Ipns{ - ResolveCacheSize: 128, + ResolveCacheSize: 128, + DelegatedPublishers: []string{AutoPlaceholder}, }, Gateway: Gateway{ @@ -68,19 +60,16 @@ func InitWithIdentity(identity Identity) (*Config, error) { NoFetch: false, HTTPHeaders: map[string][]string{}, }, - Reprovider: Reprovider{ - Interval: nil, - Strategy: nil, - }, Pinning: Pinning{ RemoteServices: map[string]RemotePinningService{}, }, DNS: DNS{ - Resolvers: map[string]string{}, + Resolvers: map[string]string{ + ".": AutoPlaceholder, + }, }, - Migration: Migration{ - DownloadSources: []string{}, - Keep: "", + Routing: Routing{ + DelegatedRouters: []string{AutoPlaceholder}, }, } @@ -99,6 +88,9 @@ const DefaultConnMgrLowWater = 32 // grace period. const DefaultConnMgrGracePeriod = time.Second * 20 +// DefaultConnMgrSilencePeriod controls how often the connection manager enforces the limits. +const DefaultConnMgrSilencePeriod = time.Second * 10 + // DefaultConnMgrType is the default value for the connection managers // type. 
const DefaultConnMgrType = "basic" @@ -138,7 +130,38 @@ func DefaultDatastoreConfig() Datastore { } } +func pebbleSpec() map[string]interface{} { + return map[string]interface{}{ + "type": "pebbleds", + "prefix": "pebble.datastore", + "path": "pebbleds", + "formatMajorVersion": int(pebble.FormatNewest), + } +} + +func pebbleSpecMeasure() map[string]interface{} { + return map[string]interface{}{ + "type": "measure", + "prefix": "pebble.datastore", + "child": map[string]interface{}{ + "formatMajorVersion": int(pebble.FormatNewest), + "type": "pebbleds", + "path": "pebbleds", + }, + } +} + func badgerSpec() map[string]interface{} { + return map[string]interface{}{ + "type": "badgerds", + "prefix": "badger.datastore", + "path": "badgerds", + "syncWrites": false, + "truncate": true, + } +} + +func badgerSpecMeasure() map[string]interface{} { return map[string]interface{}{ "type": "measure", "prefix": "badger.datastore", @@ -152,6 +175,29 @@ func badgerSpec() map[string]interface{} { } func flatfsSpec() map[string]interface{} { + return map[string]interface{}{ + "type": "mount", + "mounts": []interface{}{ + map[string]interface{}{ + "mountpoint": "/blocks", + "type": "flatfs", + "prefix": "flatfs.datastore", + "path": "blocks", + "sync": false, + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", + }, + map[string]interface{}{ + "mountpoint": "/", + "type": "levelds", + "prefix": "leveldb.datastore", + "path": "datastore", + "compression": "none", + }, + }, + } +} + +func flatfsSpecMeasure() map[string]interface{} { return map[string]interface{}{ "type": "mount", "mounts": []interface{}{ @@ -162,7 +208,7 @@ func flatfsSpec() map[string]interface{} { "child": map[string]interface{}{ "type": "flatfs", "path": "blocks", - "sync": true, + "sync": false, "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", }, }, diff --git a/config/internal.go b/config/internal.go index f43746534..f344e5252 100644 --- a/config/internal.go +++ b/config/internal.go @@ -1,11 +1,23 @@ package 
config +const ( + // DefaultMFSNoFlushLimit is the default limit for consecutive unflushed MFS operations + DefaultMFSNoFlushLimit = 256 +) + type Internal struct { // All marked as omitempty since we are expecting to make changes to all subcomponents of Internal Bitswap *InternalBitswap `json:",omitempty"` - UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"` + UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"` // moved to Import.UnixFSHAMTDirectorySizeThreshold Libp2pForceReachability *OptionalString `json:",omitempty"` BackupBootstrapInterval *OptionalDuration `json:",omitempty"` + // MFSNoFlushLimit controls the maximum number of consecutive + // MFS operations allowed with --flush=false before requiring a manual flush. + // This prevents unbounded memory growth and ensures data consistency. + // Set to 0 to disable limiting (old behavior, may cause high memory usage). + // This is an EXPERIMENTAL feature and may change or be removed in future releases. + // See https://github.com/ipfs/kubo/issues/10842 + MFSNoFlushLimit *OptionalInteger `json:",omitempty"` } type InternalBitswap struct { @@ -14,5 +26,53 @@ type InternalBitswap struct { EngineTaskWorkerCount OptionalInteger MaxOutstandingBytesPerPeer OptionalInteger ProviderSearchDelay OptionalDuration + ProviderSearchMaxResults OptionalInteger WantHaveReplaceSize OptionalInteger + BroadcastControl *BitswapBroadcastControl } + +type BitswapBroadcastControl struct { + // Enable enables or disables broadcast control functionality. Setting this + // to false disables broadcast control functionality and restores the + // previous broadcast behavior of sending broadcasts to all peers. When + // disabled, all other BroadcastControl configuration items are ignored. + // Default is [DefaultBroadcastControlEnable]. + Enable Flag `json:",omitempty"` + // MaxPeers sets a hard limit on the number of peers to send broadcasts to. + // A value of 0 means no broadcasts are sent.
A value of -1 means there is + // no limit. Default is [DefaultBroadcastControlMaxPeers]. + MaxPeers OptionalInteger `json:",omitempty"` + // LocalPeers enables or disables broadcast control for peers on the local + // network. If false, then always broadcast to peers on the local network. + // If true, apply broadcast control to local peers. Default is + // [DefaultBroadcastControlLocalPeers]. + LocalPeers Flag `json:",omitempty"` + // PeeredPeers enables or disables broadcast reduction for peers configured + // for peering. If false, then always broadcast to peers configured for + // peering. If true, apply broadcast reduction to peered peers. Default is + // [DefaultBroadcastControlPeeredPeers]. + PeeredPeers Flag `json:",omitempty"` + // MaxRandomPeers is the number of peers to broadcast to anyway, even + // though broadcast reduction logic has determined that they are not + // broadcast targets. Setting this to a non-zero value ensures at least + // this number of random peers receives a broadcast. This may be helpful in + // cases where peers that are not receiving broadcasts may have wanted + // blocks. Default is [DefaultBroadcastControlMaxRandomPeers]. + // SendToPendingPeers enables or disables sending broadcasts to any peers + // to which there is a pending message to send. When enabled, this sends + // broadcasts to many more peers, but does so in a way that does not + // increase the number of separate broadcast messages. There is still the + // increased cost of the recipients having to process and respond to the + // broadcasts. Default is [DefaultBroadcastControlSendToPendingPeers].
+ SendToPendingPeers Flag `json:",omitempty"` +} + +const ( + DefaultBroadcastControlEnable = true // Enabled + DefaultBroadcastControlMaxPeers = -1 // Unlimited + DefaultBroadcastControlLocalPeers = false // No control of local + DefaultBroadcastControlPeeredPeers = false // No control of peered + DefaultBroadcastControlMaxRandomPeers = 0 // No randoms + DefaultBroadcastControlSendToPendingPeers = false // Disabled +) diff --git a/config/ipns.go b/config/ipns.go index 288421973..6ffe981bc 100644 --- a/config/ipns.go +++ b/config/ipns.go @@ -20,4 +20,7 @@ type Ipns struct { // Enable namesys pubsub (--enable-namesys-pubsub) UsePubsub Flag `json:",omitempty"` + + // Simplified configuration for delegated IPNS publishers + DelegatedPublishers []string } diff --git a/config/migration.go b/config/migration.go index e172988a9..d2626800c 100644 --- a/config/migration.go +++ b/config/migration.go @@ -2,16 +2,18 @@ package config const DefaultMigrationKeep = "cache" -var DefaultMigrationDownloadSources = []string{"HTTPS", "IPFS"} +// DefaultMigrationDownloadSources defines the default download sources for legacy migrations (repo versions <16). +// Only HTTPS is supported for legacy migrations. IPFS downloads are not supported. +var DefaultMigrationDownloadSources = []string{"HTTPS"} -// Migration configures how migrations are downloaded and if the downloads are -// added to IPFS locally. +// Migration configures how legacy migrations are downloaded (repo versions <16). +// +// DEPRECATED: This configuration only applies to legacy external migrations for repository +// versions below 16. Modern repositories (v16+) use embedded migrations that do not require +// external downloads. These settings will be ignored for modern repository versions. type Migration struct { - // Sources in order of preference, where "IPFS" means use IPFS and "HTTPS" - // means use default gateways. Any other values are interpreted as - // hostnames for custom gateways. 
Empty list means "use default sources". - DownloadSources []string - // Whether or not to keep the migration after downloading it. - // Options are "discard", "cache", "pin". Empty string for default. - Keep string + // DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16). + DownloadSources []string `json:",omitempty"` + // DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16). + Keep string `json:",omitempty"` } diff --git a/config/mounts.go b/config/mounts.go index dfdd1e5bf..571316cf3 100644 --- a/config/mounts.go +++ b/config/mounts.go @@ -4,5 +4,6 @@ package config type Mounts struct { IPFS string IPNS string + MFS string FuseAllowOther bool } diff --git a/config/plugins.go b/config/plugins.go index 08a1acb34..0c438cbd7 100644 --- a/config/plugins.go +++ b/config/plugins.go @@ -7,5 +7,5 @@ type Plugins struct { type Plugin struct { Disabled bool - Config interface{} + Config interface{} `json:",omitempty"` } diff --git a/config/profile.go b/config/profile.go index 0ee9225be..692688796 100644 --- a/config/profile.go +++ b/config/profile.go @@ -86,6 +86,13 @@ is useful when using the daemon in test environments.`, c.Bootstrap = []string{} c.Discovery.MDNS.Enabled = false + c.AutoTLS.Enabled = False + c.AutoConf.Enabled = False + + // Explicitly set autoconf-controlled fields to empty when autoconf is disabled + c.DNS.Resolvers = map[string]string{} + c.Routing.DelegatedRouters = []string{} + c.Ipns.DelegatedPublishers = []string{} return nil }, }, @@ -96,14 +103,14 @@ Inverse profile of the test profile.`, Transform: func(c *Config) error { c.Addresses = addressesConfig() - bootstrapPeers, err := DefaultBootstrapPeers() - if err != nil { - return err - } - c.Bootstrap = appendSingle(c.Bootstrap, BootstrapPeerStrings(bootstrapPeers)) + // Use AutoConf system for bootstrap peers + c.Bootstrap = []string{AutoPlaceholder} + c.AutoConf.Enabled = Default + c.AutoConf.URL = nil // Clear URL 
to use implicit default c.Swarm.DisableNatPortMap = false c.Discovery.MDNS.Enabled = true + c.AutoTLS.Enabled = Default return nil }, }, @@ -135,7 +142,11 @@ You should use this datastore if: * You want to minimize memory usage. * You are ok with the default speed of data import, or prefer to use --nocopy. -This profile may only be applied when first initializing the node. +See configuration documentation at: +https://github.com/ipfs/kubo/blob/master/docs/datastores.md#flatfs + +NOTE: This profile may only be applied when first initializing node at IPFS_PATH + via 'ipfs init --profile flatfs' `, InitOnly: true, @@ -144,6 +155,60 @@ This profile may only be applied when first initializing the node. return nil }, }, + "flatfs-measure": { + Description: `Configures the node to use the flatfs datastore with metrics tracking wrapper. +Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus + +NOTE: This profile may only be applied when first initializing node at IPFS_PATH + via 'ipfs init --profile flatfs-measure' +`, + + InitOnly: true, + Transform: func(c *Config) error { + c.Datastore.Spec = flatfsSpecMeasure() + return nil + }, + }, + "pebbleds": { + Description: `Configures the node to use the pebble high-performance datastore. + +Pebble is a LevelDB/RocksDB inspired key-value store focused on performance +and internal usage by CockroachDB. +You should use this datastore if: + +- You need a datastore that is focused on performance. +- You need reliability by default, but may choose to disable WAL for maximum performance when reliability is not critical. +- This datastore is good for multi-terabyte data sets. +- May benefit from tuning depending on read/write patterns and throughput. +- Performance is helped significantly by running on a system with plenty of memory. 
+ +See configuration documentation at: +https://github.com/ipfs/kubo/blob/master/docs/datastores.md#pebbleds + +NOTE: This profile may only be applied when first initializing node at IPFS_PATH + via 'ipfs init --profile pebbleds' +`, + + InitOnly: true, + Transform: func(c *Config) error { + c.Datastore.Spec = pebbleSpec() + return nil + }, + }, + "pebbleds-measure": { + Description: `Configures the node to use the pebble datastore with metrics tracking wrapper. +Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus + +NOTE: This profile may only be applied when first initializing node at IPFS_PATH + via 'ipfs init --profile pebbleds-measure' +`, + + InitOnly: true, + Transform: func(c *Config) error { + c.Datastore.Spec = pebbleSpecMeasure() + return nil + }, + }, "badgerds": { Description: `Configures the node to use the legacy badgerv1 datastore. @@ -160,7 +225,12 @@ Other caveats: * Good for medium-size datastores, but may run into performance issues if your dataset is bigger than a terabyte. -This profile may only be applied when first initializing the node.`, +See configuration documentation at: +https://github.com/ipfs/kubo/blob/master/docs/datastores.md#badgerds + +NOTE: This profile may only be applied when first initializing node at IPFS_PATH + via 'ipfs init --profile badgerds' +`, InitOnly: true, Transform: func(c *Config) error { @@ -168,6 +238,20 @@ This profile may only be applied when first initializing the node.`, return nil }, }, + "badgerds-measure": { + Description: `Configures the node to use the legacy badgerv1 datastore with metrics wrapper. 
+Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus + +NOTE: This profile may only be applied when first initializing node at IPFS_PATH + via 'ipfs init --profile badgerds-measure' +`, + + InitOnly: true, + Transform: func(c *Config) error { + c.Datastore.Spec = badgerSpecMeasure() + return nil + }, + }, "lowpower": { Description: `Reduces daemon overhead on the system. May affect node functionality - performance of content discovery and data @@ -191,25 +275,25 @@ fetching may be degraded. }, }, "announce-off": { - Description: `Disables Reprovide system (and announcing to Amino DHT). + Description: `Disables Provide system (announcing to Amino DHT). USE WITH CAUTION: The main use case for this is setups with manual Peering.Peers config. Data from this node will not be announced on the DHT. This will make - DHT-based routing an data retrieval impossible if this node is the only + DHT-based routing and data retrieval impossible if this node is the only one hosting it, and other peers are not already connected to it. 
`, Transform: func(c *Config) error { - c.Reprovider.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide - c.Experimental.StrategicProviding = true // this is not a typo (the name is counter-intuitive) + c.Provide.Enabled = False + c.Provide.DHT.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide return nil }, }, "announce-on": { - Description: `Re-enables Reprovide system (reverts announce-off profile).`, + Description: `Re-enables Provide system (reverts announce-off profile).`, Transform: func(c *Config) error { - c.Reprovider.Interval = NewOptionalDuration(DefaultReproviderInterval) // have to apply explicit default because nil would be ignored - c.Experimental.StrategicProviding = false // this is not a typo (the name is counter-intuitive) + c.Provide.Enabled = True + c.Provide.DHT.Interval = NewOptionalDuration(DefaultProvideDHTInterval) // have to apply explicit default because nil would be ignored return nil }, }, @@ -229,24 +313,77 @@ fetching may be degraded. }, }, "legacy-cid-v0": { - Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks.`, - + Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks. 
This is likely the least optimal preset, use only if legacy behavior is required.`, Transform: func(c *Config) error { c.Import.CidVersion = *NewOptionalInteger(0) c.Import.UnixFSRawLeaves = False c.Import.UnixFSChunker = *NewOptionalString("size-262144") c.Import.HashFunction = *NewOptionalString("sha2-256") + c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174) + c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) + c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256) + c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB") return nil }, }, "test-cid-v1": { - Description: `Makes UnixFS import produce modern CIDv1 with raw leaves, sha2-256 and 1 MiB chunks.`, - + Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT above 256KiB).`, Transform: func(c *Config) error { c.Import.CidVersion = *NewOptionalInteger(1) c.Import.UnixFSRawLeaves = True c.Import.UnixFSChunker = *NewOptionalString("size-1048576") c.Import.HashFunction = *NewOptionalString("sha2-256") + c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174) + c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) + c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256) + c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB") + return nil + }, + }, + "test-cid-v1-wide": { + Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1MiB chunks and wider file DAGs (max 1024 links per every node type, switch dir to HAMT above 1MiB).`, + Transform: func(c *Config) error { + c.Import.CidVersion = *NewOptionalInteger(1) + c.Import.UnixFSRawLeaves = True + c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1MiB + c.Import.HashFunction = *NewOptionalString("sha2-256") + c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024) + c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based 
Import.UnixFSHAMTDirectorySizeThreshold instead + c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024) + c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("1MiB") // 1MiB + return nil + }, + }, + "autoconf-on": { + Description: `Sets configuration to use implicit defaults from remote autoconf service. +Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to "auto". +This profile requires AutoConf to be enabled and configured.`, + + Transform: func(c *Config) error { + c.Bootstrap = []string{AutoPlaceholder} + c.DNS.Resolvers = map[string]string{ + ".": AutoPlaceholder, + } + c.Routing.DelegatedRouters = []string{AutoPlaceholder} + c.Ipns.DelegatedPublishers = []string{AutoPlaceholder} + c.AutoConf.Enabled = True + if c.AutoConf.URL == nil { + c.AutoConf.URL = NewOptionalString(DefaultAutoConfURL) + } + return nil + }, + }, + "autoconf-off": { + Description: `Disables AutoConf and sets networking fields to empty for manual configuration. +Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to empty. 
+Use this when you want normal networking but prefer manual control over all endpoints.`, + + Transform: func(c *Config) error { + c.Bootstrap = nil + c.DNS.Resolvers = nil + c.Routing.DelegatedRouters = nil + c.Ipns.DelegatedPublishers = nil + c.AutoConf.Enabled = False return nil }, }, diff --git a/config/provide.go b/config/provide.go new file mode 100644 index 000000000..c194a39b5 --- /dev/null +++ b/config/provide.go @@ -0,0 +1,204 @@ +package config + +import ( + "fmt" + "strings" + "time" + + "github.com/libp2p/go-libp2p-kad-dht/amino" +) + +const ( + DefaultProvideEnabled = true + DefaultProvideStrategy = "all" + + // DHT provider defaults + DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326 + DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers + DefaultProvideDHTSweepEnabled = true + DefaultProvideDHTResumeEnabled = true + DefaultProvideDHTDedicatedPeriodicWorkers = 2 + DefaultProvideDHTDedicatedBurstWorkers = 1 + DefaultProvideDHTMaxProvideConnsPerWorker = 20 + DefaultProvideDHTKeystoreBatchSize = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes) + DefaultProvideDHTOfflineDelay = 2 * time.Hour + + // DefaultFastProvideTimeout is the maximum time allowed for fast-provide operations. + // Prevents hanging on network issues when providing root CID. + // 10 seconds is sufficient for DHT operations with sweep provider or accelerated client. + DefaultFastProvideTimeout = 10 * time.Second +) + +type ProvideStrategy int + +const ( + ProvideStrategyAll ProvideStrategy = 1 << iota + ProvideStrategyPinned + ProvideStrategyRoots + ProvideStrategyMFS +) + +// Provide configures both immediate CID announcements (provide operations) for new content +// and periodic re-announcements of existing CIDs (reprovide operations). +// This section combines the functionality previously split between Provider and Reprovider. 
+type Provide struct { + // Enabled controls whether both provide and reprovide systems are enabled. + // When disabled, the node will not announce any content to the routing system. + Enabled Flag `json:",omitempty"` + + // Strategy determines which CIDs are announced to the routing system. + // Default: DefaultProvideStrategy + Strategy *OptionalString `json:",omitempty"` + + // DHT configures DHT-specific provide and reprovide settings. + DHT ProvideDHT +} + +// ProvideDHT configures DHT provider settings for both immediate announcements +// and periodic reprovides. +type ProvideDHT struct { + // Interval sets the time between rounds of reproviding local content + // to the routing system. Set to "0" to disable content reproviding. + // Default: DefaultProvideDHTInterval + Interval *OptionalDuration `json:",omitempty"` + + // MaxWorkers sets the maximum number of concurrent workers for provide operations. + // When SweepEnabled is false: controls NEW CID announcements only. + // When SweepEnabled is true: controls total worker pool for all operations. + // Default: DefaultProvideDHTMaxWorkers + MaxWorkers *OptionalInteger `json:",omitempty"` + + // SweepEnabled activates the sweeping reprovider system which spreads + // reprovide operations over time. + // Default: DefaultProvideDHTSweepEnabled + SweepEnabled Flag `json:",omitempty"` + + // DedicatedPeriodicWorkers sets workers dedicated to periodic reprovides (sweep mode only). + // Default: DefaultProvideDHTDedicatedPeriodicWorkers + DedicatedPeriodicWorkers *OptionalInteger `json:",omitempty"` + + // DedicatedBurstWorkers sets workers dedicated to burst provides (sweep mode only). + // Default: DefaultProvideDHTDedicatedBurstWorkers + DedicatedBurstWorkers *OptionalInteger `json:",omitempty"` + + // MaxProvideConnsPerWorker sets concurrent connections per worker for sending provider records (sweep mode only). 
+ // Default: DefaultProvideDHTMaxProvideConnsPerWorker + MaxProvideConnsPerWorker *OptionalInteger `json:",omitempty"` + + // KeystoreBatchSize sets the batch size for keystore operations during reprovide refresh (sweep mode only). + // Default: DefaultProvideDHTKeystoreBatchSize + KeystoreBatchSize *OptionalInteger `json:",omitempty"` + + // OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only). + // Default: DefaultProvideDHTOfflineDelay + OfflineDelay *OptionalDuration `json:",omitempty"` + + // ResumeEnabled controls whether the provider resumes from its previous state on restart. + // When enabled, the provider persists its reprovide cycle state and provide queue to the datastore, + // and restores them on restart. When disabled, the provider starts fresh on each restart. + // Default: true + ResumeEnabled Flag `json:",omitempty"` +} + +func ParseProvideStrategy(s string) ProvideStrategy { + var strategy ProvideStrategy + for _, part := range strings.Split(s, "+") { + switch part { + case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all") + return ProvideStrategyAll + case "pinned": + strategy |= ProvideStrategyPinned + case "roots": + strategy |= ProvideStrategyRoots + case "mfs": + strategy |= ProvideStrategyMFS + } + } + return strategy +} + +// ValidateProvideConfig validates the Provide configuration according to DHT requirements. 
+func ValidateProvideConfig(cfg *Provide) error { + // Validate Provide.DHT.Interval + if !cfg.DHT.Interval.IsDefault() { + interval := cfg.DHT.Interval.WithDefault(DefaultProvideDHTInterval) + if interval > amino.DefaultProvideValidity { + return fmt.Errorf("Provide.DHT.Interval (%v) must be less than or equal to DHT provider record validity (%v)", interval, amino.DefaultProvideValidity) + } + if interval < 0 { + return fmt.Errorf("Provide.DHT.Interval must be non-negative, got %v", interval) + } + } + + // Validate MaxWorkers + if !cfg.DHT.MaxWorkers.IsDefault() { + maxWorkers := cfg.DHT.MaxWorkers.WithDefault(DefaultProvideDHTMaxWorkers) + if maxWorkers <= 0 { + return fmt.Errorf("Provide.DHT.MaxWorkers must be positive, got %d", maxWorkers) + } + } + + // Validate DedicatedPeriodicWorkers + if !cfg.DHT.DedicatedPeriodicWorkers.IsDefault() { + workers := cfg.DHT.DedicatedPeriodicWorkers.WithDefault(DefaultProvideDHTDedicatedPeriodicWorkers) + if workers < 0 { + return fmt.Errorf("Provide.DHT.DedicatedPeriodicWorkers must be non-negative, got %d", workers) + } + } + + // Validate DedicatedBurstWorkers + if !cfg.DHT.DedicatedBurstWorkers.IsDefault() { + workers := cfg.DHT.DedicatedBurstWorkers.WithDefault(DefaultProvideDHTDedicatedBurstWorkers) + if workers < 0 { + return fmt.Errorf("Provide.DHT.DedicatedBurstWorkers must be non-negative, got %d", workers) + } + } + + // Validate MaxProvideConnsPerWorker + if !cfg.DHT.MaxProvideConnsPerWorker.IsDefault() { + conns := cfg.DHT.MaxProvideConnsPerWorker.WithDefault(DefaultProvideDHTMaxProvideConnsPerWorker) + if conns <= 0 { + return fmt.Errorf("Provide.DHT.MaxProvideConnsPerWorker must be positive, got %d", conns) + } + } + + // Validate KeystoreBatchSize + if !cfg.DHT.KeystoreBatchSize.IsDefault() { + batchSize := cfg.DHT.KeystoreBatchSize.WithDefault(DefaultProvideDHTKeystoreBatchSize) + if batchSize <= 0 { + return fmt.Errorf("Provide.DHT.KeystoreBatchSize must be positive, got %d", batchSize) + } + } + + // 
Validate OfflineDelay + if !cfg.DHT.OfflineDelay.IsDefault() { + delay := cfg.DHT.OfflineDelay.WithDefault(DefaultProvideDHTOfflineDelay) + if delay < 0 { + return fmt.Errorf("Provide.DHT.OfflineDelay must be non-negative, got %v", delay) + } + } + + return nil +} + +// ShouldProvideForStrategy determines if content should be provided based on the provide strategy +// and content characteristics (pinned status, root status, MFS status). +func ShouldProvideForStrategy(strategy ProvideStrategy, isPinned bool, isPinnedRoot bool, isMFS bool) bool { + if strategy == ProvideStrategyAll { + // 'all' strategy: always provide + return true + } + + // For combined strategies, check each component + if strategy&ProvideStrategyPinned != 0 && isPinned { + return true + } + if strategy&ProvideStrategyRoots != 0 && isPinnedRoot { + return true + } + if strategy&ProvideStrategyMFS != 0 && isMFS { + return true + } + + return false +} diff --git a/config/provide_test.go b/config/provide_test.go new file mode 100644 index 000000000..5c8f5fac1 --- /dev/null +++ b/config/provide_test.go @@ -0,0 +1,191 @@ +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseProvideStrategy(t *testing.T) { + tests := []struct { + input string + expect ProvideStrategy + }{ + {"all", ProvideStrategyAll}, + {"pinned", ProvideStrategyPinned}, + {"mfs", ProvideStrategyMFS}, + {"pinned+mfs", ProvideStrategyPinned | ProvideStrategyMFS}, + {"invalid", 0}, + {"all+invalid", ProvideStrategyAll}, + {"", ProvideStrategyAll}, + {"flat", ProvideStrategyAll}, // deprecated, maps to "all" + {"flat+all", ProvideStrategyAll}, + } + + for _, tt := range tests { + result := ParseProvideStrategy(tt.input) + if result != tt.expect { + t.Errorf("ParseProvideStrategy(%q) = %d, want %d", tt.input, result, tt.expect) + } + } +} + +func TestValidateProvideConfig_Interval(t *testing.T) { + tests := []struct { + name string + interval 
time.Duration + wantErr bool + errMsg string + }{ + {"valid default (22h)", 22 * time.Hour, false, ""}, + {"valid max (48h)", 48 * time.Hour, false, ""}, + {"valid small (1h)", 1 * time.Hour, false, ""}, + {"valid zero (disabled)", 0, false, ""}, + {"invalid over limit (49h)", 49 * time.Hour, true, "must be less than or equal to DHT provider record validity"}, + {"invalid over limit (72h)", 72 * time.Hour, true, "must be less than or equal to DHT provider record validity"}, + {"invalid negative", -1 * time.Hour, true, "must be non-negative"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Provide{ + DHT: ProvideDHT{ + Interval: NewOptionalDuration(tt.interval), + }, + } + + err := ValidateProvideConfig(cfg) + + if tt.wantErr { + require.Error(t, err, "expected error for interval=%v", tt.interval) + if tt.errMsg != "" { + assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch") + } + } else { + require.NoError(t, err, "unexpected error for interval=%v", tt.interval) + } + }) + } +} + +func TestValidateProvideConfig_MaxWorkers(t *testing.T) { + tests := []struct { + name string + maxWorkers int64 + wantErr bool + errMsg string + }{ + {"valid default", 16, false, ""}, + {"valid high", 100, false, ""}, + {"valid low", 1, false, ""}, + {"invalid zero", 0, true, "must be positive"}, + {"invalid negative", -1, true, "must be positive"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Provide{ + DHT: ProvideDHT{ + MaxWorkers: NewOptionalInteger(tt.maxWorkers), + }, + } + + err := ValidateProvideConfig(cfg) + + if tt.wantErr { + require.Error(t, err, "expected error for maxWorkers=%d", tt.maxWorkers) + if tt.errMsg != "" { + assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch") + } + } else { + require.NoError(t, err, "unexpected error for maxWorkers=%d", tt.maxWorkers) + } + }) + } +} + +func TestShouldProvideForStrategy(t *testing.T) { + t.Run("all strategy always provides", 
func(t *testing.T) { + // ProvideStrategyAll should return true regardless of flags + testCases := []struct{ pinned, pinnedRoot, mfs bool }{ + {false, false, false}, + {true, true, true}, + {true, false, false}, + } + + for _, tc := range testCases { + assert.True(t, ShouldProvideForStrategy( + ProvideStrategyAll, tc.pinned, tc.pinnedRoot, tc.mfs)) + } + }) + + t.Run("single strategies match only their flag", func(t *testing.T) { + tests := []struct { + name string + strategy ProvideStrategy + pinned, pinnedRoot, mfs bool + want bool + }{ + {"pinned: matches when pinned=true", ProvideStrategyPinned, true, false, false, true}, + {"pinned: ignores other flags", ProvideStrategyPinned, false, true, true, false}, + + {"roots: matches when pinnedRoot=true", ProvideStrategyRoots, false, true, false, true}, + {"roots: ignores other flags", ProvideStrategyRoots, true, false, true, false}, + + {"mfs: matches when mfs=true", ProvideStrategyMFS, false, false, true, true}, + {"mfs: ignores other flags", ProvideStrategyMFS, true, true, false, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs) + assert.Equal(t, tt.want, got) + }) + } + }) + + t.Run("combined strategies use OR logic (else-if bug fix)", func(t *testing.T) { + // CRITICAL: Tests the fix where bitflag combinations (pinned+mfs) didn't work + // because of else-if instead of separate if statements + tests := []struct { + name string + strategy ProvideStrategy + pinned, pinnedRoot, mfs bool + want bool + }{ + // pinned|mfs: provide if EITHER matches + {"pinned|mfs when pinned", ProvideStrategyPinned | ProvideStrategyMFS, true, false, false, true}, + {"pinned|mfs when mfs", ProvideStrategyPinned | ProvideStrategyMFS, false, false, true, true}, + {"pinned|mfs when both", ProvideStrategyPinned | ProvideStrategyMFS, true, false, true, true}, + {"pinned|mfs when neither", ProvideStrategyPinned | ProvideStrategyMFS, 
false, false, false, false}, + + // roots|mfs + {"roots|mfs when root", ProvideStrategyRoots | ProvideStrategyMFS, false, true, false, true}, + {"roots|mfs when mfs", ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true}, + {"roots|mfs when neither", ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false}, + + // pinned|roots + {"pinned|roots when pinned", ProvideStrategyPinned | ProvideStrategyRoots, true, false, false, true}, + {"pinned|roots when root", ProvideStrategyPinned | ProvideStrategyRoots, false, true, false, true}, + {"pinned|roots when neither", ProvideStrategyPinned | ProvideStrategyRoots, false, false, false, false}, + + // triple combination + {"all-three when any matches", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true}, + {"all-three when none match", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs) + assert.Equal(t, tt.want, got) + }) + } + }) + + t.Run("zero strategy never provides", func(t *testing.T) { + assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), false, false, false)) + assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), true, true, true)) + }) +} diff --git a/config/provider.go b/config/provider.go index f2b5afe05..e3d5a4052 100644 --- a/config/provider.go +++ b/config/provider.go @@ -1,5 +1,16 @@ package config +// Provider configuration describes how NEW CIDs are announced the moment they are created. +// For periodical reprovide configuration, see Provide.* +// +// Deprecated: use Provide instead. This will be removed in a future release. type Provider struct { - Strategy string // Which keys to announce + // Deprecated: use Provide.Enabled instead. This will be removed in a future release. 
+ Enabled Flag `json:",omitempty"` + + // Deprecated: unused, you are likely looking for Provide.Strategy instead. This will be removed in a future release. + Strategy *OptionalString `json:",omitempty"` + + // Deprecated: use Provide.DHT.MaxWorkers instead. This will be removed in a future release. + WorkerCount *OptionalInteger `json:",omitempty"` } diff --git a/config/reprovider.go b/config/reprovider.go index dae9ae6de..0fa5e877a 100644 --- a/config/reprovider.go +++ b/config/reprovider.go @@ -1,13 +1,13 @@ package config -import "time" - -const ( - DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326 - DefaultReproviderStrategy = "all" -) - +// Reprovider configuration describes how CID from local datastore are periodically re-announced to routing systems. +// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provide.* +// +// Deprecated: use Provide instead. This will be removed in a future release. type Reprovider struct { - Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network - Strategy *OptionalString `json:",omitempty"` // Which keys to announce + // Deprecated: use Provide.DHT.Interval instead. This will be removed in a future release. + Interval *OptionalDuration `json:",omitempty"` + + // Deprecated: use Provide.Strategy instead. This will be removed in a future release. + Strategy *OptionalString `json:",omitempty"` } diff --git a/config/routing.go b/config/routing.go index 231cbca73..d68016e4e 100644 --- a/config/routing.go +++ b/config/routing.go @@ -3,20 +3,39 @@ package config import ( "encoding/json" "fmt" + "os" "runtime" + "strings" +) + +const ( + DefaultAcceleratedDHTClient = false + DefaultLoopbackAddressesOnLanDHT = false + DefaultRoutingType = "auto" + CidContactRoutingURL = "https://cid.contact" + PublicGoodDelegatedRoutingURL = "https://delegated-ipfs.dev" // cid.contact + amino dht (incl. 
IPNS PUTs) + EnvHTTPRouters = "IPFS_HTTP_ROUTERS" + EnvHTTPRoutersFilterProtocols = "IPFS_HTTP_ROUTERS_FILTER_PROTOCOLS" ) var ( - DefaultAcceleratedDHTClient = false - DefaultLoopbackAddressesOnLanDHT = false + // Default filter-protocols to pass along with delegated routing requests (as defined in IPIP-484) + // and also filter out locally + DefaultHTTPRoutersFilterProtocols = getEnvOrDefault(EnvHTTPRoutersFilterProtocols, []string{ + "unknown", // allow results without protocol list, we can do libp2p identify to test them + "transport-bitswap", + // http is added dynamically in routing/delegated.go. + // 'transport-ipfs-gateway-http' + }) ) // Routing defines configuration options for libp2p routing. type Routing struct { // Type sets default daemon routing mode. // - // Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", or "custom". + // Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", "delegated", or "custom". // When unset or set to "auto", DHT and implicit routers are used. + // When "delegated" is set, only HTTP delegated routers and IPNS publishers are used (no DHT). // When "custom" is set, user-provided Routing.Routers is used. 
Type *OptionalString `json:",omitempty"` @@ -24,9 +43,14 @@ type Routing struct { LoopbackAddressesOnLanDHT Flag `json:",omitempty"` - Routers Routers + IgnoreProviders []string `json:",omitempty"` - Methods Methods + // Simplified configuration used by default when Routing.Type=auto|autoclient + DelegatedRouters []string + + // Advanced configuration used when Routing.Type=custom + Routers Routers `json:",omitempty"` + Methods Methods `json:",omitempty"` } type Router struct { @@ -180,3 +204,67 @@ type ConfigRouter struct { type Method struct { RouterName string } + +// getEnvOrDefault reads space or comma separated strings from env if present, +// and uses provided defaultValue as a fallback +func getEnvOrDefault(key string, defaultValue []string) []string { + if value, exists := os.LookupEnv(key); exists { + splitFunc := func(r rune) bool { return r == ',' || r == ' ' } + return strings.FieldsFunc(value, splitFunc) + } + return defaultValue +} + +// HasHTTPProviderConfigured checks if the node is configured to use HTTP routers +// for providing content announcements. This is used when determining if the node +// can provide content even when not connected to libp2p peers. +// +// Note: Right now we only support delegated HTTP content providing if Routing.Type=custom +// and Routing.Routers are configured according to: +// https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md#configuration-file-example +// +// This uses the `ProvideBitswap` request type that is not documented anywhere, +// because we hoped something like IPIP-378 (https://github.com/ipfs/specs/pull/378) +// would get finalized and we'd switch to that. It never happened due to politics, +// and now we are stuck with ProvideBitswap being the only API that works. 
+// Some people have reverse engineered it (example: +// https://discuss.ipfs.tech/t/only-peers-found-from-dht-seem-to-be-getting-used-as-relays-so-cant-use-http-routers/19545/9) +// and use it, so what we do here is the bare minimum to ensure their use case works +// using this old API until something better is available. +func (c *Config) HasHTTPProviderConfigured() bool { + if len(c.Routing.Routers) == 0 { + // No "custom" routers + return false + } + method, ok := c.Routing.Methods[MethodNameProvide] + if !ok { + // No provide method configured + return false + } + return c.routerSupportsHTTPProviding(method.RouterName) +} + +// routerSupportsHTTPProviding checks if the supplied custom router is or +// includes an HTTP-based router. +func (c *Config) routerSupportsHTTPProviding(routerName string) bool { + rp, ok := c.Routing.Routers[routerName] + if !ok { + // Router configured for providing doesn't exist + return false + } + + switch rp.Type { + case RouterTypeHTTP: + return true + case RouterTypeParallel, RouterTypeSequential: + // Check if any child router supports HTTP + if children, ok := rp.Parameters.(*ComposableRouterParams); ok { + for _, childRouter := range children.Routers { + if c.routerSupportsHTTPProviding(childRouter.RouterName) { + return true + } + } + } + } + return false +} diff --git a/config/swarm.go b/config/swarm.go index f15634b57..9e5460c26 100644 --- a/config/swarm.go +++ b/config/swarm.go @@ -65,8 +65,6 @@ type RelayService struct { // BufferSize is the size of the relayed connection buffers. BufferSize *OptionalInteger `json:",omitempty"` - // MaxReservationsPerPeer is the maximum number of reservations originating from the same peer. - MaxReservationsPerPeer *OptionalInteger `json:",omitempty"` // MaxReservationsPerIP is the maximum number of reservations originating from the same IP address. 
MaxReservationsPerIP *OptionalInteger `json:",omitempty"` // MaxReservationsPerASN is the maximum number of reservations origination from the same ASN. @@ -106,10 +104,11 @@ type Transports struct { // ConnMgr defines configuration options for the libp2p connection manager. type ConnMgr struct { - Type *OptionalString `json:",omitempty"` - LowWater *OptionalInteger `json:",omitempty"` - HighWater *OptionalInteger `json:",omitempty"` - GracePeriod *OptionalDuration `json:",omitempty"` + Type *OptionalString `json:",omitempty"` + LowWater *OptionalInteger `json:",omitempty"` + HighWater *OptionalInteger `json:",omitempty"` + GracePeriod *OptionalDuration `json:",omitempty"` + SilencePeriod *OptionalDuration `json:",omitempty"` } // ResourceMgr defines configuration options for the libp2p Network Resource Manager @@ -119,7 +118,7 @@ type ResourceMgr struct { Enabled Flag `json:",omitempty"` Limits swarmLimits `json:",omitempty"` - MaxMemory *OptionalString `json:",omitempty"` + MaxMemory *OptionalBytes `json:",omitempty"` MaxFileDescriptors *OptionalInteger `json:",omitempty"` // A list of multiaddrs that can bypass normal system limits (but are still diff --git a/config/types.go b/config/types.go index 506139318..47738f9f2 100644 --- a/config/types.go +++ b/config/types.go @@ -7,6 +7,8 @@ import ( "io" "strings" "time" + + humanize "github.com/dustin/go-humanize" ) // Strings is a helper type that (un)marshals a single string to/from a single @@ -115,6 +117,16 @@ func (f Flag) String() string { } } +// ResolveBoolFromConfig returns the resolved boolean value based on: +// - If userSet is true, returns userValue (user explicitly set the flag) +// - Otherwise, uses configFlag.WithDefault(defaultValue) (respects config or falls back to default) +func ResolveBoolFromConfig(userValue bool, userSet bool, configFlag Flag, defaultValue bool) bool { + if userSet { + return userValue + } + return configFlag.WithDefault(defaultValue) +} + var ( _ json.Unmarshaler = (*Flag)(nil) 
_ json.Marshaler = (*Flag)(nil) @@ -425,8 +437,79 @@ func (p OptionalString) String() string { } var ( - _ json.Unmarshaler = (*OptionalInteger)(nil) - _ json.Marshaler = (*OptionalInteger)(nil) + _ json.Unmarshaler = (*OptionalString)(nil) + _ json.Marshaler = (*OptionalString)(nil) +) + +// OptionalBytes represents a byte size that has a default value +// +// When encoded in json, Default is encoded as "null". +// Stores the original string representation and parses on access. +// Embeds OptionalString to share common functionality. +type OptionalBytes struct { + OptionalString +} + +// NewOptionalBytes returns an OptionalBytes from a string. +func NewOptionalBytes(s string) *OptionalBytes { + return &OptionalBytes{OptionalString{value: &s}} +} + +// IsDefault returns if this is a default optional byte value. +func (p *OptionalBytes) IsDefault() bool { + if p == nil { + return true + } + return p.OptionalString.IsDefault() +} + +// WithDefault resolves the byte size with the given default. +// Parses the stored string value using humanize.ParseBytes. +func (p *OptionalBytes) WithDefault(defaultValue uint64) (value uint64) { + if p.IsDefault() { + return defaultValue + } + strValue := p.OptionalString.WithDefault("") + bytes, err := humanize.ParseBytes(strValue) + if err != nil { + // This should never happen as values are validated during UnmarshalJSON. + // If it does, it indicates either config corruption or a programming error. + panic(fmt.Sprintf("invalid byte size in OptionalBytes: %q - %v", strValue, err)) + } + return bytes +} + +// UnmarshalJSON validates the input is a parseable byte size. 
+func (p *OptionalBytes) UnmarshalJSON(input []byte) error { + switch string(input) { + case "null", "undefined": + *p = OptionalBytes{} + default: + var value interface{} + err := json.Unmarshal(input, &value) + if err != nil { + return err + } + switch v := value.(type) { + case float64: + str := fmt.Sprintf("%.0f", v) + p.value = &str + case string: + _, err := humanize.ParseBytes(v) + if err != nil { + return err + } + p.value = &v + default: + return fmt.Errorf("unable to parse byte size, expected a size string (e.g., \"5GiB\") or a number, but got %T", v) + } + } + return nil +} + +var ( + _ json.Unmarshaler = (*OptionalBytes)(nil) + _ json.Marshaler = (*OptionalBytes)(nil) ) type swarmLimits doNotUse diff --git a/config/types_test.go b/config/types_test.go index 7ea7506f1..293231fb8 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -5,6 +5,9 @@ import ( "encoding/json" "testing" "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestOptionalDuration(t *testing.T) { @@ -509,3 +512,125 @@ func TestOptionalString(t *testing.T) { } } } + +func TestOptionalBytes(t *testing.T) { + makeStringPointer := func(v string) *string { return &v } + + t.Run("default value", func(t *testing.T) { + var b OptionalBytes + assert.True(t, b.IsDefault()) + assert.Equal(t, uint64(0), b.WithDefault(0)) + assert.Equal(t, uint64(1024), b.WithDefault(1024)) + assert.Equal(t, "default", b.String()) + }) + + t.Run("non-default value", func(t *testing.T) { + b := OptionalBytes{OptionalString{value: makeStringPointer("1MiB")}} + assert.False(t, b.IsDefault()) + assert.Equal(t, uint64(1048576), b.WithDefault(512)) + assert.Equal(t, "1MiB", b.String()) + }) + + t.Run("JSON roundtrip", func(t *testing.T) { + testCases := []struct { + jsonInput string + jsonOutput string + expectedValue string + }{ + {"null", "null", ""}, + {"\"256KiB\"", "\"256KiB\"", "256KiB"}, + {"\"1MiB\"", "\"1MiB\"", "1MiB"}, + {"\"5GiB\"", "\"5GiB\"", "5GiB"}, + 
{"\"256KB\"", "\"256KB\"", "256KB"}, + {"1048576", "\"1048576\"", "1048576"}, + } + + for _, tc := range testCases { + t.Run(tc.jsonInput, func(t *testing.T) { + var b OptionalBytes + err := json.Unmarshal([]byte(tc.jsonInput), &b) + require.NoError(t, err) + + if tc.expectedValue == "" { + assert.Nil(t, b.value) + } else { + require.NotNil(t, b.value) + assert.Equal(t, tc.expectedValue, *b.value) + } + + out, err := json.Marshal(b) + require.NoError(t, err) + assert.Equal(t, tc.jsonOutput, string(out)) + }) + } + }) + + t.Run("parsing byte sizes", func(t *testing.T) { + testCases := []struct { + input string + expected uint64 + }{ + {"256KiB", 262144}, + {"1MiB", 1048576}, + {"5GiB", 5368709120}, + {"256KB", 256000}, + {"1048576", 1048576}, + } + + for _, tc := range testCases { + t.Run(tc.input, func(t *testing.T) { + var b OptionalBytes + err := json.Unmarshal([]byte("\""+tc.input+"\""), &b) + require.NoError(t, err) + assert.Equal(t, tc.expected, b.WithDefault(0)) + }) + } + }) + + t.Run("omitempty", func(t *testing.T) { + type Foo struct { + B *OptionalBytes `json:",omitempty"` + } + + out, err := json.Marshal(new(Foo)) + require.NoError(t, err) + assert.Equal(t, "{}", string(out)) + + var foo2 Foo + err = json.Unmarshal(out, &foo2) + require.NoError(t, err) + + if foo2.B != nil { + assert.Equal(t, uint64(1024), foo2.B.WithDefault(1024)) + assert.True(t, foo2.B.IsDefault()) + } else { + // When field is omitted, pointer is nil which is also considered default + t.Log("B is nil, which is acceptable for omitempty") + } + }) + + t.Run("invalid values", func(t *testing.T) { + invalidInputs := []string{ + "\"5XiB\"", "\"invalid\"", "\"\"", "[]", "{}", + } + + for _, invalid := range invalidInputs { + t.Run(invalid, func(t *testing.T) { + var b OptionalBytes + err := json.Unmarshal([]byte(invalid), &b) + assert.Error(t, err) + }) + } + }) + + t.Run("panic on invalid stored value", func(t *testing.T) { + // This tests that if somehow an invalid value gets stored + // 
(bypassing UnmarshalJSON validation), WithDefault will panic + invalidValue := "invalid-size" + b := OptionalBytes{OptionalString{value: &invalidValue}} + + assert.Panics(t, func() { + b.WithDefault(1024) + }, "should panic on invalid stored value") + }) +} diff --git a/config/version.go b/config/version.go index 8096107bb..8d6d4b6a6 100644 --- a/config/version.go +++ b/config/version.go @@ -2,7 +2,7 @@ package config const DefaultSwarmCheckPercentThreshold = 5 -// Version allows controling things like custom user agent and update checks. +// Version allows controlling things like custom user agent and update checks. type Version struct { // Optional suffix to the AgentVersion presented by `ipfs id` and exposed // via libp2p identify protocol. diff --git a/core/commands/active.go b/core/commands/active.go index 786075f01..aacadd676 100644 --- a/core/commands/active.go +++ b/core/commands/active.go @@ -3,7 +3,7 @@ package commands import ( "fmt" "io" - "sort" + "slices" "text/tabwriter" "time" @@ -60,7 +60,7 @@ Lists running and recently run commands. for k := range req.Options { keys = append(keys, k) } - sort.Strings(keys) + slices.Sort(keys) for _, k := range keys { fmt.Fprintf(tw, "%s=%v,", k, req.Options[k]) diff --git a/core/commands/add.go b/core/commands/add.go index 908613025..cb4bcb312 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -8,15 +8,16 @@ import ( gopath "path" "strconv" "strings" - "time" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/ipfs/kubo/core/commands/cmdutils" "github.com/cheggaaa/pb" "github.com/ipfs/boxo/files" mfs "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/verifcid" cmds "github.com/ipfs/go-ipfs-cmds" ipld "github.com/ipfs/go-ipld-format" coreiface "github.com/ipfs/kubo/core/coreiface" @@ -25,24 +26,7 @@ import ( ) // ErrDepthLimitExceeded indicates that the max depth has been exceeded. 
-var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded") - -type TimeParts struct { - t *time.Time -} - -func (t TimeParts) MarshalJSON() ([]byte, error) { - return t.t.MarshalJSON() -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// The time is expected to be a quoted string in RFC 3339 format. -func (t *TimeParts) UnmarshalJSON(data []byte) (err error) { - // Fractional seconds are handled implicitly by Parse. - tt, err := time.Parse("\"2006-01-02T15:04:05Z\"", string(data)) - *t = TimeParts{&tt} - return -} +var ErrDepthLimitExceeded = errors.New("depth limit exceeded") type AddEvent struct { Name string @@ -55,47 +39,78 @@ type AddEvent struct { } const ( - quietOptionName = "quiet" - quieterOptionName = "quieter" - silentOptionName = "silent" - progressOptionName = "progress" - trickleOptionName = "trickle" - wrapOptionName = "wrap-with-directory" - onlyHashOptionName = "only-hash" - chunkerOptionName = "chunker" - pinOptionName = "pin" - rawLeavesOptionName = "raw-leaves" - noCopyOptionName = "nocopy" - fstoreCacheOptionName = "fscache" - cidVersionOptionName = "cid-version" - hashOptionName = "hash" - inlineOptionName = "inline" - inlineLimitOptionName = "inline-limit" - toFilesOptionName = "to-files" + pinNameOptionName = "pin-name" + quietOptionName = "quiet" + quieterOptionName = "quieter" + silentOptionName = "silent" + progressOptionName = "progress" + trickleOptionName = "trickle" + wrapOptionName = "wrap-with-directory" + onlyHashOptionName = "only-hash" + chunkerOptionName = "chunker" + pinOptionName = "pin" + rawLeavesOptionName = "raw-leaves" + maxFileLinksOptionName = "max-file-links" + maxDirectoryLinksOptionName = "max-directory-links" + maxHAMTFanoutOptionName = "max-hamt-fanout" + noCopyOptionName = "nocopy" + fstoreCacheOptionName = "fscache" + cidVersionOptionName = "cid-version" + hashOptionName = "hash" + inlineOptionName = "inline" + inlineLimitOptionName = "inline-limit" + toFilesOptionName = "to-files" - 
preserveModeOptionName = "preserve-mode" - preserveMtimeOptionName = "preserve-mtime" - modeOptionName = "mode" - mtimeOptionName = "mtime" - mtimeNsecsOptionName = "mtime-nsecs" + preserveModeOptionName = "preserve-mode" + preserveMtimeOptionName = "preserve-mtime" + modeOptionName = "mode" + mtimeOptionName = "mtime" + mtimeNsecsOptionName = "mtime-nsecs" + fastProvideRootOptionName = "fast-provide-root" + fastProvideWaitOptionName = "fast-provide-wait" ) -const adderOutChanSize = 8 +const ( + adderOutChanSize = 8 +) var AddCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Add a file or directory to IPFS.", ShortDescription: ` Adds the content of to IPFS. Use -r to add directories (recursively). + +FAST PROVIDE OPTIMIZATION: + +When you add content to IPFS, the sweep provider queues it for efficient +DHT provides over time. While this is resource-efficient, other peers won't +find your content immediately after 'ipfs add' completes. + +To make sharing faster, 'ipfs add' does an immediate provide of the root CID +to the DHT in addition to the regular queue. This complements the sweep provider: +fast-provide handles the urgent case (root CIDs that users share and reference), +while the sweep provider efficiently provides all blocks according to +Provide.Strategy over time. + +By default, this immediate provide runs in the background without blocking +the command. If you need certainty that the root CID is discoverable before +the command returns (e.g., sharing a link immediately), use --fast-provide-wait +to wait for the provide to complete. Use --fast-provide-root=false to skip +this optimization. + +This works best with the sweep provider and accelerated DHT client. +Automatically skipped when DHT is not available. `, LongDescription: ` Adds the content of to IPFS. Use -r to add directories. -Note that directories are added recursively, to form the IPFS -MerkleDAG. 
+Note that directories are added recursively, and big files are chunked, +to form the IPFS MerkleDAG. Learn more: https://docs.ipfs.tech/concepts/merkle-dag/ -If the daemon is not running, it will just add locally. +If the daemon is not running, it will just add locally to the repo at $IPFS_PATH. If the daemon is started later, it will be advertised after a few -seconds when the reprovider runs. +seconds when the provide system runs. + +BASIC EXAMPLES: The wrap option, '-w', wraps the file (or files, if using the recursive option) in a directory. This directory contains only @@ -115,6 +130,12 @@ You can now refer to the added file in a gateway, like so: Files imported with 'ipfs add' are protected from GC (implicit '--pin=true'), but it is up to you to remember the returned CID to get the data back later. +If you need to back up or transport content-addressed data using a non-IPFS +medium, CID can be preserved with CAR files. +See 'dag export' and 'dag import' for more information. + +MFS INTEGRATION: + Passing '--to-files' creates a reference in Files API (MFS), making it easier to find it in the future: @@ -126,6 +147,8 @@ to find it in the future: See 'ipfs files --help' to learn more about using MFS for keeping track of added files and directories. +CHUNKING EXAMPLES: + The chunker option, '-s', specifies the chunking strategy that dictates how to break files into blocks. Blocks with same content can be deduplicated. Different chunking strategies will produce different @@ -146,14 +169,16 @@ want to use a 1024 times larger chunk sizes for most files. 
You can now check what blocks have been created by: - > ipfs object links QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 + > ipfs ls QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059 Qmf7ZQeSxq2fJVJbCmgTrLLVN9tDR9Wy5k75DxQKuz5Gyt 1195 - > ipfs object links Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn + > ipfs ls Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059 QmerURi9k4XzKCaaPbsK6BL5pMEjF7PGphjDvkkjDtsVf3 868 QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338 +ADVANCED CONFIGURATION: + Finally, a note on hash (CID) determinism and 'ipfs add' command. Almost all the flags provided by this command will change the final CID, and @@ -161,9 +186,11 @@ new flags may be added in the future. It is not guaranteed for the implicit defaults of 'ipfs add' to remain the same in future Kubo releases, or for other IPFS software to use the same import parameters as Kubo. -If you need to back up or transport content-addressed data using a non-IPFS -medium, CID can be preserved with CAR files. -See 'dag export' and 'dag import' for more information. +Note: CIDv1 is automatically used when using non-default options like custom +hash functions or when raw-leaves is explicitly enabled. + +Use Import.* configuration options to override global implicit defaults: +https://github.com/ipfs/kubo/blob/master/docs/config.md#import `, }, @@ -171,34 +198,48 @@ See 'dag export' and 'dag import' for more information. 
cmds.FileArg("path", true, true, "The path to a file to be added to IPFS.").EnableRecursive().EnableStdin(), }, Options: []cmds.Option{ + // Input Processing cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive) cmds.OptionDerefArgs, // a builtin option that resolves passed in filesystem links (--dereference-args) cmds.OptionStdinName, // a builtin option that optionally allows wrapping stdin into a named file cmds.OptionHidden, cmds.OptionIgnore, cmds.OptionIgnoreRules, + // Output Control cmds.BoolOption(quietOptionName, "q", "Write minimal output."), cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."), cmds.BoolOption(silentOptionName, "Write no output."), cmds.BoolOption(progressOptionName, "p", "Stream progress data."), - cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."), + // Basic Add Behavior cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."), cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."), - cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash"), - cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes."), - cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. (experimental)"), - cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"), - cmds.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true."), - cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. (experimental)"), - cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. (experimental)"), - cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. 
(experimental)").WithDefault(32), cmds.BoolOption(pinOptionName, "Pin locally to protect added files from garbage collection.").WithDefault(true), + cmds.StringOption(pinNameOptionName, "Name to use for the pin. Requires explicit value (e.g., --pin-name=myname)."), + // MFS Integration cmds.StringOption(toFilesOptionName, "Add reference to Files API (MFS) at the provided path."), - cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. Disables raw-leaves. (experimental)"), - cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. Disables raw-leaves. (experimental)"), - cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. Disables raw-leaves. (experimental)"), - cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). Disables raw-leaves. (experimental)"), + // CID & Hashing + cmds.IntOption(cidVersionOptionName, "CID version (0 or 1). CIDv1 automatically enables raw-leaves and is required for non-sha2-256 hashes. Default: Import.CidVersion"), + cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. Default: Import.HashFunction"), + cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. Note: CIDv1 automatically enables raw-leaves. Default: false for CIDv0, true for CIDv1 (Import.UnixFSRawLeaves)"), + // Chunking & DAG Structure + cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Files larger than chunk size are split into multiple blocks. Default: Import.UnixFSChunker"), + cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."), + // Advanced UnixFS Limits + cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. WARNING: experimental. 
Default: Import.UnixFSFileMaxLinks"), + cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSDirectoryMaxLinks"), + cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"), + // Experimental Features + cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. WARNING: experimental"), + cmds.IntOption(inlineLimitOptionName, fmt.Sprintf("Maximum block size to inline. Maximum: %d bytes. WARNING: experimental", verifcid.DefaultMaxIdentityDigestSize)).WithDefault(32), + cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. WARNING: experimental"), + cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. WARNING: experimental"), + cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), + cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), + cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), + cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). 
WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"), + cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CID to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"), + cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"), }, PreRun: func(req *cmds.Request, env cmds.Environment) error { quiet, _ := req.Options[quietOptionName].(bool) @@ -239,19 +280,38 @@ See 'dag export' and 'dag import' for more information. silent, _ := req.Options[silentOptionName].(bool) chunker, _ := req.Options[chunkerOptionName].(string) dopin, _ := req.Options[pinOptionName].(bool) + pinName, pinNameSet := req.Options[pinNameOptionName].(string) rawblks, rbset := req.Options[rawLeavesOptionName].(bool) + maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int) + maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int) + maxHAMTFanout, maxHAMTFanoutSet := req.Options[maxHAMTFanoutOptionName].(int) nocopy, _ := req.Options[noCopyOptionName].(bool) fscache, _ := req.Options[fstoreCacheOptionName].(bool) cidVer, cidVerSet := req.Options[cidVersionOptionName].(int) hashFunStr, _ := req.Options[hashOptionName].(string) inline, _ := req.Options[inlineOptionName].(bool) inlineLimit, _ := req.Options[inlineLimitOptionName].(int) + + // Validate inline-limit doesn't exceed the maximum identity digest size + if inline && inlineLimit > verifcid.DefaultMaxIdentityDigestSize { + return fmt.Errorf("inline-limit %d exceeds maximum allowed size of %d bytes", inlineLimit, verifcid.DefaultMaxIdentityDigestSize) + } + + // Validate pin name + if pinNameSet { + if err := cmdutils.ValidatePinName(pinName); err != nil { + return err + } + } + toFilesStr, toFilesSet := 
req.Options[toFilesOptionName].(string) preserveMode, _ := req.Options[preserveModeOptionName].(bool) preserveMtime, _ := req.Options[preserveMtimeOptionName].(bool) mode, _ := req.Options[modeOptionName].(uint) mtime, _ := req.Options[mtimeOptionName].(int64) mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint) + fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool) + fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool) if chunker == "" { chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker) @@ -266,11 +326,31 @@ See 'dag export' and 'dag import' for more information. cidVer = int(cfg.Import.CidVersion.WithDefault(config.DefaultCidVersion)) } + // Pin names are only used when explicitly provided via --pin-name=value + if !rbset && cfg.Import.UnixFSRawLeaves != config.Default { rbset = true rawblks = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves) } + if !maxFileLinksSet && !cfg.Import.UnixFSFileMaxLinks.IsDefault() { + maxFileLinksSet = true + maxFileLinks = int(cfg.Import.UnixFSFileMaxLinks.WithDefault(config.DefaultUnixFSFileMaxLinks)) + } + + if !maxDirectoryLinksSet && !cfg.Import.UnixFSDirectoryMaxLinks.IsDefault() { + maxDirectoryLinksSet = true + maxDirectoryLinks = int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks)) + } + + if !maxHAMTFanoutSet && !cfg.Import.UnixFSHAMTDirectoryMaxFanout.IsDefault() { + maxHAMTFanoutSet = true + maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout)) + } + + fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot) + fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait) + // Storing optional mode or mtime (UnixFS 1.5) requires root block // to 
always be 'dag-pb' and not 'raw'. Below adjusts raw-leaves setting, if possible. if preserveMode || preserveMtime || mode != 0 || mtime != 0 { @@ -287,6 +367,12 @@ See 'dag export' and 'dag import' for more information. if onlyHash && toFilesSet { return fmt.Errorf("%s and %s options are not compatible", onlyHashOptionName, toFilesOptionName) } + if !dopin && pinNameSet { + return fmt.Errorf("%s option requires %s to be set", pinNameOptionName, pinOptionName) + } + if wrap && toFilesSet { + return fmt.Errorf("%s and %s options are not compatible", wrapOptionName, toFilesOptionName) + } hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)] if !ok { @@ -313,7 +399,7 @@ See 'dag export' and 'dag import' for more information. options.Unixfs.Chunker(chunker), - options.Unixfs.Pin(dopin), + options.Unixfs.Pin(dopin, pinName), options.Unixfs.HashOnly(onlyHash), options.Unixfs.FsCache(fscache), options.Unixfs.Nocopy(nocopy), @@ -343,6 +429,18 @@ See 'dag export' and 'dag import' for more information. opts = append(opts, options.Unixfs.RawLeaves(rawblks)) } + if maxFileLinksSet { + opts = append(opts, options.Unixfs.MaxFileLinks(maxFileLinks)) + } + + if maxDirectoryLinksSet { + opts = append(opts, options.Unixfs.MaxDirectoryLinks(maxDirectoryLinks)) + } + + if maxHAMTFanoutSet { + opts = append(opts, options.Unixfs.MaxHAMTFanout(maxHAMTFanout)) + } + if trickle { opts = append(opts, options.Unixfs.Layout(options.TrickleLayout)) } @@ -355,11 +453,12 @@ See 'dag export' and 'dag import' for more information. } var added int var fileAddedToMFS bool + var lastRootCid path.ImmutablePath // Track the root CID for fast-provide addit := toadd.Entries() for addit.Next() { _, dir := addit.Node().(files.Directory) errCh := make(chan error, 1) - events := make(chan interface{}, adderOutChanSize) + events := make(chan any, adderOutChanSize) opts[len(opts)-1] = options.Unixfs.Events(events) go func() { @@ -371,8 +470,16 @@ See 'dag export' and 'dag import' for more information. 
return } + // Store the root CID for potential fast-provide operation + lastRootCid = pathAdded + // creating MFS pointers when optional --to-files is set if toFilesSet { + if addit.Name() == "" { + errCh <- fmt.Errorf("%s: cannot add unnamed files to MFS", toFilesOptionName) + return + } + if toFilesStr == "" { toFilesStr = "/" } @@ -489,12 +596,29 @@ See 'dag export' and 'dag import' for more information. return fmt.Errorf("expected a file argument") } + // Apply fast-provide-root if the flag is enabled + if fastProvideRoot && (lastRootCid != path.ImmutablePath{}) { + cfg, err := ipfsNode.Repo.Config() + if err != nil { + return err + } + if err := cmdenv.ExecuteFastProvide(req.Context, ipfsNode, cfg, lastRootCid.RootCid(), fastProvideWait, dopin, dopin, toFilesSet); err != nil { + return err + } + } else if !fastProvideRoot { + if fastProvideWait { + log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true) + } else { + log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config") + } + } + return nil }, PostRun: cmds.PostRunMap{ cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error { sizeChan := make(chan int64, 1) - outChan := make(chan interface{}) + outChan := make(chan any) req := res.Request() // Could be slow. 
diff --git a/core/commands/bitswap.go b/core/commands/bitswap.go index 07f91fb0f..7bddaac60 100644 --- a/core/commands/bitswap.go +++ b/core/commands/bitswap.go @@ -5,7 +5,6 @@ import ( "io" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" - e "github.com/ipfs/kubo/core/commands/e" humanize "github.com/dustin/go-humanize" bitswap "github.com/ipfs/boxo/bitswap" @@ -25,7 +24,7 @@ var BitswapCmd = &cmds.Command{ "stat": bitswapStatCmd, "wantlist": showWantlistCmd, "ledger": ledgerCmd, - "reprovide": reprovideCmd, + "reprovide": deprecatedBitswapReprovideCmd, }, } @@ -33,6 +32,17 @@ const ( peerOptionName = "peer" ) +var deprecatedBitswapReprovideCmd = &cmds.Command{ + Status: cmds.Deprecated, + Helptext: cmds.HelpText{ + Tagline: "Deprecated command to announce to bitswap. Use 'ipfs routing reprovide' instead.", + ShortDescription: ` +'ipfs bitswap reprovide' is a legacy plumbing command used to announce to DHT. +Deprecated, use modern 'ipfs routing reprovide' instead.`, + }, + Run: reprovideRoutingCmd.Run, // alias to routing reprovide to not break existing users +} + var showWantlistCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Show blocks currently on the wantlist.", @@ -53,10 +63,7 @@ Print out all blocks currently on the bitswap wantlist for the local peer.`, return ErrNotOnline } - bs, ok := nd.Exchange.(*bitswap.Bitswap) - if !ok { - return e.TypeErr(bs, nd.Exchange) - } + bs := nd.Bitswap pstr, found := req.Options[peerOptionName].(string) if found { @@ -112,12 +119,7 @@ var bitswapStatCmd = &cmds.Command{ return cmds.Errorf(cmds.ErrClient, "unable to run offline: %s", ErrNotOnline) } - bs, ok := nd.Exchange.(*bitswap.Bitswap) - if !ok { - return e.TypeErr(bs, nd.Exchange) - } - - st, err := bs.Stat() + st, err := nd.Bitswap.Stat() if err != nil { return err } @@ -134,7 +136,6 @@ var bitswapStatCmd = &cmds.Command{ human, _ := req.Options[bitswapHumanOptionName].(bool) fmt.Fprintln(w, "bitswap status") - fmt.Fprintf(w, "\tprovides buffer: %d / 
%d\n", s.ProvideBufLen, bitswap.HasBlockBufferSize) fmt.Fprintf(w, "\tblocks received: %d\n", s.BlocksReceived) fmt.Fprintf(w, "\tblocks sent: %d\n", s.BlocksSent) if human { @@ -190,17 +191,12 @@ prints the ledger associated with a given peer. return ErrNotOnline } - bs, ok := nd.Exchange.(*bitswap.Bitswap) - if !ok { - return e.TypeErr(bs, nd.Exchange) - } - partner, err := peer.Decode(req.Arguments[0]) if err != nil { return err } - return cmds.EmitOnce(res, bs.LedgerForPeer(partner)) + return cmds.EmitOnce(res, nd.Bitswap.LedgerForPeer(partner)) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *server.Receipt) error { @@ -215,29 +211,3 @@ prints the ledger associated with a given peer. }), }, } - -var reprovideCmd = &cmds.Command{ - Helptext: cmds.HelpText{ - Tagline: "Trigger reprovider.", - ShortDescription: ` -Trigger reprovider to announce our data to network. -`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - nd, err := cmdenv.GetNode(env) - if err != nil { - return err - } - - if !nd.IsOnline { - return ErrNotOnline - } - - err = nd.Provider.Reprovide(req.Context) - if err != nil { - return err - } - - return nil - }, -} diff --git a/core/commands/bootstrap.go b/core/commands/bootstrap.go index decf2b271..e5a55dfab 100644 --- a/core/commands/bootstrap.go +++ b/core/commands/bootstrap.go @@ -4,14 +4,14 @@ import ( "errors" "fmt" "io" - "sort" - - cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" - repo "github.com/ipfs/kubo/repo" - fsrepo "github.com/ipfs/kubo/repo/fsrepo" + "slices" + "strings" cmds "github.com/ipfs/go-ipfs-cmds" config "github.com/ipfs/kubo/config" + cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" + repo "github.com/ipfs/kubo/repo" + fsrepo "github.com/ipfs/kubo/repo/fsrepo" peer "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" ) @@ -41,15 +41,15 @@ Running 'ipfs bootstrap' with no arguments will run 
'ipfs bootstrap list'. }, } -const ( - defaultOptionName = "default" -) - var bootstrapAddCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Add peers to the bootstrap list.", ShortDescription: `Outputs a list of peers that were added (that weren't already in the bootstrap list). + +The special values 'default' and 'auto' can be used to add the default +bootstrap peers. Both are equivalent and will add the 'auto' placeholder to +the bootstrap list, which gets resolved using the AutoConf system. ` + bootstrapSecurityWarning, }, @@ -57,29 +57,23 @@ in the bootstrap list). cmds.StringArg("peer", false, true, peerOptionDesc).EnableStdin(), }, - Options: []cmds.Option{ - cmds.BoolOption(defaultOptionName, "Add default bootstrap nodes. (Deprecated, use 'default' subcommand instead)"), - }, - Subcommands: map[string]*cmds.Command{ - "default": bootstrapAddDefaultCmd, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - deflt, _ := req.Options[defaultOptionName].(bool) - - inputPeers := config.DefaultBootstrapAddresses - if !deflt { - if err := req.ParseBodyArgs(); err != nil { - return err - } - - inputPeers = req.Arguments + if err := req.ParseBodyArgs(); err != nil { + return err } + inputPeers := req.Arguments if len(inputPeers) == 0 { return errors.New("no bootstrap peers to add") } + // Convert "default" to "auto" for backward compatibility + for i, peer := range inputPeers { + if peer == "default" { + inputPeers[i] = "auto" + } + } + cfgRoot, err := cmdenv.GetConfigRoot(env) if err != nil { return err @@ -95,6 +89,13 @@ in the bootstrap list). return err } + // Check if trying to add "auto" when AutoConf is disabled + for _, peer := range inputPeers { + if peer == config.AutoPlaceholder && !cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) { + return errors.New("cannot add default bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false). 
Enable AutoConf by setting AutoConf.Enabled=true in your config, or add specific peer addresses instead") + } + } + added, err := bootstrapAdd(r, cfg, inputPeers) if err != nil { return err @@ -110,44 +111,6 @@ in the bootstrap list). }, } -var bootstrapAddDefaultCmd = &cmds.Command{ - Helptext: cmds.HelpText{ - Tagline: "Add default peers to the bootstrap list.", - ShortDescription: `Outputs a list of peers that were added (that weren't already -in the bootstrap list).`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - cfgRoot, err := cmdenv.GetConfigRoot(env) - if err != nil { - return err - } - - r, err := fsrepo.Open(cfgRoot) - if err != nil { - return err - } - - defer r.Close() - cfg, err := r.Config() - if err != nil { - return err - } - - added, err := bootstrapAdd(r, cfg, config.DefaultBootstrapAddresses) - if err != nil { - return err - } - - return cmds.EmitOnce(res, &BootstrapOutput{added}) - }, - Type: BootstrapOutput{}, - Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *BootstrapOutput) error { - return bootstrapWritePeers(w, "added ", out.Peers) - }), - }, -} - const ( bootstrapAllOptionName = "all" ) @@ -251,6 +214,9 @@ var bootstrapListCmd = &cmds.Command{ Tagline: "Show peers in the bootstrap list.", ShortDescription: "Peers are output in the format '/'.", }, + Options: []cmds.Option{ + cmds.BoolOption(configExpandAutoName, "Expand 'auto' placeholders from AutoConf service."), + }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { cfgRoot, err := cmdenv.GetConfigRoot(env) @@ -268,12 +234,16 @@ var bootstrapListCmd = &cmds.Command{ return err } - peers, err := cfg.BootstrapPeers() - if err != nil { - return err + // Check if user wants to expand auto values + expandAuto, _ := req.Options[configExpandAutoName].(bool) + if expandAuto { + // Use the same expansion method as the daemon + expandedBootstrap := 
cfg.BootstrapWithAutoConf() + return cmds.EmitOnce(res, &BootstrapOutput{expandedBootstrap}) } - return cmds.EmitOnce(res, &BootstrapOutput{config.BootstrapPeerStrings(peers)}) + // Simply return the bootstrap config as-is, including any "auto" values + return cmds.EmitOnce(res, &BootstrapOutput{cfg.Bootstrap}) }, Type: BootstrapOutput{}, Encoders: cmds.EncoderMap{ @@ -284,7 +254,9 @@ var bootstrapListCmd = &cmds.Command{ } func bootstrapWritePeers(w io.Writer, prefix string, peers []string) error { - sort.Stable(sort.StringSlice(peers)) + slices.SortStableFunc(peers, func(a, b string) int { + return strings.Compare(a, b) + }) for _, peer := range peers { _, err := w.Write([]byte(prefix + peer + "\n")) if err != nil { @@ -295,7 +267,11 @@ func bootstrapWritePeers(w io.Writer, prefix string, peers []string) error { } func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, error) { + // Validate peers - skip validation for "auto" placeholder for _, p := range peers { + if p == config.AutoPlaceholder { + continue // Skip validation for "auto" placeholder + } m, err := ma.NewMultiaddr(p) if err != nil { return nil, err @@ -345,6 +321,16 @@ func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, er } func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]string, error) { + // Check if bootstrap contains "auto" + hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder) + + if hasAuto && cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) { + // Cannot selectively remove peers when using "auto" bootstrap + // Users should either disable AutoConf or replace "auto" with specific peers + return nil, fmt.Errorf("cannot remove individual bootstrap peers when using 'auto' placeholder: the 'auto' value is managed by AutoConf. 
Either disable AutoConf by setting AutoConf.Enabled=false and replace 'auto' with specific peer addresses, or use 'ipfs bootstrap rm --all' to remove all peers") + } + + // Original logic for non-auto bootstrap removed := make([]peer.AddrInfo, 0, len(toRemove)) keep := make([]peer.AddrInfo, 0, len(cfg.Bootstrap)) @@ -404,16 +390,28 @@ func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]stri } func bootstrapRemoveAll(r repo.Repo, cfg *config.Config) ([]string, error) { - removed, err := cfg.BootstrapPeers() - if err != nil { - return nil, err + // Check if bootstrap contains "auto" - if so, we need special handling + hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder) + + var removed []string + if hasAuto { + // When "auto" is present, we can't parse it as peer.AddrInfo + // Just return the raw bootstrap list as strings for display + removed = slices.Clone(cfg.Bootstrap) + } else { + // Original logic for configs without "auto" + removedPeers, err := cfg.BootstrapPeers() + if err != nil { + return nil, err + } + removed = config.BootstrapPeerStrings(removedPeers) } cfg.Bootstrap = nil if err := r.SetConfig(cfg); err != nil { return nil, err } - return config.BootstrapPeerStrings(removed), nil + return removed, nil } const bootstrapSecurityWarning = ` diff --git a/core/commands/cat.go b/core/commands/cat.go index 6fa1f71b7..38a3e8dfa 100644 --- a/core/commands/cat.go +++ b/core/commands/cat.go @@ -2,7 +2,7 @@ package commands import ( "context" - "fmt" + "errors" "io" "os" @@ -43,13 +43,13 @@ var CatCmd = &cmds.Command{ offset, _ := req.Options[offsetOptionName].(int64) if offset < 0 { - return fmt.Errorf("cannot specify negative offset") + return errors.New("cannot specify negative offset") } max, found := req.Options[lengthOptionName].(int64) if max < 0 { - return fmt.Errorf("cannot specify negative length") + return errors.New("cannot specify negative length") } if !found { max = -1 diff --git a/core/commands/cid.go 
b/core/commands/cid.go index b2e8f131d..0be9f6cc1 100644 --- a/core/commands/cid.go +++ b/core/commands/cid.go @@ -1,9 +1,11 @@ package commands import ( + "cmp" + "errors" "fmt" "io" - "sort" + "slices" "strings" "unicode" @@ -33,7 +35,7 @@ var CidCmd = &cmds.Command{ const ( cidFormatOptionName = "f" - cidVerisonOptionName = "v" + cidToVersionOptionName = "v" cidCodecOptionName = "mc" cidMultibaseOptionName = "b" ) @@ -52,13 +54,13 @@ The optional format string is a printf style format string: }, Options: []cmds.Option{ cmds.StringOption(cidFormatOptionName, "Printf style format string.").WithDefault("%s"), - cmds.StringOption(cidVerisonOptionName, "CID version to convert to."), + cmds.StringOption(cidToVersionOptionName, "CID version to convert to."), cmds.StringOption(cidCodecOptionName, "CID multicodec to convert to."), cmds.StringOption(cidMultibaseOptionName, "Multibase to display CID in."), }, Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error { fmtStr, _ := req.Options[cidFormatOptionName].(string) - verStr, _ := req.Options[cidVerisonOptionName].(string) + verStr, _ := req.Options[cidToVersionOptionName].(string) codecStr, _ := req.Options[cidCodecOptionName].(string) baseStr, _ := req.Options[cidMultibaseOptionName].(string) @@ -85,10 +87,10 @@ The optional format string is a printf style format string: } case "0": if opts.newCodec != 0 && opts.newCodec != cid.DagProtobuf { - return fmt.Errorf("cannot convert to CIDv0 with any codec other than dag-pb") + return errors.New("cannot convert to CIDv0 with any codec other than dag-pb") } if baseStr != "" && baseStr != "base58btc" { - return fmt.Errorf("cannot convert to CIDv0 with any multibase other than the implicit base58btc") + return errors.New("cannot convert to CIDv0 with any multibase other than the implicit base58btc") } opts.verConv = toCidV0 case "1": @@ -119,7 +121,8 @@ The optional format string is a printf style format string: return "" }), }, - Type: 
CidFormatRes{}, + Type: CidFormatRes{}, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } type CidFormatRes struct { @@ -149,6 +152,7 @@ Useful when processing third-party CIDs which could come with arbitrary formats. }, PostRun: cidFmtCmd.PostRun, Type: cidFmtCmd.Type, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } type cidFormatOpts struct { @@ -286,10 +290,10 @@ var basesCmd = &cmds.Command{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, val []CodeAndName) error { prefixes, _ := req.Options[prefixOptionName].(bool) numeric, _ := req.Options[numericOptionName].(bool) - sort.Sort(multibaseSorter{val}) + multibaseSorter{val}.Sort() for _, v := range val { code := v.Code - if code < 32 || code >= 127 { + if !unicode.IsPrint(rune(code)) { // don't display non-printable prefixes code = ' ' } @@ -307,7 +311,8 @@ var basesCmd = &cmds.Command{ return nil }), }, - Type: []CodeAndName{}, + Type: []CodeAndName{}, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } const ( @@ -356,7 +361,7 @@ var codecsCmd = &cmds.Command{ Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, val []CodeAndName) error { numeric, _ := req.Options[codecsNumericOptionName].(bool) - sort.Sort(codeAndNameSorter{val}) + codeAndNameSorter{val}.Sort() for _, v := range val { if numeric { fmt.Fprintf(w, "%5d %s\n", v.Code, v.Name) @@ -367,7 +372,8 @@ var codecsCmd = &cmds.Command{ return nil }), }, - Type: []CodeAndName{}, + Type: []CodeAndName{}, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } var hashesCmd = &cmds.Command{ @@ -391,29 +397,29 @@ var hashesCmd = &cmds.Command{ }, Encoders: codecsCmd.Encoders, Type: codecsCmd.Type, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } type multibaseSorter struct { data []CodeAndName } -func (s multibaseSorter) Len() int { return len(s.data) } -func (s multibaseSorter) Swap(i, j int) { s.data[i], s.data[j] = s.data[j], s.data[i] } - -func (s multibaseSorter) Less(i, j int) 
bool { - a := unicode.ToLower(rune(s.data[i].Code)) - b := unicode.ToLower(rune(s.data[j].Code)) - if a != b { - return a < b - } - // lowecase letters should come before uppercase - return s.data[i].Code > s.data[j].Code +func (s multibaseSorter) Sort() { + slices.SortFunc(s.data, func(a, b CodeAndName) int { + if n := cmp.Compare(unicode.ToLower(rune(a.Code)), unicode.ToLower(rune(b.Code))); n != 0 { + return n + } + // lowercase letters should come before uppercase + return cmp.Compare(b.Code, a.Code) + }) } type codeAndNameSorter struct { data []CodeAndName } -func (s codeAndNameSorter) Len() int { return len(s.data) } -func (s codeAndNameSorter) Swap(i, j int) { s.data[i], s.data[j] = s.data[j], s.data[i] } -func (s codeAndNameSorter) Less(i, j int) bool { return s.data[i].Code < s.data[j].Code } +func (s codeAndNameSorter) Sort() { + slices.SortFunc(s.data, func(a, b CodeAndName) int { + return cmp.Compare(a.Code, b.Code) + }) +} diff --git a/core/commands/cid_test.go b/core/commands/cid_test.go index 106296282..d159521d2 100644 --- a/core/commands/cid_test.go +++ b/core/commands/cid_test.go @@ -40,7 +40,7 @@ func TestCidFmtCmd(t *testing.T) { // Mock request req := &cmds.Request{ Options: map[string]interface{}{ - cidVerisonOptionName: "0", + cidToVersionOptionName: "0", cidMultibaseOptionName: e.MultibaseName, cidFormatOptionName: "%s", }, @@ -91,7 +91,7 @@ func TestCidFmtCmd(t *testing.T) { // Mock request req := &cmds.Request{ Options: map[string]interface{}{ - cidVerisonOptionName: e.Ver, + cidToVersionOptionName: e.Ver, cidMultibaseOptionName: e.MultibaseName, cidFormatOptionName: "%s", }, diff --git a/core/commands/cmdenv/cidbase.go b/core/commands/cmdenv/cidbase.go index 55815f524..926cd24a1 100644 --- a/core/commands/cmdenv/cidbase.go +++ b/core/commands/cmdenv/cidbase.go @@ -16,14 +16,14 @@ var ( ) // GetCidEncoder processes the `cid-base` and `output-cidv1` options and -// returns a encoder to use based on those parameters. 
+// returns an encoder to use based on those parameters. func GetCidEncoder(req *cmds.Request) (cidenc.Encoder, error) { return getCidBase(req, true) } -// GetLowLevelCidEncoder is like GetCidEncoder but meant to be used by -// lower level commands. It differs from GetCidEncoder in that CIDv0 -// are not, by default, auto-upgraded to CIDv1. +// GetLowLevelCidEncoder is like GetCidEncoder but meant to be used by lower +// level commands. It differs from GetCidEncoder in that CIDv0 are not, by +// default, auto-upgraded to CIDv1. func GetLowLevelCidEncoder(req *cmds.Request) (cidenc.Encoder, error) { return getCidBase(req, false) } @@ -52,19 +52,19 @@ func getCidBase(req *cmds.Request, autoUpgrade bool) (cidenc.Encoder, error) { return e, nil } -// CidBaseDefined returns true if the `cid-base` option is specified -// on the command line +// CidBaseDefined returns true if the `cid-base` option is specified on the +// command line func CidBaseDefined(req *cmds.Request) bool { base, _ := req.Options["cid-base"].(string) return base != "" } -// CidEncoderFromPath creates a new encoder that is influenced from -// the encoded Cid in a Path. For CidV0 the multibase from the base -// encoder is used and automatic upgrades are disabled. For CidV1 the -// multibase from the CID is used and upgrades are enabled. +// CidEncoderFromPath creates a new encoder that is influenced from the encoded +// Cid in a Path. For CIDv0 the multibase from the base encoder is used and +// automatic upgrades are disabled. For CIDv1 the multibase from the CID is +// used and upgrades are enabled. // -// This logic is intentionally fuzzy and will match anything of the form +// This logic is intentionally fuzzy and matches anything of the form // `CidLike`, `CidLike/...`, or `/namespace/CidLike/...`. 
// // For example: diff --git a/core/commands/cmdenv/env.go b/core/commands/cmdenv/env.go index fb538dc12..b2a45351e 100644 --- a/core/commands/cmdenv/env.go +++ b/core/commands/cmdenv/env.go @@ -1,15 +1,19 @@ package cmdenv import ( + "context" "fmt" "strconv" "strings" - "github.com/ipfs/kubo/commands" - "github.com/ipfs/kubo/core" - + "github.com/ipfs/go-cid" cmds "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" + routing "github.com/libp2p/go-libp2p/core/routing" + + "github.com/ipfs/kubo/commands" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core" coreiface "github.com/ipfs/kubo/core/coreiface" options "github.com/ipfs/kubo/core/coreiface/options" ) @@ -86,3 +90,103 @@ func needEscape(s string) bool { } return false } + +// provideCIDSync performs a synchronous/blocking provide operation to announce +// the given CID to the DHT. +// +// - If the accelerated DHT client is used, a DHT lookup isn't needed, we +// directly allocate provider records to closest peers. +// - If Provide.DHT.SweepEnabled=true or OptimisticProvide=true, we make an +// optimistic provide call. +// - Else we make a standard provide call (much slower). +// +// IMPORTANT: The caller MUST verify DHT availability using HasActiveDHTClient() +// before calling this function. Calling with a nil or invalid router will cause +// a panic - this is the caller's responsibility to prevent. +func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) error { + return router.Provide(ctx, c, true) +} + +// ExecuteFastProvide immediately provides a root CID to the DHT, bypassing the regular +// provide queue for faster content discovery. This function is reusable across commands +// that add or import content, such as ipfs add and ipfs dag import. 
+// +// Parameters: +// - ctx: context for synchronous provides +// - ipfsNode: the IPFS node instance +// - cfg: node configuration +// - rootCid: the CID to provide +// - wait: whether to block until provide completes (sync mode) +// - isPinned: whether content is pinned +// - isPinnedRoot: whether this is a pinned root CID +// - isMFS: whether content is in MFS +// +// Return value: +// - Returns nil if operation succeeded or was skipped (preconditions not met) +// - Returns error only in sync mode (wait=true) when provide operation fails +// - In async mode (wait=false), always returns nil (errors logged in goroutine) +// +// The function handles all precondition checks (Provide.Enabled, DHT availability, +// strategy matching) and logs appropriately. In async mode, it launches a goroutine +// with a detached context and timeout. +func ExecuteFastProvide( + ctx context.Context, + ipfsNode *core.IpfsNode, + cfg *config.Config, + rootCid cid.Cid, + wait bool, + isPinned bool, + isPinnedRoot bool, + isMFS bool, +) error { + log.Debugw("fast-provide-root: enabled", "wait", wait) + + // Check preconditions for providing + switch { + case !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled): + log.Debugw("fast-provide-root: skipped", "reason", "Provide.Enabled is false") + return nil + case cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0: + log.Debugw("fast-provide-root: skipped", "reason", "Provide.DHT.Interval is 0") + return nil + case !ipfsNode.HasActiveDHTClient(): + log.Debugw("fast-provide-root: skipped", "reason", "DHT not available") + return nil + } + + // Check if strategy allows providing this content + strategyStr := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy) + strategy := config.ParseProvideStrategy(strategyStr) + shouldProvide := config.ShouldProvideForStrategy(strategy, isPinned, isPinnedRoot, isMFS) + + if !shouldProvide { + log.Debugw("fast-provide-root: skipped", "reason", "strategy does 
// maxRunes caps sanitized strings at 128 runes (not bytes).
const maxRunes = 128

// CleanAndTrim sanitizes untrusted strings from remote peers to prevent display issues
// across web UIs, terminals, and logs. It replaces control characters, format characters,
// and surrogates with U+FFFD (�), then enforces a maximum length of 128 runes.
//
// This follows the libp2p identify specification and RFC 9839 guidance:
// replacing problematic code points is preferred over deletion as deletion
// is a known security risk.
func CleanAndTrim(str string) string {
	// Single pass over the runes. strings.Map returns the input string
	// unchanged (without allocating) when no rune is replaced, which is
	// the common case for well-behaved peers.
	sanitized := strings.Map(func(r rune) rune {
		// Control (Cc): prevents terminal escapes, CR, LF, etc.
		// Format (Cf): prevents RTL/LTR overrides and zero-width chars.
		// Surrogate (Cs): invalid in UTF-8.
		// All are replaced with U+FFFD rather than deleted.
		if unicode.In(r, unicode.Cc, unicode.Cf, unicode.Cs) {
			return '\uFFFD'
		}
		// Everything else, including private use (Co) characters, is
		// preserved per spec.
		return r
	}, str)

	// Trim surrounding whitespace after replacement, matching the original
	// ordering (replaced control characters are no longer trimmable).
	sanitized = strings.TrimSpace(sanitized)

	// Enforce maximum length measured in runes, not bytes.
	if runes := []rune(sanitized); len(runes) > maxRunes {
		return string(runes[:maxRunes])
	}
	return sanitized
}
+func ValidatePinName(name string) error { + if name == "" { + // Empty names are allowed + return nil + } + + nameBytes := len([]byte(name)) + if nameBytes > MaxPinNameBytes { + return fmt.Errorf("pin name is %d bytes (max %d bytes)", nameBytes, MaxPinNameBytes) + } + return nil +} + // PathOrCidPath returns a path.Path built from the argument. It keeps the old // behaviour by building a path from a CID string. func PathOrCidPath(str string) (path.Path, error) { @@ -58,10 +76,23 @@ func PathOrCidPath(str string) (path.Path, error) { return p, nil } + // Save the original error before attempting fallback + originalErr := err + if p, err := path.NewPath("/ipfs/" + str); err == nil { return p, nil } // Send back original err. - return nil, err + return nil, originalErr +} + +// CloneAddrInfo returns a copy of the AddrInfo with a cloned Addrs slice. +// This prevents data races if the sender reuses the backing array. +// See: https://github.com/ipfs/kubo/issues/11116 +func CloneAddrInfo(ai peer.AddrInfo) peer.AddrInfo { + return peer.AddrInfo{ + ID: ai.ID, + Addrs: slices.Clone(ai.Addrs), + } } diff --git a/core/commands/cmdutils/utils_test.go b/core/commands/cmdutils/utils_test.go new file mode 100644 index 000000000..c50277d53 --- /dev/null +++ b/core/commands/cmdutils/utils_test.go @@ -0,0 +1,106 @@ +package cmdutils + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPathOrCidPath(t *testing.T) { + t.Run("valid path is returned as-is", func(t *testing.T) { + validPath := "/ipfs/QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG" + p, err := PathOrCidPath(validPath) + require.NoError(t, err) + assert.Equal(t, validPath, p.String()) + }) + + t.Run("valid CID is converted to /ipfs/ path", func(t *testing.T) { + cid := "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG" + p, err := PathOrCidPath(cid) + require.NoError(t, err) + assert.Equal(t, "/ipfs/"+cid, p.String()) + }) + + t.Run("valid ipns 
path is returned as-is", func(t *testing.T) { + validPath := "/ipns/example.com" + p, err := PathOrCidPath(validPath) + require.NoError(t, err) + assert.Equal(t, validPath, p.String()) + }) + + t.Run("returns original error when both attempts fail", func(t *testing.T) { + invalidInput := "invalid!@#path" + _, err := PathOrCidPath(invalidInput) + require.Error(t, err) + + // The error should reference the original input attempt. + // This ensures users get meaningful error messages about their actual input. + assert.Contains(t, err.Error(), invalidInput, + "error should mention the original input") + assert.Contains(t, err.Error(), "path does not have enough components", + "error should describe the problem with the original input") + }) + + t.Run("empty string returns error about original input", func(t *testing.T) { + _, err := PathOrCidPath("") + require.Error(t, err) + + // Verify we're not getting an error about "/ipfs/" (the fallback) + errMsg := err.Error() + assert.NotContains(t, errMsg, "/ipfs/", + "error should be about empty input, not the fallback path") + }) + + t.Run("invalid characters return error about original input", func(t *testing.T) { + invalidInput := "not a valid path or CID with spaces and /@#$%" + _, err := PathOrCidPath(invalidInput) + require.Error(t, err) + + // The error message should help debug the original input + assert.True(t, strings.Contains(err.Error(), invalidInput) || + strings.Contains(err.Error(), "invalid"), + "error should reference original problematic input") + }) + + t.Run("CID with path is converted correctly", func(t *testing.T) { + cidWithPath := "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG/file.txt" + p, err := PathOrCidPath(cidWithPath) + require.NoError(t, err) + assert.Equal(t, "/ipfs/"+cidWithPath, p.String()) + }) +} + +func TestValidatePinName(t *testing.T) { + t.Run("valid pin name is accepted", func(t *testing.T) { + err := ValidatePinName("my-pin-name") + assert.NoError(t, err) + }) + + t.Run("empty pin 
name is accepted", func(t *testing.T) { + err := ValidatePinName("") + assert.NoError(t, err) + }) + + t.Run("pin name at max length is accepted", func(t *testing.T) { + maxName := strings.Repeat("a", MaxPinNameBytes) + err := ValidatePinName(maxName) + assert.NoError(t, err) + }) + + t.Run("pin name exceeding max length is rejected", func(t *testing.T) { + tooLong := strings.Repeat("a", MaxPinNameBytes+1) + err := ValidatePinName(tooLong) + require.Error(t, err) + assert.Contains(t, err.Error(), "max") + }) + + t.Run("pin name with unicode is counted by bytes", func(t *testing.T) { + // Unicode character can be multiple bytes + unicodeName := strings.Repeat("🔒", MaxPinNameBytes/4+1) // emoji is 4 bytes + err := ValidatePinName(unicodeName) + require.Error(t, err) + assert.Contains(t, err.Error(), "bytes") + }) +} diff --git a/core/commands/commands.go b/core/commands/commands.go index 249f0ffbe..b1cd6c45d 100644 --- a/core/commands/commands.go +++ b/core/commands/commands.go @@ -10,7 +10,7 @@ import ( "fmt" "io" "os" - "sort" + "slices" "strings" cmds "github.com/ipfs/go-ipfs-cmds" @@ -131,7 +131,7 @@ func cmdPathStrings(cmd *Command, showOptions bool) []string { } recurse("", cmd) - sort.Strings(cmds) + slices.Sort(cmds) return cmds } @@ -233,12 +233,11 @@ type nonFatalError string // contain non-fatal errors. The helper function is allowed to panic // on internal errors. func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error { - return func(res cmds.Response, re cmds.ResponseEmitter) (err error) { + return func(res cmds.Response, re cmds.ResponseEmitter) (rerr error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("internal error: %v", r) + rerr = fmt.Errorf("internal error: %v", r) } - re.Close() }() var errors bool @@ -248,7 +247,8 @@ func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds. 
if err == io.EOF { break } - return err + rerr = err + return } errorMsg := procVal(v, os.Stdout) @@ -260,8 +260,8 @@ func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds. } if errors { - return fmt.Errorf("errors while displaying some entries") + rerr = fmt.Errorf("errors while displaying some entries") } - return nil + return } } diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index b04a5459b..23782f209 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -30,7 +30,6 @@ func TestCommands(t *testing.T) { "/block/stat", "/bootstrap", "/bootstrap/add", - "/bootstrap/add/default", "/bootstrap/list", "/bootstrap/rm", "/bootstrap/rm/all", @@ -72,6 +71,7 @@ func TestCommands(t *testing.T) { "/routing/findpeer", "/routing/findprovs", "/routing/provide", + "/routing/reprovide", "/diag", "/diag/cmds", "/diag/cmds/clear", @@ -162,6 +162,9 @@ func TestCommands(t *testing.T) { "/pin/update", "/pin/verify", "/ping", + "/provide", + "/provide/clear", + "/provide/stat", "/pubsub", "/pubsub/ls", "/pubsub/peers", @@ -183,6 +186,7 @@ func TestCommands(t *testing.T) { "/stats/bw", "/stats/dht", "/stats/provide", + "/stats/reprovide", "/stats/repo", "/swarm", "/swarm/addrs", diff --git a/core/commands/completion.go b/core/commands/completion.go index 2f5b8b61e..448af4d50 100644 --- a/core/commands/completion.go +++ b/core/commands/completion.go @@ -2,7 +2,8 @@ package commands import ( "io" - "sort" + "slices" + "strings" "text/template" cmds "github.com/ipfs/go-ipfs-cmds" @@ -39,8 +40,8 @@ func commandToCompletions(name string, fullName string, cmd *cmds.Command) *comp parsed.Subcommands = append(parsed.Subcommands, commandToCompletions(name, fullName+" "+name, subCmd)) } - sort.Slice(parsed.Subcommands, func(i, j int) bool { - return parsed.Subcommands[i].Name < parsed.Subcommands[j].Name + slices.SortFunc(parsed.Subcommands, func(a, b *completionCommand) int { + return strings.Compare(a.Name, 
b.Name) }) for _, opt := range cmd.Options { @@ -68,18 +69,10 @@ func commandToCompletions(name string, fullName string, cmd *cmds.Command) *comp parsed.Options = append(parsed.Options, flag) } } - sort.Slice(parsed.LongFlags, func(i, j int) bool { - return parsed.LongFlags[i] < parsed.LongFlags[j] - }) - sort.Slice(parsed.ShortFlags, func(i, j int) bool { - return parsed.ShortFlags[i] < parsed.ShortFlags[j] - }) - sort.Slice(parsed.LongOptions, func(i, j int) bool { - return parsed.LongOptions[i] < parsed.LongOptions[j] - }) - sort.Slice(parsed.ShortOptions, func(i, j int) bool { - return parsed.ShortOptions[i] < parsed.ShortOptions[j] - }) + slices.Sort(parsed.LongFlags) + slices.Sort(parsed.ShortFlags) + slices.Sort(parsed.LongOptions) + slices.Sort(parsed.ShortOptions) return parsed } diff --git a/core/commands/config.go b/core/commands/config.go index b52c05af2..c28466a98 100644 --- a/core/commands/config.go +++ b/core/commands/config.go @@ -5,17 +5,19 @@ import ( "errors" "fmt" "io" + "maps" "os" "os/exec" + "slices" "strings" - "github.com/ipfs/kubo/core/commands/cmdenv" - "github.com/ipfs/kubo/repo" - "github.com/ipfs/kubo/repo/fsrepo" - + "github.com/anmitsu/go-shlex" "github.com/elgris/jsondiff" cmds "github.com/ipfs/go-ipfs-cmds" config "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/ipfs/kubo/repo" + "github.com/ipfs/kubo/repo/fsrepo" ) // ConfigUpdateOutput is config profile apply command's output @@ -33,6 +35,7 @@ const ( configBoolOptionName = "bool" configJSONOptionName = "json" configDryRunOptionName = "dry-run" + configExpandAutoName = "expand-auto" ) var ConfigCmd = &cmds.Command{ @@ -48,13 +51,18 @@ file inside your IPFS repository (IPFS_PATH). 
Examples: -Get the value of the 'Datastore.Path' key: +Get the value of the 'Routing.Type' key: - $ ipfs config Datastore.Path + $ ipfs config Routing.Type -Set the value of the 'Datastore.Path' key: +Set the value of the 'Routing.Type' key: - $ ipfs config Datastore.Path ~/.ipfs/datastore + $ ipfs config Routing.Type auto + +Set multiple values in the 'Addresses.AppendAnnounce' array: + + $ ipfs config Addresses.AppendAnnounce --json \ + '["/dns4/a.example.com/tcp/4001", "/dns4/b.example.com/tcp/4002"]' `, }, Subcommands: map[string]*cmds.Command{ @@ -70,6 +78,7 @@ Set the value of the 'Datastore.Path' key: Options: []cmds.Option{ cmds.BoolOption(configBoolOptionName, "Set a boolean value."), cmds.BoolOption(configJSONOptionName, "Parse stringified JSON."), + cmds.BoolOption(configExpandAutoName, "Expand 'auto' placeholders to their expanded values from AutoConf service."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { args := req.Arguments @@ -100,6 +109,11 @@ Set the value of the 'Datastore.Path' key: } defer r.Close() if len(args) == 2 { + // Check if user is trying to write config with expand flag + if expandAuto, _ := req.Options[configExpandAutoName].(bool); expandAuto { + return fmt.Errorf("--expand-auto can only be used for reading config values, not for setting them") + } + value := args[1] if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON { @@ -116,7 +130,13 @@ Set the value of the 'Datastore.Path' key: output, err = setConfig(r, key, value) } } else { - output, err = getConfig(r, key) + // Check if user wants to expand auto values for getter + expandAuto, _ := req.Options[configExpandAutoName].(bool) + if expandAuto { + output, err = getConfigWithAutoExpand(r, key) + } else { + output, err = getConfig(r, key) + } } if err != nil { @@ -203,6 +223,23 @@ NOTE: For security reasons, this command will omit your private key and remote s return err } + // Check if user wants to expand auto values + 
expandAuto, _ := req.Options[configExpandAutoName].(bool) + if expandAuto { + // Load full config to use resolution methods + var fullCfg config.Config + err = json.Unmarshal(data, &fullCfg) + if err != nil { + return err + } + + // Expand auto values and update the map + cfg, err = fullCfg.ExpandAutoConfValues(cfg) + if err != nil { + return err + } + } + cfg, err = scrubValue(cfg, []string{config.IdentityTag, config.PrivKeyTag}) if err != nil { return err @@ -412,7 +449,8 @@ var configProfileApplyCmd = &cmds.Command{ func buildProfileHelp() string { var out string - for name, profile := range config.Profiles { + for _, name := range slices.Sorted(maps.Keys(config.Profiles)) { + profile := config.Profiles[name] dlines := strings.Split(profile.Description, "\n") for i := range dlines { dlines[i] = " " + dlines[i] @@ -493,6 +531,28 @@ func getConfig(r repo.Repo, key string) (*ConfigField, error) { }, nil } +func getConfigWithAutoExpand(r repo.Repo, key string) (*ConfigField, error) { + // First get the current value + value, err := r.GetConfigKey(key) + if err != nil { + return nil, fmt.Errorf("failed to get config value: %q", err) + } + + // Load full config for resolution + fullCfg, err := r.Config() + if err != nil { + return nil, fmt.Errorf("failed to load config: %q", err) + } + + // Expand auto values based on the key + expandedValue := fullCfg.ExpandConfigField(key, value) + + return &ConfigField{ + Key: key, + Value: expandedValue, + }, nil +} + func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) { err := r.SetConfigKey(key, value) if err != nil { @@ -501,13 +561,25 @@ func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) return getConfig(r, key) } +// parseEditorCommand parses the EDITOR environment variable into command and arguments +func parseEditorCommand(editor string) ([]string, error) { + return shlex.Split(editor, true) +} + func editConfig(filename string) error { editor := os.Getenv("EDITOR") 
if editor == "" { return errors.New("ENV variable $EDITOR not set") } - cmd := exec.Command(editor, filename) + editorAndArgs, err := parseEditorCommand(editor) + if err != nil { + return fmt.Errorf("cannot parse $EDITOR value: %s", err) + } + editor = editorAndArgs[0] + args := append(editorAndArgs[1:], filename) + + cmd := exec.Command(editor, args...) cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr return cmd.Run() } diff --git a/core/commands/config_test.go b/core/commands/config_test.go index 5eb79c153..fe1660abb 100644 --- a/core/commands/config_test.go +++ b/core/commands/config_test.go @@ -14,3 +14,116 @@ func TestScrubMapInternalDelete(t *testing.T) { t.Errorf("expecting an empty map, got a non-empty map") } } + +func TestEditorParsing(t *testing.T) { + testCases := []struct { + name string + input string + expected []string + hasError bool + }{ + { + name: "simple editor", + input: "vim", + expected: []string{"vim"}, + hasError: false, + }, + { + name: "editor with single flag", + input: "emacs -nw", + expected: []string{"emacs", "-nw"}, + hasError: false, + }, + { + name: "VS Code with wait flag (issue #9375)", + input: "code --wait", + expected: []string{"code", "--wait"}, + hasError: false, + }, + { + name: "VS Code with full path and wait flag (issue #9375)", + input: "/opt/homebrew/bin/code --wait", + expected: []string{"/opt/homebrew/bin/code", "--wait"}, + hasError: false, + }, + { + name: "editor with quoted path containing spaces", + input: "\"/Applications/Visual Studio Code.app/Contents/Resources/app/bin/code\" --wait", + expected: []string{"/Applications/Visual Studio Code.app/Contents/Resources/app/bin/code", "--wait"}, + hasError: false, + }, + { + name: "sublime text with wait flag", + input: "subl -w", + expected: []string{"subl", "-w"}, + hasError: false, + }, + { + name: "nano editor", + input: "nano", + expected: []string{"nano"}, + hasError: false, + }, + { + name: "gedit editor", + input: "gedit", + expected: 
[]string{"gedit"}, + hasError: false, + }, + { + name: "editor with multiple flags", + input: "vim -c 'set number' -c 'set hlsearch'", + expected: []string{"vim", "-c", "set number", "-c", "set hlsearch"}, + hasError: false, + }, + { + name: "trailing backslash (POSIX edge case)", + input: "editor\\", + expected: nil, + hasError: true, + }, + { + name: "double quoted editor name with spaces", + input: "\"code with spaces\" --wait", + expected: []string{"code with spaces", "--wait"}, + hasError: false, + }, + { + name: "single quoted editor with flags", + input: "'my editor' -flag", + expected: []string{"my editor", "-flag"}, + hasError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := parseEditorCommand(tc.input) + + if tc.hasError { + if err == nil { + t.Errorf("Expected error for input '%s', but got none", tc.input) + } + return + } + + if err != nil { + t.Errorf("Unexpected error for input '%s': %v", tc.input, err) + return + } + + if len(result) != len(tc.expected) { + t.Errorf("Expected %d args, got %d for input '%s'", len(tc.expected), len(result), tc.input) + t.Errorf("Expected: %v", tc.expected) + t.Errorf("Got: %v", result) + return + } + + for i, expected := range tc.expected { + if result[i] != expected { + t.Errorf("Expected arg %d to be '%s', got '%s' for input '%s'", i, expected, result[i], tc.input) + } + } + }) + } +} diff --git a/core/commands/dag/dag.go b/core/commands/dag/dag.go index ce5edb641..caf7a5474 100644 --- a/core/commands/dag/dag.go +++ b/core/commands/dag/dag.go @@ -7,6 +7,7 @@ import ( "io" "path" + "github.com/dustin/go-humanize" "github.com/ipfs/kubo/core/commands/cmdenv" "github.com/ipfs/kubo/core/commands/cmdutils" @@ -16,10 +17,12 @@ import ( ) const ( - pinRootsOptionName = "pin-roots" - progressOptionName = "progress" - silentOptionName = "silent" - statsOptionName = "stats" + pinRootsOptionName = "pin-roots" + progressOptionName = "progress" + silentOptionName = "silent" 
+ statsOptionName = "stats" + fastProvideRootOptionName = "fast-provide-root" + fastProvideWaitOptionName = "fast-provide-wait" ) // DagCmd provides a subset of commands for interacting with ipld dag objects @@ -189,6 +192,18 @@ Note: currently present in the blockstore does not represent a complete DAG, pinning of that individual root will fail. +FAST PROVIDE OPTIMIZATION: + +Root CIDs from CAR headers are immediately provided to the DHT in addition +to the regular provide queue, allowing other peers to discover your content +right away. This complements the sweep provider, which efficiently provides +all blocks according to Provide.Strategy over time. + +By default, the provide happens in the background without blocking the +command. Use --fast-provide-wait to wait for the provide to complete, or +--fast-provide-root=false to skip it. Works even with --pin-roots=false. +Automatically skipped when DHT is not available. + Maximum supported CAR version: 2 Specification of CAR formats: https://ipld.io/specs/transport/car/ `, @@ -200,6 +215,8 @@ Specification of CAR formats: https://ipld.io/specs/transport/car/ cmds.BoolOption(pinRootsOptionName, "Pin optional roots listed in the .car headers after importing.").WithDefault(true), cmds.BoolOption(silentOptionName, "No output."), cmds.BoolOption(statsOptionName, "Output stats."), + cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CIDs to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"), + cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. 
Default: Import.FastProvideWait"), cmdutils.AllowBigBlockOption, }, Type: CarImportOutput{}, @@ -333,7 +350,11 @@ type DagStatSummary struct { } func (s *DagStatSummary) String() string { - return fmt.Sprintf("Total Size: %d\nUnique Blocks: %d\nShared Size: %d\nRatio: %f", s.TotalSize, s.UniqueBlocks, s.SharedSize, s.Ratio) + return fmt.Sprintf("Total Size: %d (%s)\nUnique Blocks: %d\nShared Size: %d (%s)\nRatio: %f", + s.TotalSize, humanize.Bytes(s.TotalSize), + s.UniqueBlocks, + s.SharedSize, humanize.Bytes(s.SharedSize), + s.Ratio) } func (s *DagStatSummary) incrementTotalSize(size uint64) { @@ -368,7 +389,7 @@ Note: This command skips duplicate blocks in reporting both size and the number cmds.StringArg("root", true, true, "CID of a DAG root to get statistics for").EnableStdin(), }, Options: []cmds.Option{ - cmds.BoolOption(progressOptionName, "p", "Return progressive data while reading through the DAG").WithDefault(true), + cmds.BoolOption(progressOptionName, "p", "Show progress on stderr. 
Auto-detected if stderr is a terminal."), }, Run: dagStat, Type: DagStatSummary{}, diff --git a/core/commands/dag/export.go b/core/commands/dag/export.go index a729cf752..9f11c43de 100644 --- a/core/commands/dag/export.go +++ b/core/commands/dag/export.go @@ -9,15 +9,14 @@ import ( "time" "github.com/cheggaaa/pb" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" ipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/kubo/core/commands/cmdenv" "github.com/ipfs/kubo/core/commands/cmdutils" iface "github.com/ipfs/kubo/core/coreiface" - - cmds "github.com/ipfs/go-ipfs-cmds" - gocar "github.com/ipld/go-car" + gocar "github.com/ipld/go-car/v2" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" ) @@ -51,14 +50,27 @@ func dagExport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment close(errCh) }() - store := dagStore{dag: api.Dag(), ctx: req.Context} - dag := gocar.Dag{Root: c, Selector: selectorparse.CommonSelector_ExploreAllRecursively} - // TraverseLinksOnlyOnce is safe for an exhaustive selector but won't be when we allow - // arbitrary selectors here - car := gocar.NewSelectiveCar(req.Context, store, []gocar.Dag{dag}, gocar.TraverseLinksOnlyOnce()) - if err := car.Write(pipeW); err != nil { + lsys := cidlink.DefaultLinkSystem() + lsys.SetReadStorage(&dagStore{dag: api.Dag(), ctx: req.Context}) + + // Uncomment the following to support CARv2 output. 
+ /* + car, err := gocar.NewSelectiveWriter(req.Context, &lsys, c, selectorparse.CommonSelector_ExploreAllRecursively, gocar.AllowDuplicatePuts(false)) + if err != nil { + errCh <- err + return + } + if _, err = car.WriteTo(pipeW); err != nil { + errCh <- err + return + } + */ + _, err := gocar.TraverseV1(req.Context, &lsys, c, selectorparse.CommonSelector_ExploreAllRecursively, pipeW, gocar.AllowDuplicatePuts(false)) + if err != nil { errCh <- err + return } + }() if err := res.Emit(pipeR); err != nil { @@ -69,7 +81,7 @@ func dagExport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment err = <-errCh // minimal user friendliness - if ipld.IsNotFound(err) { + if errors.Is(err, ipld.ErrNotFound{}) { explicitOffline, _ := req.Options["offline"].(bool) if explicitOffline { err = fmt.Errorf("%s (currently offline, perhaps retry without the offline flag)", err) @@ -112,16 +124,17 @@ func finishCLIExport(res cmds.Response, re cmds.ResponseEmitter) error { var processedOneResponse bool for { v, err := res.Next() - if err == io.EOF { - - // We only write the final bar update on success - // On error it looks too weird - bar.Finish() - - return re.Close() - } else if err != nil { + if err != nil { + if errors.Is(err, io.EOF) { + // We only write the final bar update on success + // On error it looks too weird + bar.Finish() + return re.Close() + } return re.CloseWithError(err) - } else if processedOneResponse { + } + + if processedOneResponse { return re.CloseWithError(errors.New("unexpected multipart response during emit, please file a bugreport")) } @@ -133,18 +146,53 @@ func finishCLIExport(res cmds.Response, re cmds.ResponseEmitter) error { processedOneResponse = true - if err := re.Emit(bar.NewProxyReader(r)); err != nil { + if err = re.Emit(bar.NewProxyReader(r)); err != nil { return err } } } -// FIXME(@Jorropo): https://github.com/ipld/go-car/issues/315 type dagStore struct { dag iface.APIDagService ctx context.Context } -func (ds dagStore) Get(_ 
context.Context, c cid.Cid) (blocks.Block, error) { - return ds.dag.Get(ds.ctx, c) +func (ds *dagStore) Get(ctx context.Context, key string) ([]byte, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + + c, err := cidFromBinString(key) + if err != nil { + return nil, err + } + + block, err := ds.dag.Get(ds.ctx, c) + if err != nil { + return nil, err + } + + return block.RawData(), nil +} + +func (ds *dagStore) Has(ctx context.Context, key string) (bool, error) { + _, err := ds.Get(ctx, key) + if err != nil { + if errors.Is(err, ipld.ErrNotFound{}) { + return false, nil + } + return false, err + } + return true, nil +} + +func cidFromBinString(key string) (cid.Cid, error) { + l, k, err := cid.CidFromBytes([]byte(key)) + if err != nil { + return cid.Undef, fmt.Errorf("dagStore: key was not a cid: %w", err) + } + if l != len(key) { + return cid.Undef, fmt.Errorf("dagStore: key was not a cid: had %d bytes leftover", len(key)-l) + } + return k, nil } diff --git a/core/commands/dag/import.go b/core/commands/dag/import.go index 5e39393c1..032b9e52a 100644 --- a/core/commands/dag/import.go +++ b/core/commands/dag/import.go @@ -11,6 +11,8 @@ import ( cmds "github.com/ipfs/go-ipfs-cmds" ipld "github.com/ipfs/go-ipld-format" ipldlegacy "github.com/ipfs/go-ipld-legacy" + logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core/coreiface/options" gocarv2 "github.com/ipld/go-car/v2" @@ -18,12 +20,19 @@ import ( "github.com/ipfs/kubo/core/commands/cmdutils" ) +var log = logging.Logger("core/commands") + func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { node, err := cmdenv.GetNode(env) if err != nil { return err } + cfg, err := node.Repo.Config() + if err != nil { + return err + } + api, err := cmdenv.GetApi(env, req) if err != nil { return err @@ -41,6 +50,12 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment doPinRoots, _ := 
req.Options[pinRootsOptionName].(bool) + fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool) + fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool) + + fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot) + fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait) + // grab a pinlock ( which doubles as a GC lock ) so that regardless of the // size of the streamed-in cars nothing will disappear on us before we had // a chance to roots that may show up at the very end @@ -55,7 +70,14 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment // this is *not* a transaction // it is simply a way to relieve pressure on the blockstore // similar to pinner.Pin/pinner.Flush - batch := ipld.NewBatch(req.Context, api.Dag()) + batch := ipld.NewBatch(req.Context, api.Dag(), + // Default: 128. Means 128 file descriptors needed in flatfs + ipld.MaxNodesBatchOption(int(cfg.Import.BatchMaxNodes.WithDefault(config.DefaultBatchMaxNodes))), + // Default 100MiB. When setting block size to 1MiB, we can add + // ~100 nodes maximum. 
With default 256KiB block-size, we will + hit the max nodes limit at 32MiB. + ipld.MaxSizeBatchOption(int(cfg.Import.BatchMaxSize.WithDefault(config.DefaultBatchMaxSize))), + ) roots := cid.NewSet() var blockCount, blockBytesCount uint64 @@ -178,5 +200,21 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment } } + // Fast-provide roots for faster discovery + if fastProvideRoot { + err = roots.ForEach(func(c cid.Cid) error { + return cmdenv.ExecuteFastProvide(req.Context, node, cfg, c, fastProvideWait, doPinRoots, doPinRoots, false) + }) + if err != nil { + return err + } + } else { + if fastProvideWait { + log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true) + } else { + log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config") + } + } + return nil } diff --git a/core/commands/dag/stat.go b/core/commands/dag/stat.go index bb9be7e0d..916aae71a 100644 --- a/core/commands/dag/stat.go +++ b/core/commands/dag/stat.go @@ -5,6 +5,7 @@ import ( "io" "os" + "github.com/dustin/go-humanize" mdag "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/boxo/ipld/merkledag/traverse" cid "github.com/ipfs/go-cid" @@ -19,7 +20,11 @@ import ( // to compute the new state func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - progressive := req.Options[progressOptionName].(bool) + // Default to true (emit intermediate states) for HTTP/RPC clients that want progress + progressive := true + if val, specified := req.Options[progressOptionName].(bool); specified { + progressive = val + } api, err := cmdenv.GetApi(env, req) if err != nil { return err @@ -84,6 +89,18 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) } func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error { + // Determine whether to show progress based on TTY detection or explicit flag + var showProgress bool + val, specified := 
res.Request().Options[progressOptionName] + if !specified { + // Auto-detect: show progress only if stderr is a TTY + if errStat, err := os.Stderr.Stat(); err == nil { + showProgress = (errStat.Mode() & os.ModeCharDevice) != 0 + } + } else { + showProgress = val.(bool) + } + var dagStats *DagStatSummary for { v, err := res.Next() @@ -96,17 +113,26 @@ func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error { switch out := v.(type) { case *DagStatSummary: dagStats = out - if dagStats.Ratio == 0 { - length := len(dagStats.DagStatsArray) - if length > 0 { - currentStat := dagStats.DagStatsArray[length-1] - fmt.Fprintf(os.Stderr, "CID: %s, Size: %d, NumBlocks: %d\n", currentStat.Cid, currentStat.Size, currentStat.NumBlocks) + // Ratio == 0 means this is a progress update (not final result) + if showProgress && dagStats.Ratio == 0 { + // Sum up total progress across all DAGs being scanned + var totalBlocks int64 + var totalSize uint64 + for _, stat := range dagStats.DagStatsArray { + totalBlocks += stat.NumBlocks + totalSize += stat.Size } + fmt.Fprintf(os.Stderr, "Fetched/Processed %d blocks, %d bytes (%s)\r", totalBlocks, totalSize, humanize.Bytes(totalSize)) } default: return e.TypeErr(out, v) - } } + + // Clear the progress line before final output + if showProgress { + fmt.Fprint(os.Stderr, "\033[2K\r") + } + return re.Emit(dagStats) } diff --git a/core/commands/dht.go b/core/commands/dht.go index 1d4620181..b246a78cc 100644 --- a/core/commands/dht.go +++ b/core/commands/dht.go @@ -56,7 +56,7 @@ var queryDhtCmd = &cmds.Command{ return err } - if nd.DHTClient == nil { + if !nd.HasActiveDHTClient() { return ErrNotDHT } @@ -70,7 +70,7 @@ var queryDhtCmd = &cmds.Command{ ctx, events := routing.RegisterForQueryEvents(ctx) client := nd.DHTClient - if client == nd.DHT { + if nd.DHT != nil && client == nd.DHT { client = nd.DHT.WAN if !nd.DHT.WANActive() { client = nd.DHT.LAN @@ -78,7 +78,7 @@ var queryDhtCmd = &cmds.Command{ } if d, ok := client.(kademlia); !ok 
{ - return fmt.Errorf("dht client does not support GetClosestPeers") + return errors.New("dht client does not support GetClosestPeers") } else { errCh := make(chan error, 1) go func() { diff --git a/core/commands/files.go b/core/commands/files.go index e5f5f2053..599ea4188 100644 --- a/core/commands/files.go +++ b/core/commands/files.go @@ -8,9 +8,11 @@ import ( "io" "os" gopath "path" - "sort" + "slices" "strconv" "strings" + "sync" + "sync/atomic" "time" humanize "github.com/dustin/go-humanize" @@ -32,13 +34,50 @@ import ( fslock "github.com/ipfs/go-fs-lock" cmds "github.com/ipfs/go-ipfs-cmds" ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" iface "github.com/ipfs/kubo/core/coreiface" mh "github.com/multiformats/go-multihash" ) var flog = logging.Logger("cmds/files") +// Global counter for unflushed MFS operations +var noFlushOperationCounter atomic.Int64 + +// Cached limit value (read once on first use) +var ( + noFlushLimit int64 + noFlushLimitInit sync.Once +) + +// updateNoFlushCounter manages the counter for unflushed operations +func updateNoFlushCounter(nd *core.IpfsNode, flush bool) error { + if flush { + // Reset counter when flushing + noFlushOperationCounter.Store(0) + return nil + } + + // Cache the limit on first use (config doesn't change at runtime) + noFlushLimitInit.Do(func() { + noFlushLimit = int64(config.DefaultMFSNoFlushLimit) + if cfg, err := nd.Repo.Config(); err == nil && cfg.Internal.MFSNoFlushLimit != nil { + noFlushLimit = cfg.Internal.MFSNoFlushLimit.WithDefault(int64(config.DefaultMFSNoFlushLimit)) + } + }) + + // Check if limit reached + if noFlushLimit > 0 && noFlushOperationCounter.Load() >= noFlushLimit { + return fmt.Errorf("reached limit of %d unflushed MFS operations. 
"+ + "To resolve: 1) run 'ipfs files flush' to persist changes, "+ + "2) use --flush=true (default), or "+ + "3) increase Internal.MFSNoFlushLimit in config", noFlushLimit) + } + + noFlushOperationCounter.Add(1) + return nil +} + // FilesCmd is the 'ipfs files' command var FilesCmd = &cmds.Command{ Helptext: cmds.HelpText{ @@ -63,16 +102,23 @@ Content added with "ipfs add" (which by default also becomes pinned), is not added to MFS. Any content can be lazily referenced from MFS with the command "ipfs files cp /ipfs/ /some/path/" (see ipfs files cp --help). - -NOTE: -Most of the subcommands of 'ipfs files' accept the '--flush' flag. It defaults -to true. Use caution when setting this flag to false. It will improve +NOTE: Most of the subcommands of 'ipfs files' accept the '--flush' flag. It +defaults to true and ensures two things: 1) that the changes are reflected in +the full MFS structure (updated CIDs) 2) that the parent-folder's cache is +cleared. Use caution when setting this flag to false. It will improve performance for large numbers of file operations, but it does so at the cost of consistency guarantees. If the daemon is unexpectedly killed before running 'ipfs files flush' on the files in question, then data may be lost. This also -applies to run 'ipfs repo gc' concurrently with '--flush=false' -operations. -`, +applies to run 'ipfs repo gc' concurrently with '--flush=false' operations. + +When using '--flush=false', operations are limited to prevent unbounded +memory growth. After reaching Internal.MFSNoFlushLimit operations, further +operations will fail until you run 'ipfs files flush'. This explicit failure +(instead of auto-flushing) ensures you maintain control over when data is +persisted, preventing unexpected partial states and making batch operations +predictable. 
We recommend flushing paths regularly, especially folders with +many write operations, to clear caches, free memory, and maintain good +performance.`, }, Options: []cmds.Option{ cmds.BoolOption(filesFlushOptionName, "f", "Flush target and ancestors after write.").WithDefault(true), @@ -332,7 +378,7 @@ func statNode(nd ipld.Node, enc cidenc.Encoder) (*statOutput, error) { Type: "file", }, nil default: - return nil, fmt.Errorf("not unixfs node (proto or raw)") + return nil, errors.New("not unixfs node (proto or raw)") } } @@ -405,6 +451,7 @@ func walkBlock(ctx context.Context, dagserv ipld.DAGService, nd ipld.Node) (bool return local, sizeLocal, nil } +var errFilesCpInvalidUnixFS = errors.New("cp: source must be a valid UnixFS (dag-pb or raw codec)") var filesCpCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Add references to IPFS files and directories in MFS (or copy within MFS).", @@ -442,10 +489,10 @@ being GC'ed. cmds.StringArg("dest", true, false, "Destination within MFS."), }, Options: []cmds.Option{ + cmds.BoolOption(forceOptionName, "Force overwrite of existing files."), cmds.BoolOption(filesParentsOptionName, "p", "Make parent directories as needed."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - mkParents, _ := req.Options[filesParentsOptionName].(bool) nd, err := cmdenv.GetNode(env) if err != nil { return err @@ -461,8 +508,6 @@ being GC'ed. return err } - flush, _ := req.Options[filesFlushOptionName].(bool) - src, err := checkPath(req.Arguments[0]) if err != nil { return err @@ -483,6 +528,26 @@ being GC'ed. 
return fmt.Errorf("cp: cannot get node from path %s: %s", src, err) } + // Sanity-check: ensure root CID is a valid UnixFS (dag-pb or raw block) + // Context: https://github.com/ipfs/kubo/issues/10331 + srcCidType := node.Cid().Type() + switch srcCidType { + case cid.Raw: + if _, ok := node.(*dag.RawNode); !ok { + return errFilesCpInvalidUnixFS + } + case cid.DagProtobuf: + if _, ok := node.(*dag.ProtoNode); !ok { + return errFilesCpInvalidUnixFS + } + if _, err = ft.FSNodeFromBytes(node.(*dag.ProtoNode).Data()); err != nil { + return fmt.Errorf("%w: %v", errFilesCpInvalidUnixFS, err) + } + default: + return errFilesCpInvalidUnixFS + } + + mkParents, _ := req.Options[filesParentsOptionName].(bool) if mkParents { err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix) if err != nil { @@ -490,16 +555,32 @@ being GC'ed. } } + force, _ := req.Options[forceOptionName].(bool) + if force { + if err = unlinkNodeIfExists(nd, dst); err != nil { + return fmt.Errorf("cp: cannot unlink existing file: %s", err) + } + } + + flush, _ := req.Options[filesFlushOptionName].(bool) + + if err := updateNoFlushCounter(nd, flush); err != nil { + return err + } + err = mfs.PutNode(nd.FilesRoot, dst, node) if err != nil { return fmt.Errorf("cp: cannot put node in path %s: %s", dst, err) } - if flush { - _, err := mfs.FlushPath(req.Context, nd.FilesRoot, dst) - if err != nil { + if _, err := mfs.FlushPath(req.Context, nd.FilesRoot, dst); err != nil { return fmt.Errorf("cp: cannot flush the created file %s: %s", dst, err) } + // Flush parent to clear directory cache and free memory. 
+ parent := gopath.Dir(dst) + if _, err = mfs.FlushPath(req.Context, nd.FilesRoot, parent); err != nil { + return fmt.Errorf("cp: cannot flush the created file's parent folder %s: %s", dst, err) + } } return nil @@ -525,6 +606,35 @@ func getNodeFromPath(ctx context.Context, node *core.IpfsNode, api iface.CoreAPI } } +func unlinkNodeIfExists(node *core.IpfsNode, path string) error { + dir, name := gopath.Split(path) + parent, err := mfs.Lookup(node.FilesRoot, dir) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + + pdir, ok := parent.(*mfs.Directory) + if !ok { + return fmt.Errorf("not a directory: %s", dir) + } + + // Attempt to unlink if child is a file, ignore error since + // we are only concerned with unlinking an existing file. + child, err := pdir.Child(name) + if err != nil { + return nil // no child file, nothing to unlink + } + + if child.Type() != mfs.TFile { + return fmt.Errorf("not a file: %s", path) + } + + return pdir.Unlink(name) +} + type filesLsOutput struct { Entries []mfs.NodeListing } @@ -641,8 +751,8 @@ Examples: cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *filesLsOutput) error { noSort, _ := req.Options[dontSortOptionName].(bool) if !noSort { - sort.Slice(out.Entries, func(i, j int) bool { - return strings.Compare(out.Entries[i].Name, out.Entries[j].Name) < 0 + slices.SortFunc(out.Entries, func(a, b mfs.NodeListing) int { + return strings.Compare(a.Name, b.Name) }) } @@ -703,7 +813,7 @@ Examples: fsn, err := mfs.Lookup(nd.FilesRoot, path) if err != nil { - return err + return fmt.Errorf("%s: %w", path, err) } fi, ok := fsn.(*mfs.File) @@ -787,6 +897,10 @@ Example: flush, _ := req.Options[filesFlushOptionName].(bool) + if err := updateNoFlushCounter(nd, flush); err != nil { + return err + } + src, err := checkPath(req.Arguments[0]) if err != nil { return err @@ -797,10 +911,30 @@ Example: } err = mfs.Mv(nd.FilesRoot, src, dst) - if err == nil && flush { - _, err = 
mfs.FlushPath(req.Context, nd.FilesRoot, "/") + if err != nil { + return err } - return err + if flush { + parentSrc := gopath.Dir(src) + parentDst := gopath.Dir(dst) + // Flush parent to clear directory cache and free memory. + if _, err = mfs.FlushPath(req.Context, nd.FilesRoot, parentDst); err != nil { + return fmt.Errorf("mv: cannot flush the destination file's parent folder %s: %s", dst, err) + } + + // Avoid re-flushing when moving within the same folder. + if parentSrc != parentDst { + if _, err = mfs.FlushPath(req.Context, nd.FilesRoot, parentSrc); err != nil { + return fmt.Errorf("mv: cannot flush the source file's parent folder %s: %s", dst, err) + } + } + + if _, err = mfs.FlushPath(req.Context, nd.FilesRoot, "/"); err != nil { + return err + } + } + + return nil }, } @@ -904,6 +1038,10 @@ See '--to-files' in 'ipfs add --help' for more information. flush, _ := req.Options[filesFlushOptionName].(bool) rawLeaves, rawLeavesDef := req.Options[filesRawLeavesOptionName].(bool) + if err := updateNoFlushCounter(nd, flush); err != nil { + return err + } + if !rawLeavesDef && cfg.Import.UnixFSRawLeaves != config.Default { rawLeavesDef = true rawLeaves = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves) } @@ -948,6 +1086,17 @@ See '--to-files' in 'ipfs add --help' for more information. flog.Error("files: error closing file mfs file descriptor", err) } } + if flush { + // Flush parent to clear directory cache and free memory. + parent := gopath.Dir(path) + if _, err := mfs.FlushPath(req.Context, nd.FilesRoot, parent); err != nil { + if retErr == nil { + retErr = err + } else { + flog.Error("files: flushing the parent folder", err) + } + } + } }() + if trunc { @@ -1021,6 +1170,10 @@ Examples: flush, _ := req.Options[filesFlushOptionName].(bool) + if err := updateNoFlushCounter(n, flush); err != nil { + return err + } + prefix, err := getPrefix(req) if err != nil { return err } @@ -1073,6 +1226,9 @@ are run with the '--flush=false'.
return err } + // Reset the counter (flush always resets) + noFlushOperationCounter.Store(0) + return cmds.EmitOnce(res, &flushRes{enc.Encode(n.Cid())}) }, Type: flushRes{}, @@ -1110,11 +1266,20 @@ Change the CID version or hash function of the root node of a given path. return err } - err = updatePath(nd.FilesRoot, path, prefix) - if err == nil && flush { - _, err = mfs.FlushPath(req.Context, nd.FilesRoot, path) + if err := updatePath(nd.FilesRoot, path, prefix); err != nil { + return err } - return err + if flush { + if _, err = mfs.FlushPath(req.Context, nd.FilesRoot, path); err != nil { + return err + } + // Flush parent to clear directory cache and free memory. + parent := gopath.Dir(path) + if _, err = mfs.FlushPath(req.Context, nd.FilesRoot, parent); err != nil { + return err + } + } + return nil }, } @@ -1161,6 +1326,13 @@ Remove files or directories. cmds.BoolOption(forceOptionName, "Forcibly remove target at path; implies -r for directories"), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + // Check if user explicitly set --flush=false + if flushOpt, ok := req.Options[filesFlushOptionName]; ok { + if flush, ok := flushOpt.(bool); ok && !flush { + return fmt.Errorf("files rm always flushes for safety. 
The --flush flag cannot be set to false for this command") + } + } + nd, err := cmdenv.GetNode(env) if err != nil { return err diff --git a/core/commands/files_test.go b/core/commands/files_test.go new file mode 100644 index 000000000..2456363e1 --- /dev/null +++ b/core/commands/files_test.go @@ -0,0 +1,45 @@ +package commands + +import ( + "io" + "testing" + + dag "github.com/ipfs/boxo/ipld/merkledag" + cmds "github.com/ipfs/go-ipfs-cmds" + coremock "github.com/ipfs/kubo/core/mock" + "github.com/stretchr/testify/require" +) + +func TestFilesCp_DagCborNodeFails(t *testing.T) { + ctx := t.Context() + + cmdCtx, err := coremock.MockCmdsCtx() + require.NoError(t, err) + + node, err := cmdCtx.ConstructNode() + require.NoError(t, err) + + invalidData := []byte{0x00} + protoNode := dag.NodeWithData(invalidData) + err = node.DAG.Add(ctx, protoNode) + require.NoError(t, err) + + req := &cmds.Request{ + Context: ctx, + Arguments: []string{ + "/ipfs/" + protoNode.Cid().String(), + "/test-destination", + }, + Options: map[string]interface{}{ + "force": false, + }, + } + + _, pw := io.Pipe() + res, err := cmds.NewWriterResponseEmitter(pw, req) + require.NoError(t, err) + + err = filesCpCmd.Run(req, res, &cmdCtx) + require.Error(t, err) + require.ErrorContains(t, err, "cp: source must be a valid UnixFS (dag-pb or raw codec)") +} diff --git a/core/commands/filestore.go b/core/commands/filestore.go index 0c9dbee0a..92a20176e 100644 --- a/core/commands/filestore.go +++ b/core/commands/filestore.go @@ -27,7 +27,8 @@ var FileStoreCmd = &cmds.Command{ } const ( - fileOrderOptionName = "file-order" + fileOrderOptionName = "file-order" + removeBadBlocksOptionName = "remove-bad-blocks" ) var lsFileStore = &cmds.Command{ @@ -57,7 +58,7 @@ The output is: } args := req.Arguments if len(args) > 0 { - return listByArgs(req.Context, res, fs, args) + return listByArgs(req.Context, res, fs, args, false) } fileOrder, _ := req.Options[fileOrderOptionName].(bool) @@ -108,7 +109,7 @@ otherwise 
verify all objects. The output is: - + [] Where is one of: ok: the block can be reconstructed @@ -118,6 +119,10 @@ error: there was some other problem reading the file missing: could not be found in the filestore ERROR: internal error, most likely due to a corrupt database +Where is present only when removing bad blocks and is one of: +remove: link to the block will be removed from datastore +keep: keep link, nothing to do + For ERROR entries the error will also be printed to stderr. `, }, @@ -126,15 +131,18 @@ For ERROR entries the error will also be printed to stderr. }, Options: []cmds.Option{ cmds.BoolOption(fileOrderOptionName, "verify the objects based on the order of the backing file"), + cmds.BoolOption(removeBadBlocksOptionName, "remove bad blocks. WARNING: This may remove pinned data. You should run 'ipfs pin verify' after running this command and correct any issues."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { _, fs, err := getFilestore(env) if err != nil { return err } + + removeBadBlocks, _ := req.Options[removeBadBlocksOptionName].(bool) args := req.Arguments if len(args) > 0 { - return listByArgs(req.Context, res, fs, args) + return listByArgs(req.Context, res, fs, args, removeBadBlocks) } fileOrder, _ := req.Options[fileOrderOptionName].(bool) @@ -148,7 +156,14 @@ For ERROR entries the error will also be printed to stderr. if r == nil { break } - if err := res.Emit(r); err != nil { + + if removeBadBlocks && (r.Status != filestore.StatusOk) && (r.Status != filestore.StatusOtherError) { + if err = fs.FileManager().DeleteBlock(req.Context, r.Key); err != nil { + return err + } + } + + if err = res.Emit(r); err != nil { return err } } @@ -162,6 +177,8 @@ For ERROR entries the error will also be printed to stderr. 
return err } + req := res.Request() + removeBadBlocks, _ := req.Options[removeBadBlocksOptionName].(bool) for { v, err := res.Next() if err != nil { @@ -179,7 +196,16 @@ For ERROR entries the error will also be printed to stderr. if list.Status == filestore.StatusOtherError { fmt.Fprintf(os.Stderr, "%s\n", list.ErrorMsg) } - fmt.Fprintf(os.Stdout, "%s %s\n", list.Status.Format(), list.FormatLong(enc.Encode)) + + if removeBadBlocks { + action := "keep" + if removeBadBlocks && (list.Status != filestore.StatusOk) && (list.Status != filestore.StatusOtherError) { + action = "remove" + } + fmt.Fprintf(os.Stdout, "%s %s %s\n", list.Status.Format(), list.FormatLong(enc.Encode), action) + } else { + fmt.Fprintf(os.Stdout, "%s %s\n", list.Status.Format(), list.FormatLong(enc.Encode)) + } } }, }, @@ -236,7 +262,7 @@ func getFilestore(env cmds.Environment) (*core.IpfsNode, *filestore.Filestore, e return n, fs, err } -func listByArgs(ctx context.Context, res cmds.ResponseEmitter, fs *filestore.Filestore, args []string) error { +func listByArgs(ctx context.Context, res cmds.ResponseEmitter, fs *filestore.Filestore, args []string, removeBadBlocks bool) error { for _, arg := range args { c, err := cid.Decode(arg) if err != nil { @@ -250,7 +276,14 @@ func listByArgs(ctx context.Context, res cmds.ResponseEmitter, fs *filestore.Fil continue } r := filestore.Verify(ctx, fs, c) - if err := res.Emit(r); err != nil { + + if removeBadBlocks && (r.Status != filestore.StatusOk) && (r.Status != filestore.StatusOtherError) { + if err = fs.FileManager().DeleteBlock(ctx, r.Key); err != nil { + return err + } + } + + if err = res.Emit(r); err != nil { return err } } diff --git a/core/commands/get_test.go b/core/commands/get_test.go index 0a17d8842..ccca3e593 100644 --- a/core/commands/get_test.go +++ b/core/commands/get_test.go @@ -1,7 +1,6 @@ package commands import ( - "context" "fmt" "testing" @@ -52,8 +51,7 @@ func TestGetOutputPath(t *testing.T) { for i, tc := range cases { 
t.Run(fmt.Sprintf("%s-%d", t.Name(), i), func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() req, err := cmds.NewRequest(ctx, []string{}, tc.opts, tc.args, nil, GetCmd) if err != nil { diff --git a/core/commands/id.go b/core/commands/id.go index 3446fc267..58886699b 100644 --- a/core/commands/id.go +++ b/core/commands/id.go @@ -6,12 +6,13 @@ import ( "errors" "fmt" "io" - "sort" + "slices" "strings" version "github.com/ipfs/kubo" "github.com/ipfs/kubo/core" "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/ipfs/kubo/core/commands/cmdutils" cmds "github.com/ipfs/go-ipfs-cmds" ke "github.com/ipfs/kubo/core/commands/keyencode" @@ -81,7 +82,7 @@ EXAMPLE: var err error id, err = peer.Decode(req.Arguments[0]) if err != nil { - return fmt.Errorf("invalid peer id") + return errors.New("invalid peer id") } } else { id = n.Identity @@ -170,15 +171,17 @@ func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{ for _, a := range addrs { info.Addresses = append(info.Addresses, a.String()) } - sort.Strings(info.Addresses) + slices.Sort(info.Addresses) protocols, _ := ps.GetProtocols(p) // don't care about errors here. - info.Protocols = append(info.Protocols, protocols...) 
- sort.Slice(info.Protocols, func(i, j int) bool { return info.Protocols[i] < info.Protocols[j] }) + for _, proto := range protocols { + info.Protocols = append(info.Protocols, protocol.ID(cmdutils.CleanAndTrim(string(proto)))) + } + slices.Sort(info.Protocols) if v, err := ps.Get(p, "AgentVersion"); err == nil { if vs, ok := v.(string); ok { - info.AgentVersion = vs + info.AgentVersion = cmdutils.CleanAndTrim(vs) } } @@ -205,9 +208,9 @@ func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (interface{}, error) { for _, a := range addrs { info.Addresses = append(info.Addresses, a.String()) } - sort.Strings(info.Addresses) + slices.Sort(info.Addresses) info.Protocols = node.PeerHost.Mux().Protocols() - sort.Slice(info.Protocols, func(i, j int) bool { return info.Protocols[i] < info.Protocols[j] }) + slices.Sort(info.Protocols) } info.AgentVersion = version.GetUserAgentVersion() return info, nil diff --git a/core/commands/keystore.go b/core/commands/keystore.go index a86fb281a..6ce1b5a0d 100644 --- a/core/commands/keystore.go +++ b/core/commands/keystore.go @@ -5,6 +5,7 @@ import ( "crypto/ed25519" "crypto/x509" "encoding/pem" + "errors" "fmt" "io" "os" @@ -101,12 +102,12 @@ var keyGenCmd = &cmds.Command{ typ, f := req.Options[keyStoreTypeOptionName].(string) if !f { - return fmt.Errorf("please specify a key type with --type") + return errors.New("please specify a key type with --type") } name := req.Arguments[0] if name == "self" { - return fmt.Errorf("cannot create key with name 'self'") + return errors.New("cannot create key with name 'self'") } opts := []options.KeyGenerateOption{options.Key.Type(typ)} @@ -457,7 +458,7 @@ var keyListCmd = &cmds.Command{ Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { keyEnc, err := ke.KeyEncoderFromString(req.Options[ke.OptionIPNSBase.Name()].(string)) if err != nil { - return err + return fmt.Errorf("cannot get key encoder: %w", err) } api, err := cmdenv.GetApi(env, req) @@ -467,7 +468,7 
@@ var keyListCmd = &cmds.Command{ keys, err := api.Key().List(req.Context) if err != nil { - return err + return fmt.Errorf("listing keys failed: %w", err) } list := make([]KeyOutput, 0, len(keys)) diff --git a/core/commands/log.go b/core/commands/log.go index d2cb4a1a1..0ebb1ac43 100644 --- a/core/commands/log.go +++ b/core/commands/log.go @@ -3,17 +3,37 @@ package commands import ( "fmt" "io" + "slices" cmds "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log" - lwriter "github.com/ipfs/go-log/writer" + logging "github.com/ipfs/go-log/v2" ) -// Golang os.Args overrides * and replaces the character argument with -// an array which includes every file in the user's CWD. As a -// workaround, we use 'all' instead. The util library still uses * so -// we convert it at this step. -var logAllKeyword = "all" +const ( + // allLogSubsystems is used to specify all log subsystems when setting the + // log level. + allLogSubsystems = "*" + // allLogSubsystemsAlias is a convenience alias for allLogSubsystems that + // doesn't require shell escaping. + allLogSubsystemsAlias = "all" + // defaultLogLevel is used to request and to identify the default log + // level. + defaultLogLevel = "default" + // defaultSubsystemKey is the subsystem name that is used to denote the + // default log level. We use parentheses for UI clarity to distinguish it + // from regular subsystem names. + defaultSubsystemKey = "(default)" + // logLevelOption is an option for the tail subcommand to select the log + // level to output. + logLevelOption = "log-level" + // noSubsystemSpecified is used when no subsystem argument is provided + noSubsystemSpecified = "" +) + +type logLevelOutput struct { + Levels map[string]string `json:",omitempty"` + Message string `json:",omitempty"` +} var LogCmd = &cmds.Command{ Helptext: cmds.HelpText{ @@ -22,12 +42,12 @@ var LogCmd = &cmds.Command{ 'ipfs log' contains utility commands to affect or read the logging output of a running daemon. 
-There are also two environmental variables that direct the logging +There are also two environmental variables that direct the logging system (not just for the daemon logs, but all commands): - IPFS_LOGGING - sets the level of verbosity of the logging. + GOLOG_LOG_LEVEL - sets the level of verbosity of the logging. One of: debug, info, warn, error, dpanic, panic, fatal - IPFS_LOGGING_FMT - sets formatting of the log output. - One of: color, nocolor + GOLOG_LOG_FMT - sets formatting of the log output. + One of: color, nocolor, json `, }, @@ -40,46 +60,161 @@ system (not just for the daemon logs, but all commands): var logLevelCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Change the logging level.", + Tagline: "Change or get the logging level.", ShortDescription: ` -Change the verbosity of one or all subsystems log output. This does not affect -the event log. +Get or change the logging level of one or all logging subsystems. + +This command provides a runtime alternative to the GOLOG_LOG_LEVEL +environment variable for debugging and troubleshooting. + +UNDERSTANDING DEFAULT vs '*': + +The "default" level is the fallback used by unconfigured subsystems. +You cannot set the default level directly - it only changes when you use '*'. + +The '*' wildcard represents ALL subsystems including the default level. +Setting '*' changes everything at once, including the default. 
+ +EXAMPLES - Getting levels: + + ipfs log level # Show only the default fallback level + ipfs log level all # Show all subsystem levels (100+ lines) + ipfs log level core # Show level for 'core' subsystem only + +EXAMPLES - Setting levels: + + ipfs log level core debug # Set 'core' to 'debug' (default unchanged) + ipfs log level all info # Set ALL to 'info' (including default) + ipfs log level core default # Reset 'core' to use current default level + +WILDCARD OPTIONS: + +Use 'all' (convenient) or '*' (requires escaping) to affect all subsystems: + ipfs log level all debug # Convenient - no shell escaping needed + ipfs log level '*' debug # Equivalent but needs quotes: '*' or "*" or \* + +BEHAVIOR EXAMPLES: + +Initial state (all using default 'error'): + $ ipfs log level => error + $ ipfs log level core => error + +After setting one subsystem: + $ ipfs log level core debug + $ ipfs log level => error (default unchanged!) + $ ipfs log level core => debug (explicitly set) + $ ipfs log level dht => error (still uses default) + +After setting everything with 'all': + $ ipfs log level all info + $ ipfs log level => info (default changed!) + $ ipfs log level core => info (all changed) + $ ipfs log level dht => info (all changed) + +The 'default' keyword always refers to the current default level: + $ ipfs log level => error + $ ipfs log level core default # Sets core to 'error' + $ ipfs log level all info # Changes default to 'info' + $ ipfs log level core default # Now sets core to 'info' `, }, Arguments: []cmds.Argument{ - // TODO use a different keyword for 'all' because all can theoretically - // clash with a subsystem name - cmds.StringArg("subsystem", true, false, fmt.Sprintf("The subsystem logging identifier. Use '%s' for all subsystems.", logAllKeyword)), - cmds.StringArg("level", true, false, `The log level, with 'debug' the most verbose and 'fatal' the least verbose. - One of: debug, info, warn, error, dpanic, panic, fatal. 
- `), + cmds.StringArg("subsystem", false, false, fmt.Sprintf("The subsystem logging identifier. Use '%s' or '%s' to get or set the log level of all subsystems including the default. If not specified, only show the default log level.", allLogSubsystemsAlias, allLogSubsystems)), + cmds.StringArg("level", false, false, fmt.Sprintf("The log level, with 'debug' as the most verbose and 'fatal' the least verbose. Use '%s' to set to the current default level. One of: debug, info, warn, error, dpanic, panic, fatal, %s", defaultLogLevel, defaultLogLevel)), }, NoLocal: true, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - args := req.Arguments - subsystem, level := args[0], args[1] + var level, subsystem string - if subsystem == logAllKeyword { - subsystem = "*" + if len(req.Arguments) > 0 { + subsystem = req.Arguments[0] + if len(req.Arguments) > 1 { + level = req.Arguments[1] + } + + // Normalize aliases to the canonical "*" form + if subsystem == allLogSubsystems || subsystem == allLogSubsystemsAlias { + subsystem = "*" + } } - if err := logging.SetLogLevel(subsystem, level); err != nil { - return err + // If a level is specified, then set the log level. + if level != "" { + if level == defaultLogLevel { + level = logging.DefaultLevel().String() + } + + if err := logging.SetLogLevel(subsystem, level); err != nil { + return err + } + + s := fmt.Sprintf("Changed log level of '%s' to '%s'\n", subsystem, level) + log.Info(s) + + return cmds.EmitOnce(res, &logLevelOutput{Message: s}) } - s := fmt.Sprintf("Changed log level of '%s' to '%s'\n", subsystem, level) - log.Info(s) + // Get the level for the requested subsystem. 
+ switch subsystem { + case noSubsystemSpecified: + // Return the default log level + levelMap := map[string]string{logging.DefaultName: logging.DefaultLevel().String()} + return cmds.EmitOnce(res, &logLevelOutput{Levels: levelMap}) + case allLogSubsystems, allLogSubsystemsAlias: + // Return levels for all subsystems (default behavior) + levels := logging.SubsystemLevelNames() + + // Replace default subsystem key with defaultSubsystemKey. + levels[defaultSubsystemKey] = levels[logging.DefaultName] + delete(levels, logging.DefaultName) + return cmds.EmitOnce(res, &logLevelOutput{Levels: levels}) + default: + // Return level for a specific subsystem. + level, err := logging.SubsystemLevelName(subsystem) + if err != nil { + return err + } + levelMap := map[string]string{subsystem: level} + return cmds.EmitOnce(res, &logLevelOutput{Levels: levelMap}) + } - return cmds.EmitOnce(res, &MessageOutput{s}) }, Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *MessageOutput) error { - fmt.Fprint(w, out.Message) + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *logLevelOutput) error { + if out.Message != "" { + fmt.Fprint(w, out.Message) + return nil + } + + // Check if this is an RPC call by looking for the encoding option + encoding, _ := req.Options["encoding"].(string) + isRPC := encoding == "json" + + // Determine whether to show subsystem names in output. + // Show subsystem names when: + // 1. It's an RPC call (needs JSON structure with named fields) + // 2. 
Multiple subsystems are displayed (for clarity when showing many levels) + showNames := isRPC || len(out.Levels) > 1 + + levelNames := make([]string, 0, len(out.Levels)) + for subsystem, level := range out.Levels { + if showNames { + // Show subsystem name when it's RPC or when showing multiple subsystems + levelNames = append(levelNames, fmt.Sprintf("%s: %s", subsystem, level)) + } else { + // For CLI calls with single subsystem, only show the level + levelNames = append(levelNames, level) + } + } + slices.Sort(levelNames) + for _, ln := range levelNames { + fmt.Fprintln(w, ln) + } return nil }), }, - Type: MessageOutput{}, + Type: logLevelOutput{}, } var logLsCmd = &cmds.Command{ @@ -107,22 +242,41 @@ subsystems of a running daemon. var logTailCmd = &cmds.Command{ Status: cmds.Experimental, Helptext: cmds.HelpText{ - Tagline: "Read the event log.", + Tagline: "Read and output log messages.", ShortDescription: ` -Outputs event log messages (not other log messages) as they are generated. +Outputs log messages as they are generated. -Currently broken. Follow https://github.com/ipfs/kubo/issues/9245 for updates. +NOTE: --log-level requires the server to be logging at least at this level + +Example: + + GOLOG_LOG_LEVEL="error,bitswap=debug" ipfs daemon + ipfs log tail --log-level info + +This will only return 'info' logs from bitswap and skip 'debug'. 
`, }, + Options: []cmds.Option{ + cmds.StringOption(logLevelOption, "Log level to listen to.").WithDefault(""), + }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - ctx := req.Context - r, w := io.Pipe() + var pipeReader *logging.PipeReader + logLevelString, _ := req.Options[logLevelOption].(string) + if logLevelString != "" { + logLevel, err := logging.Parse(logLevelString) + if err != nil { + return fmt.Errorf("setting log level %s: %w", logLevelString, err) + } + pipeReader = logging.NewPipeReader(logging.PipeLevel(logLevel)) + } else { + pipeReader = logging.NewPipeReader() + } + go func() { - defer w.Close() - <-ctx.Done() + <-req.Context.Done() + pipeReader.Close() }() - lwriter.WriterGroup.AddWriter(w) - return res.Emit(r) + return res.Emit(pipeReader) }, } diff --git a/core/commands/ls.go b/core/commands/ls.go index ab914bb0e..327b159a1 100644 --- a/core/commands/ls.go +++ b/core/commands/ls.go @@ -1,10 +1,12 @@ package commands import ( + "context" "fmt" "io" "os" - "sort" + "slices" + "strings" "text/tabwriter" "time" @@ -117,8 +119,8 @@ The JSON output contains type information. return nil }, func(i int) { // after each dir - sort.Slice(outputLinks, func(i, j int) bool { - return outputLinks[i].Name < outputLinks[j].Name + slices.SortFunc(outputLinks, func(a, b LsLink) int { + return strings.Compare(a.Name, b.Name) }) output[i] = LsObject{ @@ -133,23 +135,24 @@ The JSON output contains type information. 
} } + lsCtx, cancel := context.WithCancel(req.Context) + defer cancel() + for i, fpath := range paths { pth, err := cmdutils.PathOrCidPath(fpath) if err != nil { return err } - results, err := api.Unixfs().Ls(req.Context, pth, - options.Unixfs.ResolveChildren(resolveSize || resolveType)) - if err != nil { - return err - } + results := make(chan iface.DirEntry) + lsErr := make(chan error, 1) + go func() { + lsErr <- api.Unixfs().Ls(lsCtx, pth, results, + options.Unixfs.ResolveChildren(resolveSize || resolveType)) + }() processLink, dirDone = processDir() for link := range results { - if link.Err != nil { - return link.Err - } var ftype unixfs_pb.Data_DataType switch link.Type { case iface.TFile: @@ -170,10 +173,13 @@ The JSON output contains type information. Mode: link.Mode, ModTime: link.ModTime, } - if err := processLink(paths[i], lsLink); err != nil { + if err = processLink(paths[i], lsLink); err != nil { return err } } + if err = <-lsErr; err != nil { + return err + } dirDone(i) } return done() diff --git a/core/commands/mount_nofuse.go b/core/commands/mount_nofuse.go index c425aff0f..2844a4b71 100644 --- a/core/commands/mount_nofuse.go +++ b/core/commands/mount_nofuse.go @@ -1,5 +1,4 @@ //go:build !windows && nofuse -// +build !windows,nofuse package commands @@ -14,10 +13,11 @@ var MountCmd = &cmds.Command{ ShortDescription: ` This version of ipfs is compiled without fuse support, which is required for mounting. If you'd like to be able to mount, please use a version of -ipfs compiled with fuse. +Kubo compiled with fuse. 
For the latest instructions, please check the project's repository: - http://github.com/ipfs/go-ipfs + http://github.com/ipfs/kubo + https://github.com/ipfs/kubo/blob/master/docs/fuse.md `, }, } diff --git a/core/commands/mount_unix.go b/core/commands/mount_unix.go index 52a1b843b..8ca85cdaa 100644 --- a/core/commands/mount_unix.go +++ b/core/commands/mount_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !nofuse -// +build !windows,!nofuse package commands @@ -18,6 +17,7 @@ import ( const ( mountIPFSPathOptionName = "ipfs-path" mountIPNSPathOptionName = "ipns-path" + mountMFSPathOptionName = "mfs-path" ) var MountCmd = &cmds.Command{ @@ -25,14 +25,14 @@ var MountCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Mounts IPFS to the filesystem (read-only).", ShortDescription: ` -Mount IPFS at a read-only mountpoint on the OS (default: /ipfs and /ipns). +Mount IPFS at a read-only mountpoint on the OS (default: /ipfs, /ipns, /mfs). All IPFS objects will be accessible under that directory. Note that the root will not be listable, as it is virtual. Access known paths directly. You may have to create /ipfs and /ipns before using 'ipfs mount': -> sudo mkdir /ipfs /ipns -> sudo chown $(whoami) /ipfs /ipns +> sudo mkdir /ipfs /ipns /mfs +> sudo chown $(whoami) /ipfs /ipns /mfs > ipfs daemon & > ipfs mount `, @@ -44,8 +44,8 @@ root will not be listable, as it is virtual. Access known paths directly. 
You may have to create /ipfs and /ipns before using 'ipfs mount': -> sudo mkdir /ipfs /ipns -> sudo chown $(whoami) /ipfs /ipns +> sudo mkdir /ipfs /ipns /mfs +> sudo chown $(whoami) /ipfs /ipns /mfs > ipfs daemon & > ipfs mount @@ -67,6 +67,7 @@ baz > ipfs mount IPFS mounted at: /ipfs IPNS mounted at: /ipns +MFS mounted at: /mfs > cd /ipfs/QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC > ls bar @@ -81,6 +82,7 @@ baz Options: []cmds.Option{ cmds.StringOption(mountIPFSPathOptionName, "f", "The path where IPFS should be mounted."), cmds.StringOption(mountIPNSPathOptionName, "n", "The path where IPNS should be mounted."), + cmds.StringOption(mountMFSPathOptionName, "m", "The path where MFS should be mounted."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { cfg, err := env.(*oldcmds.Context).GetConfig() @@ -109,7 +111,12 @@ baz nsdir = cfg.Mounts.IPNS // NB: be sure to not redeclare! } - err = nodeMount.Mount(nd, fsdir, nsdir) + mfsdir, found := req.Options[mountMFSPathOptionName].(string) + if !found { + mfsdir = cfg.Mounts.MFS + } + + err = nodeMount.Mount(nd, fsdir, nsdir, mfsdir) if err != nil { return err } @@ -117,6 +124,7 @@ baz var output config.Mounts output.IPFS = fsdir output.IPNS = nsdir + output.MFS = mfsdir return cmds.EmitOnce(res, &output) }, Type: config.Mounts{}, @@ -124,6 +132,7 @@ baz cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, mounts *config.Mounts) error { fmt.Fprintf(w, "IPFS mounted at: %s\n", cmdenv.EscNonPrint(mounts.IPFS)) fmt.Fprintf(w, "IPNS mounted at: %s\n", cmdenv.EscNonPrint(mounts.IPNS)) + fmt.Fprintf(w, "MFS mounted at: %s\n", cmdenv.EscNonPrint(mounts.MFS)) return nil }), diff --git a/core/commands/name/ipns.go b/core/commands/name/ipns.go index 92cbb59a3..e9d5c4426 100644 --- a/core/commands/name/ipns.go +++ b/core/commands/name/ipns.go @@ -10,7 +10,7 @@ import ( "github.com/ipfs/boxo/namesys" "github.com/ipfs/boxo/path" cmds "github.com/ipfs/go-ipfs-cmds" - logging 
"github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" options "github.com/ipfs/kubo/core/coreiface/options" ) diff --git a/core/commands/name/publish.go b/core/commands/name/publish.go index 168d7fb44..918606d63 100644 --- a/core/commands/name/publish.go +++ b/core/commands/name/publish.go @@ -16,17 +16,19 @@ import ( options "github.com/ipfs/kubo/core/coreiface/options" ) -var errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override") +var errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override or `--allow-delegated` if Ipns.DelegatedPublishers are set up") const ( - ipfsPathOptionName = "ipfs-path" - resolveOptionName = "resolve" - allowOfflineOptionName = "allow-offline" - lifeTimeOptionName = "lifetime" - ttlOptionName = "ttl" - keyOptionName = "key" - quieterOptionName = "quieter" - v1compatOptionName = "v1compat" + ipfsPathOptionName = "ipfs-path" + resolveOptionName = "resolve" + allowOfflineOptionName = "allow-offline" + allowDelegatedOptionName = "allow-delegated" + lifeTimeOptionName = "lifetime" + ttlOptionName = "ttl" + keyOptionName = "key" + quieterOptionName = "quieter" + v1compatOptionName = "v1compat" + sequenceOptionName = "sequence" ) var PublishCmd = &cmds.Command{ @@ -47,6 +49,14 @@ which is the hash of its public key. You can use the 'ipfs key' commands to list and generate more names and their respective keys. +Publishing Modes: + +By default, IPNS records are published to both the DHT and any configured +HTTP delegated publishers. 
You can control this behavior with the following flags: + + --allow-offline Allow publishing when offline (publishes to local datastore, network operations are optional) + --allow-delegated Allow publishing without DHT connectivity (local + HTTP delegated publishers only) + Examples: Publish an with your default name: @@ -54,18 +64,33 @@ Publish an with your default name: > ipfs name publish /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy -Publish an with another name, added by an 'ipfs key' command: +Publish without DHT (HTTP delegated publishers only): - > ipfs key gen --type=rsa --size=2048 mykey - > ipfs name publish --key=mykey /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy - Published to QmSrPmbaUKA3ZodhzPWZnpFgcPMFWF4QsxXbkWfEptTBJd: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy - -Alternatively, publish an using a valid PeerID (as listed by -'ipfs key list -l'): - - > ipfs name publish --key=QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + > ipfs name publish --allow-delegated /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy +Publish when offline (local publish, network optional): + + > ipfs name publish --allow-offline /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +Notes: + +The --ttl option specifies the time duration for caching IPNS records. +Lower values like '1m' enable faster updates but increase network load, +while the default of 1 hour reduces traffic but may delay propagation. +Gateway operators may override this with Ipns.MaxCacheTTL configuration. + +The --sequence option sets a custom sequence number for the IPNS record. 
+The sequence number must be monotonically increasing (greater than the +current record's sequence). This is useful for manually coordinating +updates across multiple writers. If not specified, the sequence number +increments automatically. + +For faster IPNS updates, consider: +- Using a lower --ttl value (e.g., '1m' for quick updates) +- Enabling PubSub via Ipns.UsePubsub in the config + `, }, @@ -79,7 +104,9 @@ Alternatively, publish an using a valid PeerID (as listed by cmds.StringOption(ttlOptionName, "Time duration hint, akin to --lifetime, indicating how long to cache this record before checking for updates.").WithDefault(ipns.DefaultRecordTTL.String()), cmds.BoolOption(quieterOptionName, "Q", "Write only final IPNS Name encoded as CIDv1 (for use in /ipns content paths)."), cmds.BoolOption(v1compatOptionName, "Produce a backward-compatible IPNS Record by including fields for both V1 and V2 signatures.").WithDefault(true), - cmds.BoolOption(allowOfflineOptionName, "When --offline, save the IPNS record to the local datastore without broadcasting to the network (instead of failing)."), + cmds.BoolOption(allowOfflineOptionName, "Allow publishing when offline - publishes to local datastore without requiring network connectivity."), + cmds.BoolOption(allowDelegatedOptionName, "Allow publishing without DHT connectivity - uses local datastore and HTTP delegated publishers only."), + cmds.Uint64Option(sequenceOptionName, "Set a custom sequence number for the IPNS record (must be higher than current)."), ke.OptionIPNSBase, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { @@ -89,9 +116,15 @@ Alternatively, publish an using a valid PeerID (as listed by } allowOffline, _ := req.Options[allowOfflineOptionName].(bool) + allowDelegated, _ := req.Options[allowDelegatedOptionName].(bool) compatibleWithV1, _ := req.Options[v1compatOptionName].(bool) kname, _ := req.Options[keyOptionName].(string) + // Validate flag combinations + if 
allowOffline && allowDelegated { + return errors.New("cannot use both --allow-offline and --allow-delegated flags") + } + validTimeOpt, _ := req.Options[lifeTimeOptionName].(string) validTime, err := time.ParseDuration(validTimeOpt) if err != nil { @@ -100,6 +133,7 @@ Alternatively, publish an using a valid PeerID (as listed by opts := []options.NamePublishOption{ options.Name.AllowOffline(allowOffline), + options.Name.AllowDelegated(allowDelegated), options.Name.Key(kname), options.Name.ValidTime(validTime), options.Name.CompatibleWithV1(compatibleWithV1), @@ -114,6 +148,10 @@ Alternatively, publish an using a valid PeerID (as listed by opts = append(opts, options.Name.TTL(d)) } + if sequence, found := req.Options[sequenceOptionName].(uint64); found { + opts = append(opts, options.Name.Sequence(sequence)) + } + p, err := cmdutils.PathOrCidPath(req.Arguments[0]) if err != nil { return err diff --git a/core/commands/p2p.go b/core/commands/p2p.go index 7b8b416e5..1de0bfca3 100644 --- a/core/commands/p2p.go +++ b/core/commands/p2p.go @@ -50,9 +50,17 @@ type P2PStreamsOutput struct { Streams []P2PStreamInfoOutput } +// P2PForegroundOutput is output type for foreground mode status messages +type P2PForegroundOutput struct { + Status string // "active" or "closing" + Protocol string + Address string +} + const ( allowCustomProtocolOptionName = "allow-custom-protocol" reportPeerIDOptionName = "report-peer-id" + foregroundOptionName = "foreground" ) var resolveTimeout = 10 * time.Second @@ -83,15 +91,37 @@ var p2pForwardCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Forward connections to libp2p service.", ShortDescription: ` -Forward connections made to to . +Forward connections made to to via libp2p. - specifies the libp2p protocol name to use for libp2p -connections and/or handlers. It must be prefixed with '` + P2PProtoPrefix + `'. +Creates a local TCP listener that tunnels connections through libp2p to a +remote peer's p2p listener. 
Similar to SSH port forwarding (-L flag). -Example: - ipfs p2p forward ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/4567 /p2p/QmPeer - - Forward connections to 127.0.0.1:4567 to '` + P2PProtoPrefix + `myproto' service on /p2p/QmPeer +ARGUMENTS: + Protocol name (must start with '` + P2PProtoPrefix + `') + Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000) + Remote peer multiaddr (e.g., /p2p/PeerID) + +FOREGROUND MODE (--foreground, -f): + + By default, the forwarder runs in the daemon and the command returns + immediately. Use --foreground to block until interrupted: + + - Ctrl+C or SIGTERM: Removes the forwarder and exits + - 'ipfs p2p close': Removes the forwarder and exits + - Daemon shutdown: Forwarder is automatically removed + + Useful for systemd services or scripts that need cleanup on exit. + +EXAMPLES: + + # Persistent forwarder (command returns immediately) + ipfs p2p forward /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID + + # Temporary forwarder (removed when command exits) + ipfs p2p forward -f /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID + +Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md `, }, Arguments: []cmds.Argument{ @@ -101,6 +131,7 @@ Example: }, Options: []cmds.Option{ cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"), + cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; forwarder is removed when command exits"), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { n, err := p2pGetNode(env) @@ -130,7 +161,51 @@ Example: return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace") } - return forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets) + listener, err := forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets) + if err != nil { + return err + } + + foreground, _ := req.Options[foregroundOptionName].(bool) + if foreground { + if err := res.Emit(&P2PForegroundOutput{ + Status: "active", + 
Protocol: protoOpt, + Address: listenOpt, + }); err != nil { + return err + } + // Wait for either context cancellation (Ctrl+C/daemon shutdown) + // or listener removal (ipfs p2p close) + select { + case <-req.Context.Done(): + // SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing) + n.P2P.ListenersLocal.Close(func(l p2p.Listener) bool { + return l == listener + }) + return nil + case <-listener.Done(): + // Closed via "ipfs p2p close" - emit closing message + return res.Emit(&P2PForegroundOutput{ + Status: "closing", + Protocol: protoOpt, + Address: listenOpt, + }) + } + } + + return nil + }, + Type: P2PForegroundOutput{}, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error { + if out.Status == "active" { + fmt.Fprintf(w, "Forwarding %s to %s, waiting for interrupt...\n", out.Protocol, out.Address) + } else if out.Status == "closing" { + fmt.Fprintf(w, "Received interrupt, removing forwarder for %s\n", out.Protocol) + } + return nil + }), }, } @@ -185,14 +260,40 @@ var p2pListenCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Create libp2p service.", ShortDescription: ` -Create libp2p service and forward connections made to . +Create a libp2p protocol handler that forwards incoming connections to +. - specifies the libp2p handler name. It must be prefixed with '` + P2PProtoPrefix + `'. +When a remote peer connects using 'ipfs p2p forward', the connection is +forwarded to your local service. Similar to SSH port forwarding (server side). -Example: - ipfs p2p listen ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/1234 - - Forward connections to 'myproto' libp2p service to 127.0.0.1:1234 +ARGUMENTS: + Protocol name (must start with '` + P2PProtoPrefix + `') + Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000) + +FOREGROUND MODE (--foreground, -f): + + By default, the listener runs in the daemon and the command returns + immediately. 
Use --foreground to block until interrupted: + + - Ctrl+C or SIGTERM: Removes the listener and exits + - 'ipfs p2p close': Removes the listener and exits + - Daemon shutdown: Listener is automatically removed + + Useful for systemd services or scripts that need cleanup on exit. + +EXAMPLES: + + # Persistent listener (command returns immediately) + ipfs p2p listen /x/myapp /ip4/127.0.0.1/tcp/3000 + + # Temporary listener (removed when command exits) + ipfs p2p listen -f /x/myapp /ip4/127.0.0.1/tcp/3000 + + # Report connecting peer ID to the target application + ipfs p2p listen -r /x/myapp /ip4/127.0.0.1/tcp/3000 + +Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md `, }, Arguments: []cmds.Argument{ @@ -202,6 +303,7 @@ Example: Options: []cmds.Option{ cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"), cmds.BoolOption(reportPeerIDOptionName, "r", "Send remote base58 peerid to target when a new connection is established"), + cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; listener is removed when command exits"), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { n, err := p2pGetNode(env) @@ -231,8 +333,51 @@ Example: return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace") } - _, err = n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID) - return err + listener, err := n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID) + if err != nil { + return err + } + + foreground, _ := req.Options[foregroundOptionName].(bool) + if foreground { + if err := res.Emit(&P2PForegroundOutput{ + Status: "active", + Protocol: protoOpt, + Address: targetOpt, + }); err != nil { + return err + } + // Wait for either context cancellation (Ctrl+C/daemon shutdown) + // or listener removal (ipfs p2p close) + select { + case <-req.Context.Done(): + // SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing) + n.P2P.ListenersP2P.Close(func(l 
p2p.Listener) bool { + return l == listener + }) + return nil + case <-listener.Done(): + // Closed via "ipfs p2p close" - emit closing message + return res.Emit(&P2PForegroundOutput{ + Status: "closing", + Protocol: protoOpt, + Address: targetOpt, + }) + } + } + + return nil + }, + Type: P2PForegroundOutput{}, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error { + if out.Status == "active" { + fmt.Fprintf(w, "Listening on %s, forwarding to %s, waiting for interrupt...\n", out.Protocol, out.Address) + } else if out.Status == "closing" { + fmt.Fprintf(w, "Received interrupt, removing listener for %s\n", out.Protocol) + } + return nil + }), }, } @@ -250,7 +395,7 @@ func checkPort(target ma.Multiaddr) error { if sport != "" { return sport, nil } - return "", fmt.Errorf("address does not contain tcp or udp protocol") + return "", errors.New("address does not contain tcp or udp protocol") } sport, err := getPort() @@ -264,18 +409,16 @@ func checkPort(target ma.Multiaddr) error { } if port == 0 { - return fmt.Errorf("port can not be 0") + return errors.New("port can not be 0") } return nil } // forwardLocal forwards local connections to a libp2p service -func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) error { +func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) (p2p.Listener, error) { ps.AddAddrs(addr.ID, addr.Addrs, pstore.TempAddrTTL) - // TODO: return some info - _, err := p.ForwardLocal(ctx, addr.ID, proto, bindAddr) - return err + return p.ForwardLocal(ctx, addr.ID, proto, bindAddr) } const ( diff --git a/core/commands/pin/pin.go b/core/commands/pin/pin.go index b87760aaf..cab868c30 100644 --- a/core/commands/pin/pin.go +++ b/core/commands/pin/pin.go @@ -8,9 +8,11 @@ import ( "os" "time" + 
"github.com/dustin/go-humanize" bserv "github.com/ipfs/boxo/blockservice" offline "github.com/ipfs/boxo/exchange/offline" dag "github.com/ipfs/boxo/ipld/merkledag" + pin "github.com/ipfs/boxo/pinning/pinner" verifcid "github.com/ipfs/boxo/verifcid" cid "github.com/ipfs/go-cid" cidenc "github.com/ipfs/go-cidutil/cidenc" @@ -46,6 +48,7 @@ type PinOutput struct { type AddPinOutput struct { Pins []string `json:",omitempty"` Progress int `json:",omitempty"` + Bytes uint64 `json:",omitempty"` } const ( @@ -99,6 +102,11 @@ It may take some time. Pass '--progress' to track the progress. name, _ := req.Options[pinNameOptionName].(string) showProgress, _ := req.Options[pinProgressOptionName].(bool) + // Validate pin name + if err := cmdutils.ValidatePinName(name); err != nil { + return err + } + if err := req.ParseBodyArgs(); err != nil { return err } @@ -141,14 +149,15 @@ It may take some time. Pass '--progress' to track the progress. return val.err } - if pv := v.Value(); pv != 0 { - if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil { + if ps := v.ProgressStat(); ps.Nodes != 0 { + if err := res.Emit(&AddPinOutput{Progress: ps.Nodes, Bytes: ps.Bytes}); err != nil { return err } } return res.Emit(&AddPinOutput{Pins: val.pins}) case <-ticker.C: - if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil { + ps := v.ProgressStat() + if err := res.Emit(&AddPinOutput{Progress: ps.Nodes, Bytes: ps.Bytes}); err != nil { return err } case <-ctx.Done(): @@ -191,7 +200,7 @@ It may take some time. Pass '--progress' to track the progress. 
} if out.Pins == nil { // this can only happen if the progress option is set - fmt.Fprintf(os.Stderr, "Fetched/Processed %d nodes\r", out.Progress) + fmt.Fprintf(os.Stderr, "Fetched/Processed %d nodes (%s)\r", out.Progress, humanize.Bytes(out.Bytes)) } else { err = re.Emit(out) if err != nil { @@ -370,18 +379,30 @@ Example: return err } + n, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + if n.Pinning == nil { + return fmt.Errorf("pinning service not available") + } + typeStr, _ := req.Options[pinTypeOptionName].(string) stream, _ := req.Options[pinStreamOptionName].(bool) displayNames, _ := req.Options[pinNamesOptionName].(bool) name, _ := req.Options[pinNameOptionName].(string) - switch typeStr { - case "all", "direct", "indirect", "recursive": - default: - err = fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr) + // Validate name filter + if err := cmdutils.ValidatePinName(name); err != nil { return err } + mode, ok := pin.StringToMode(typeStr) + if !ok { + return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr) + } + // For backward compatibility, we accumulate the pins in the same output type as before. 
var emit func(PinLsOutputWrapper) error lgcList := map[string]PinLsType{} @@ -397,7 +418,7 @@ Example: } if len(req.Arguments) > 0 { - err = pinLsKeys(req, typeStr, api, emit) + err = pinLsKeys(req, mode, displayNames || name != "", n.Pinning, api, emit) } else { err = pinLsAll(req, typeStr, displayNames || name != "", name, api, emit) } @@ -482,23 +503,14 @@ type PinLsObject struct { Type string `json:",omitempty"` } -func pinLsKeys(req *cmds.Request, typeStr string, api coreiface.CoreAPI, emit func(value PinLsOutputWrapper) error) error { +func pinLsKeys(req *cmds.Request, mode pin.Mode, displayNames bool, pinner pin.Pinner, api coreiface.CoreAPI, emit func(value PinLsOutputWrapper) error) error { enc, err := cmdenv.GetCidEncoder(req) if err != nil { return err } - switch typeStr { - case "all", "direct", "indirect", "recursive": - default: - return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr) - } - - opt, err := options.Pin.IsPinned.Type(typeStr) - if err != nil { - panic("unhandled pin type") - } - + // Collect CIDs to check + cids := make([]cid.Cid, 0, len(req.Arguments)) for _, p := range req.Arguments { p, err := cmdutils.PathOrCidPath(p) if err != nil { @@ -510,25 +522,31 @@ func pinLsKeys(req *cmds.Request, typeStr string, api coreiface.CoreAPI, emit fu return err } - pinType, pinned, err := api.Pin().IsPinned(req.Context, rp, opt) - if err != nil { - return err + cids = append(cids, rp.RootCid()) + } + + // Check pins using the new type-specific method + pinned, err := pinner.CheckIfPinnedWithType(req.Context, mode, displayNames, cids...) 
+ if err != nil { + return err + } + + // Process results + for i, p := range pinned { + if !p.Pinned() { + return fmt.Errorf("path '%s' is not pinned", req.Arguments[i]) } - if !pinned { - return fmt.Errorf("path '%s' is not pinned", p) - } - - switch pinType { - case "direct", "indirect", "recursive", "internal": - default: - pinType = "indirect through " + pinType + pinType, _ := pin.ModeToString(p.Mode) + if p.Mode == pin.Indirect && p.Via.Defined() { + pinType = "indirect through " + enc.Encode(p.Via) } err = emit(PinLsOutputWrapper{ PinLsObject: PinLsObject{ Type: pinType, - Cid: enc.Encode(rp.RootCid()), + Cid: enc.Encode(cids[i]), + Name: p.Name, }, }) if err != nil { @@ -545,11 +563,9 @@ func pinLsAll(req *cmds.Request, typeStr string, detailed bool, name string, api return err } - switch typeStr { - case "all", "direct", "indirect", "recursive": - default: - err = fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr) - return err + _, ok := pin.StringToMode(typeStr) + if !ok { + return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr) } opt, err := options.Pin.Ls.Type(typeStr) @@ -557,15 +573,16 @@ func pinLsAll(req *cmds.Request, typeStr string, detailed bool, name string, api panic("unhandled pin type") } - pins, err := api.Pin().Ls(req.Context, opt, options.Pin.Ls.Detailed(detailed), options.Pin.Ls.Name(name)) - if err != nil { - return err - } + pins := make(chan coreiface.Pin) + lsErr := make(chan error, 1) + lsCtx, cancel := context.WithCancel(req.Context) + defer cancel() + + go func() { + lsErr <- api.Pin().Ls(lsCtx, pins, opt, options.Pin.Ls.Detailed(detailed), options.Pin.Ls.Name(name)) + }() for p := range pins { - if err := p.Err(); err != nil { - return err - } err = emit(PinLsOutputWrapper{ PinLsObject: PinLsObject{ Type: p.Type(), @@ -577,8 +594,7 @@ func pinLsAll(req *cmds.Request, typeStr string, detailed bool, name string, api return err } } - - return nil + 
return <-lsErr } const ( diff --git a/core/commands/pin/remotepin.go b/core/commands/pin/remotepin.go index 3721913e7..3936ce635 100644 --- a/core/commands/pin/remotepin.go +++ b/core/commands/pin/remotepin.go @@ -18,7 +18,7 @@ import ( pinclient "github.com/ipfs/boxo/pinning/remote/client" cid "github.com/ipfs/go-cid" cmds "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" config "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core/commands/cmdenv" "github.com/ipfs/kubo/core/commands/cmdutils" @@ -171,6 +171,10 @@ NOTE: a comma-separated notation is supported in CLI for convenience: opts := []pinclient.AddOption{} if name, nameFound := req.Options[pinNameOptionName]; nameFound { nameStr := name.(string) + // Validate pin name + if err := cmdutils.ValidatePinName(nameStr); err != nil { + return err + } opts = append(opts, pinclient.PinOpts.WithName(nameStr)) } @@ -285,26 +289,26 @@ Pass '--status=queued,pinning,pinned,failed' to list pins in all states. cmds.DelimitedStringsOption(",", pinStatusOptionName, "Return pins with the specified statuses (queued,pinning,pinned,failed).").WithDefault([]string{"pinned"}), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - ctx, cancel := context.WithCancel(req.Context) - defer cancel() - c, err := getRemotePinServiceFromRequest(req, env) if err != nil { return err } - psCh, errCh, err := lsRemote(ctx, req, c) - if err != nil { - return err - } + ctx, cancel := context.WithCancel(req.Context) + defer cancel() + psCh := make(chan pinclient.PinStatusGetter) + lsErr := make(chan error, 1) + go func() { + lsErr <- lsRemote(ctx, req, c, psCh) + }() for ps := range psCh { if err := res.Emit(toRemotePinOutput(ps)); err != nil { return err } } - return <-errCh + return <-lsErr }, Type: RemotePinOutput{}, Encoders: cmds.EncoderMap{ @@ -317,10 +321,15 @@ Pass '--status=queued,pinning,pinned,failed' to list pins in all states. 
} // Executes GET /pins/?query-with-filters -func lsRemote(ctx context.Context, req *cmds.Request, c *pinclient.Client) (chan pinclient.PinStatusGetter, chan error, error) { +func lsRemote(ctx context.Context, req *cmds.Request, c *pinclient.Client, out chan<- pinclient.PinStatusGetter) error { opts := []pinclient.LsOption{} if name, nameFound := req.Options[pinNameOptionName]; nameFound { nameStr := name.(string) + // Validate name filter + if err := cmdutils.ValidatePinName(nameStr); err != nil { + close(out) + return err + } opts = append(opts, pinclient.PinOpts.FilterName(nameStr)) } @@ -330,7 +339,8 @@ func lsRemote(ctx context.Context, req *cmds.Request, c *pinclient.Client) (chan for _, rawCID := range cidsRawArr { parsedCID, err := cid.Decode(rawCID) if err != nil { - return nil, nil, fmt.Errorf("CID %q cannot be parsed: %v", rawCID, err) + close(out) + return fmt.Errorf("CID %q cannot be parsed: %v", rawCID, err) } parsedCIDs = append(parsedCIDs, parsedCID) } @@ -342,16 +352,15 @@ func lsRemote(ctx context.Context, req *cmds.Request, c *pinclient.Client) (chan for _, rawStatus := range statusRawArr { s := pinclient.Status(rawStatus) if s.String() == string(pinclient.StatusUnknown) { - return nil, nil, fmt.Errorf("status %q is not valid", rawStatus) + close(out) + return fmt.Errorf("status %q is not valid", rawStatus) } parsedStatuses = append(parsedStatuses, s) } opts = append(opts, pinclient.PinOpts.FilterStatus(parsedStatuses...)) } - psCh, errCh := c.Ls(ctx, opts...) - - return psCh, errCh, nil + return c.Ls(ctx, out, opts...) 
} var rmRemotePinCmd = &cmds.Command{ @@ -393,36 +402,37 @@ To list and then remove all pending pin requests, pass an explicit status list: cmds.BoolOption(pinForceOptionName, "Allow removal of multiple pins matching the query without additional confirmation.").WithDefault(false), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - ctx, cancel := context.WithCancel(req.Context) - defer cancel() - c, err := getRemotePinServiceFromRequest(req, env) if err != nil { return err } rmIDs := []string{} - if len(req.Arguments) == 0 { - psCh, errCh, err := lsRemote(ctx, req, c) - if err != nil { - return err - } - for ps := range psCh { - rmIDs = append(rmIDs, ps.GetRequestId()) - } - if err = <-errCh; err != nil { - return fmt.Errorf("error while listing remote pins: %v", err) - } - - if len(rmIDs) > 1 && !req.Options[pinForceOptionName].(bool) { - return fmt.Errorf("multiple remote pins are matching this query, add --force to confirm the bulk removal") - } - } else { + if len(req.Arguments) != 0 { return fmt.Errorf("unexpected argument %q", req.Arguments[0]) } + psCh := make(chan pinclient.PinStatusGetter) + errCh := make(chan error, 1) + ctx, cancel := context.WithCancel(req.Context) + defer cancel() + + go func() { + errCh <- lsRemote(ctx, req, c, psCh) + }() + for ps := range psCh { + rmIDs = append(rmIDs, ps.GetRequestId()) + } + if err = <-errCh; err != nil { + return fmt.Errorf("error while listing remote pins: %v", err) + } + + if len(rmIDs) > 1 && !req.Options[pinForceOptionName].(bool) { + return fmt.Errorf("multiple remote pins are matching this query, add --force to confirm the bulk removal") + } + for _, rmID := range rmIDs { - if err := c.DeleteByID(ctx, rmID); err != nil { + if err = c.DeleteByID(ctx, rmID); err != nil { return fmt.Errorf("removing pin identified by requestid=%q failed: %v", rmID, err) } } diff --git a/core/commands/provide.go b/core/commands/provide.go new file mode 100644 index 000000000..c9d3954cf --- 
/dev/null +++ b/core/commands/provide.go @@ -0,0 +1,596 @@ +package commands + +import ( + "context" + "errors" + "fmt" + "io" + "strings" + "text/tabwriter" + "time" + "unicode/utf8" + + humanize "github.com/dustin/go-humanize" + boxoprovider "github.com/ipfs/boxo/provider" + cid "github.com/ipfs/go-cid" + cmds "github.com/ipfs/go-ipfs-cmds" + "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/libp2p/go-libp2p-kad-dht/fullrt" + "github.com/libp2p/go-libp2p-kad-dht/provider" + "github.com/libp2p/go-libp2p-kad-dht/provider/buffered" + "github.com/libp2p/go-libp2p-kad-dht/provider/dual" + "github.com/libp2p/go-libp2p-kad-dht/provider/stats" + routing "github.com/libp2p/go-libp2p/core/routing" + "github.com/probe-lab/go-libdht/kad/key" + "golang.org/x/exp/constraints" +) + +const ( + provideQuietOptionName = "quiet" + provideLanOptionName = "lan" + + provideStatAllOptionName = "all" + provideStatCompactOptionName = "compact" + provideStatNetworkOptionName = "network" + provideStatConnectivityOptionName = "connectivity" + provideStatOperationsOptionName = "operations" + provideStatTimingsOptionName = "timings" + provideStatScheduleOptionName = "schedule" + provideStatQueuesOptionName = "queues" + provideStatWorkersOptionName = "workers" + + // lowWorkerThreshold is the threshold below which worker availability warnings are shown + lowWorkerThreshold = 2 +) + +var ProvideCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Control and monitor content providing", + ShortDescription: ` +Control providing operations. + +OVERVIEW: + +The provider system advertises content by publishing provider records, +allowing other nodes to discover which peers have specific content. +Content is reprovided periodically (every Provide.DHT.Interval) +according to Provide.Strategy. 
+ +CONFIGURATION: + +Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide + +SEE ALSO: + +For ad-hoc one-time provide, see 'ipfs routing provide' +`, + }, + + Subcommands: map[string]*cmds.Command{ + "clear": provideClearCmd, + "stat": provideStatCmd, + }, +} + +var provideClearCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Clear all CIDs from the provide queue.", + ShortDescription: ` +Clear all CIDs pending to be provided for the first time. + +BEHAVIOR: + +This command removes CIDs from the provide queue that are waiting to be +advertised to the DHT for the first time. It does not affect content that +is already being reprovided on schedule. + +AUTOMATIC CLEARING: + +Kubo will automatically clear the queue when it detects a change of +Provide.Strategy upon a restart. + +Learn: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy +`, + }, + Options: []cmds.Option{ + cmds.BoolOption(provideQuietOptionName, "q", "Do not write output."), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + n, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + quiet, _ := req.Options[provideQuietOptionName].(bool) + if n.Provider == nil { + return nil + } + + cleared := n.Provider.Clear() + if quiet { + return nil + } + _ = re.Emit(cleared) + + return nil + }, + Type: int(0), + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, cleared int) error { + quiet, _ := req.Options[provideQuietOptionName].(bool) + if quiet { + return nil + } + + _, err := fmt.Fprintf(w, "removed %d items from provide queue\n", cleared) + return err + }), + }, +} + +type provideStats struct { + Sweep *stats.Stats + Legacy *boxoprovider.ReproviderStats + FullRT bool // only used for legacy stats +} + +// extractSweepingProvider extracts a SweepingProvider from the given provider interface. 
+// It handles unwrapping buffered and dual providers, selecting LAN or WAN as specified. +// Returns nil if the provider is not a sweeping provider type. +func extractSweepingProvider(prov any, useLAN bool) *provider.SweepingProvider { + switch p := prov.(type) { + case *provider.SweepingProvider: + return p + case *dual.SweepingProvider: + if useLAN { + return p.LAN + } + return p.WAN + case *buffered.SweepingProvider: + // Recursively extract from the inner provider + return extractSweepingProvider(p.Provider, useLAN) + default: + return nil + } +} + +var provideStatCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Show statistics about the provider system", + ShortDescription: ` +Returns statistics about the node's provider system. + +OVERVIEW: + +The provide system advertises content to the DHT by publishing provider +records that map CIDs to your peer ID. These records expire after a fixed +TTL to account for node churn, so content must be reprovided periodically +to stay discoverable. + +Two provider types exist: + +- Sweep provider: Divides the DHT keyspace into regions and systematically + sweeps through them over the reprovide interval. Batches CIDs allocated + to the same DHT servers, reducing lookups from N (one per CID) to a + small static number based on DHT size (~3k for 10k DHT servers). Spreads + work evenly over time to prevent resource spikes and ensure announcements + happen just before records expire. + +- Legacy provider: Processes each CID individually with separate DHT + lookups. Attempts to reprovide all content as quickly as possible at the + start of each cycle. Works well for small datasets but struggles with + large collections. 
+ +Learn more: +- Config: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide +- Metrics: https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md + +DEFAULT OUTPUT: + +Shows a brief summary including queue sizes, scheduled items, average record +holders, ongoing/total provides, and worker warnings. + +DETAILED OUTPUT: + +Use --all for detailed statistics with these sections: connectivity, queues, +schedule, timings, network, operations, and workers. Individual sections can +be displayed with their flags (e.g., --network, --operations). Multiple flags +can be combined. + +Use --compact for monitoring-friendly 2-column output (requires --all). + +EXAMPLES: + +Monitor provider statistics in real-time with 2-column layout: + + watch ipfs provide stat --all --compact + +Get statistics in JSON format for programmatic processing: + + ipfs provide stat --enc=json | jq + +NOTES: + +- This interface is experimental and may change between releases +- Legacy provider shows basic stats only (no flags supported) +- "Regions" are keyspace divisions for spreading reprovide work +- For Dual DHT: use --lan for LAN provider stats (default is WAN) +`, + }, + Arguments: []cmds.Argument{}, + Options: []cmds.Option{ + cmds.BoolOption(provideLanOptionName, "Show stats for LAN DHT only (for Sweep+Dual DHT only)"), + cmds.BoolOption(provideStatAllOptionName, "a", "Display all provide sweep stats"), + cmds.BoolOption(provideStatCompactOptionName, "Display stats in 2-column layout (requires --all)"), + cmds.BoolOption(provideStatConnectivityOptionName, "Display DHT connectivity status"), + cmds.BoolOption(provideStatNetworkOptionName, "Display network stats (peers, reachability, region size)"), + cmds.BoolOption(provideStatScheduleOptionName, "Display reprovide schedule (CIDs/regions scheduled, next reprovide time)"), + cmds.BoolOption(provideStatTimingsOptionName, "Display timing information (uptime, cycle start, reprovide interval)"), + 
cmds.BoolOption(provideStatWorkersOptionName, "Display worker pool stats (active/available/queued workers)"), + cmds.BoolOption(provideStatOperationsOptionName, "Display operation stats (ongoing/past provides, rates, errors)"), + cmds.BoolOption(provideStatQueuesOptionName, "Display provide and reprovide queue sizes"), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + if !nd.IsOnline { + return ErrNotOnline + } + + lanStats, _ := req.Options[provideLanOptionName].(bool) + + // Handle legacy provider + if legacySys, ok := nd.Provider.(boxoprovider.System); ok { + if lanStats { + return errors.New("LAN stats only available for Sweep provider with Dual DHT") + } + stats, err := legacySys.Stat() + if err != nil { + return err + } + _, fullRT := nd.DHTClient.(*fullrt.FullRT) + return res.Emit(provideStats{Legacy: &stats, FullRT: fullRT}) + } + + // Extract sweeping provider (handles buffered and dual unwrapping) + sweepingProvider := extractSweepingProvider(nd.Provider, lanStats) + if sweepingProvider == nil { + if lanStats { + return errors.New("LAN stats only available for Sweep provider with Dual DHT") + } + return fmt.Errorf("stats not available with current routing system %T", nd.Provider) + } + + s := sweepingProvider.Stats() + return res.Emit(provideStats{Sweep: &s}) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s provideStats) error { + wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0) + defer wtr.Flush() + + all, _ := req.Options[provideStatAllOptionName].(bool) + compact, _ := req.Options[provideStatCompactOptionName].(bool) + connectivity, _ := req.Options[provideStatConnectivityOptionName].(bool) + queues, _ := req.Options[provideStatQueuesOptionName].(bool) + schedule, _ := req.Options[provideStatScheduleOptionName].(bool) + network, _ := req.Options[provideStatNetworkOptionName].(bool) + 
timings, _ := req.Options[provideStatTimingsOptionName].(bool) + operations, _ := req.Options[provideStatOperationsOptionName].(bool) + workers, _ := req.Options[provideStatWorkersOptionName].(bool) + + flagCount := 0 + for _, enabled := range []bool{all, connectivity, queues, schedule, network, timings, operations, workers} { + if enabled { + flagCount++ + } + } + + if s.Legacy != nil { + if flagCount > 0 { + return errors.New("cannot use flags with legacy provide stats") + } + fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.Legacy.TotalReprovides)) + fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.Legacy.AvgReprovideDuration)) + fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.Legacy.LastReprovideDuration)) + if !s.Legacy.LastRun.IsZero() { + fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.Legacy.LastRun)) + if s.FullRT { + fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.Legacy.LastRun.Add(s.Legacy.ReprovideInterval))) + } + } + return nil + } + + if s.Sweep == nil { + return errors.New("no provide stats available") + } + + // Sweep provider stats + if s.Sweep.Closed { + fmt.Fprintf(wtr, "Provider is closed\n") + return nil + } + + if compact && !all { + return errors.New("--compact requires --all flag") + } + + brief := flagCount == 0 + showHeadings := flagCount > 1 || all + + compactMode := all && compact + var cols [2][]string + col0MaxWidth := 0 + // formatLine handles both normal and compact output modes: + // - Normal mode: all lines go to cols[0], col parameter is ignored + // - Compact mode: col 0 for left column, col 1 for right column + formatLine := func(col int, format string, a ...any) { + if compactMode { + s := fmt.Sprintf(format, a...) 
+ cols[col] = append(cols[col], s) + if col == 0 { + col0MaxWidth = max(col0MaxWidth, utf8.RuneCountInString(s)) + } + return + } + format = strings.Replace(format, ": ", ":\t", 1) + format = strings.Replace(format, ", ", ",\t", 1) + cols[0] = append(cols[0], fmt.Sprintf(format, a...)) + } + addBlankLine := func(col int) { + if !brief { + formatLine(col, "") + } + } + sectionTitle := func(col int, title string) { + if !brief && showHeadings { + //nolint:govet // dynamic format string is intentional + formatLine(col, title+":") + } + } + + indent := " " + if brief || !showHeadings { + indent = "" + } + + // Connectivity + if all || connectivity || brief && s.Sweep.Connectivity.Status != "online" { + sectionTitle(1, "Connectivity") + since := s.Sweep.Connectivity.Since + if since.IsZero() { + formatLine(1, "%sStatus: %s", indent, s.Sweep.Connectivity.Status) + } else { + formatLine(1, "%sStatus: %s (%s)", indent, s.Sweep.Connectivity.Status, humanTime(since)) + } + addBlankLine(1) + } + + // Queues + if all || queues || brief { + sectionTitle(1, "Queues") + formatLine(1, "%sProvide queue: %s CIDs, %s regions", indent, humanSI(s.Sweep.Queues.PendingKeyProvides, 1), humanSI(s.Sweep.Queues.PendingRegionProvides, 1)) + formatLine(1, "%sReprovide queue: %s regions", indent, humanSI(s.Sweep.Queues.PendingRegionReprovides, 1)) + addBlankLine(1) + } + + // Schedule + if all || schedule || brief { + sectionTitle(0, "Schedule") + formatLine(0, "%sCIDs scheduled: %s", indent, humanNumber(s.Sweep.Schedule.Keys)) + formatLine(0, "%sRegions scheduled: %s", indent, humanNumberOrNA(s.Sweep.Schedule.Regions)) + if !brief { + formatLine(0, "%sAvg prefix length: %s", indent, humanFloatOrNA(s.Sweep.Schedule.AvgPrefixLength)) + nextPrefix := key.BitString(s.Sweep.Schedule.NextReprovidePrefix) + if nextPrefix == "" { + nextPrefix = "N/A" + } + formatLine(0, "%sNext region prefix: %s", indent, nextPrefix) + nextReprovideAt := s.Sweep.Schedule.NextReprovideAt.Format("15:04:05") + if 
s.Sweep.Schedule.NextReprovideAt.IsZero() { + nextReprovideAt = "N/A" + } + formatLine(0, "%sNext region reprovide: %s", indent, nextReprovideAt) + } + addBlankLine(0) + } + + // Timings + if all || timings { + sectionTitle(1, "Timings") + formatLine(1, "%sUptime: %s (%s)", indent, humanDuration(s.Sweep.Timing.Uptime), humanTime(time.Now().Add(-s.Sweep.Timing.Uptime))) + formatLine(1, "%sCurrent time offset: %s", indent, humanDuration(s.Sweep.Timing.CurrentTimeOffset)) + formatLine(1, "%sCycle started: %s", indent, humanTime(s.Sweep.Timing.CycleStart)) + formatLine(1, "%sReprovide interval: %s", indent, humanDuration(s.Sweep.Timing.ReprovidesInterval)) + addBlankLine(1) + } + + // Network + if all || network || brief { + sectionTitle(0, "Network") + formatLine(0, "%sAvg record holders: %s", indent, humanFloatOrNA(s.Sweep.Network.AvgHolders)) + if !brief { + formatLine(0, "%sPeers swept: %s", indent, humanInt(s.Sweep.Network.Peers)) + formatLine(0, "%sFull keyspace coverage: %t", indent, s.Sweep.Network.CompleteKeyspaceCoverage) + if s.Sweep.Network.Peers > 0 { + formatLine(0, "%sReachable peers: %s (%s%%)", indent, humanInt(s.Sweep.Network.Reachable), humanNumber(100*s.Sweep.Network.Reachable/s.Sweep.Network.Peers)) + } else { + formatLine(0, "%sReachable peers: %s", indent, humanInt(s.Sweep.Network.Reachable)) + } + formatLine(0, "%sAvg region size: %s", indent, humanFloatOrNA(s.Sweep.Network.AvgRegionSize)) + formatLine(0, "%sReplication factor: %s", indent, humanNumber(s.Sweep.Network.ReplicationFactor)) + addBlankLine(0) + } + } + + // Operations + if all || operations || brief { + sectionTitle(1, "Operations") + // Ongoing operations + formatLine(1, "%sOngoing provides: %s CIDs, %s regions", indent, humanSI(s.Sweep.Operations.Ongoing.KeyProvides, 1), humanSI(s.Sweep.Operations.Ongoing.RegionProvides, 1)) + formatLine(1, "%sOngoing reprovides: %s CIDs, %s regions", indent, humanSI(s.Sweep.Operations.Ongoing.KeyReprovides, 1), 
humanSI(s.Sweep.Operations.Ongoing.RegionReprovides, 1)) + // Past operations summary + formatLine(1, "%sTotal CIDs provided: %s", indent, humanNumber(s.Sweep.Operations.Past.KeysProvided)) + if !brief { + formatLine(1, "%sTotal records provided: %s", indent, humanNumber(s.Sweep.Operations.Past.RecordsProvided)) + formatLine(1, "%sTotal provide errors: %s", indent, humanNumber(s.Sweep.Operations.Past.KeysFailed)) + formatLine(1, "%sCIDs provided/min/worker: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.KeysProvidedPerMinute)) + formatLine(1, "%sCIDs reprovided/min/worker: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.KeysReprovidedPerMinute)) + formatLine(1, "%sRegion reprovide duration: %s", indent, humanDurationOrNA(s.Sweep.Operations.Past.RegionReprovideDuration)) + formatLine(1, "%sAvg CIDs/reprovide: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.AvgKeysPerReprovide)) + formatLine(1, "%sRegions reprovided (last cycle): %s", indent, humanNumber(s.Sweep.Operations.Past.RegionReprovidedLastCycle)) + addBlankLine(1) + } + } + + // Workers + displayWorkers := all || workers + if displayWorkers || brief { + availableReservedBurst := max(0, s.Sweep.Workers.DedicatedBurst-s.Sweep.Workers.ActiveBurst) + availableReservedPeriodic := max(0, s.Sweep.Workers.DedicatedPeriodic-s.Sweep.Workers.ActivePeriodic) + availableFreeWorkers := max(0, s.Sweep.Workers.Max-max(s.Sweep.Workers.DedicatedBurst, s.Sweep.Workers.ActiveBurst)-max(s.Sweep.Workers.DedicatedPeriodic, s.Sweep.Workers.ActivePeriodic)) + availableBurst := availableFreeWorkers + availableReservedBurst + availablePeriodic := availableFreeWorkers + availableReservedPeriodic + + if displayWorkers || availableBurst <= lowWorkerThreshold || availablePeriodic <= lowWorkerThreshold { + // Either we want to display workers information, or we are low on + // available workers and want to warn the user. 
+ sectionTitle(0, "Workers") + specifyWorkers := " workers" + if compactMode { + specifyWorkers = "" + } + formatLine(0, "%sActive%s: %s / %s (max)", indent, specifyWorkers, humanInt(s.Sweep.Workers.Active), humanInt(s.Sweep.Workers.Max)) + if brief { + // Brief mode - show condensed worker info + formatLine(0, "%sPeriodic%s: %s active, %s available, %s queued", indent, specifyWorkers, + humanInt(s.Sweep.Workers.ActivePeriodic), humanInt(availablePeriodic), humanInt(s.Sweep.Workers.QueuedPeriodic)) + formatLine(0, "%sBurst%s: %s active, %s available, %s queued\n", indent, specifyWorkers, + humanInt(s.Sweep.Workers.ActiveBurst), humanInt(availableBurst), humanInt(s.Sweep.Workers.QueuedBurst)) + } else { + formatLine(0, "%sFree%s: %s", indent, specifyWorkers, humanInt(availableFreeWorkers)) + formatLine(0, "%s %-14s %-9s %s", indent, "Workers stats:", "Periodic", "Burst") + formatLine(0, "%s %-14s %-9s %s", indent, "Active:", humanInt(s.Sweep.Workers.ActivePeriodic), humanInt(s.Sweep.Workers.ActiveBurst)) + formatLine(0, "%s %-14s %-9s %s", indent, "Dedicated:", humanInt(s.Sweep.Workers.DedicatedPeriodic), humanInt(s.Sweep.Workers.DedicatedBurst)) + formatLine(0, "%s %-14s %-9s %s", indent, "Available:", humanInt(availablePeriodic), humanInt(availableBurst)) + formatLine(0, "%s %-14s %-9s %s", indent, "Queued:", humanInt(s.Sweep.Workers.QueuedPeriodic), humanInt(s.Sweep.Workers.QueuedBurst)) + formatLine(0, "%sMax connections/worker: %s", indent, humanInt(s.Sweep.Workers.MaxProvideConnsPerWorker)) + addBlankLine(0) + } + } + } + if compactMode { + col0Width := col0MaxWidth + 2 + // Print both columns side by side + maxRows := max(len(cols[0]), len(cols[1])) + if maxRows == 0 { + return nil + } + for i := range maxRows - 1 { // last line is empty + var left, right string + if i < len(cols[0]) { + left = cols[0][i] + } + if i < len(cols[1]) { + right = cols[1][i] + } + fmt.Fprintf(wtr, "%-*s %s\n", col0Width, left, right) + } + } else { + if !brief { + cols[0] = 
cols[0][:len(cols[0])-1] // remove last blank line + } + for _, line := range cols[0] { + fmt.Fprintln(wtr, line) + } + } + return nil + }), + }, + Type: provideStats{}, +} + +func humanDuration(val time.Duration) string { + if val > time.Second { + return val.Truncate(100 * time.Millisecond).String() + } + return val.Truncate(time.Microsecond).String() +} + +func humanDurationOrNA(val time.Duration) string { + if val <= 0 { + return "N/A" + } + return humanDuration(val) +} + +func humanTime(val time.Time) string { + if val.IsZero() { + return "N/A" + } + return val.Format("2006-01-02 15:04:05") +} + +func humanNumber[T constraints.Float | constraints.Integer](n T) string { + nf := float64(n) + str := humanSI(nf, 0) + fullStr := humanFull(nf, 0) + if str != fullStr { + return fmt.Sprintf("%s\t(%s)", str, fullStr) + } + return str +} + +// humanNumberOrNA is like humanNumber but returns "N/A" for non-positive values. +func humanNumberOrNA[T constraints.Float | constraints.Integer](n T) string { + if n <= 0 { + return "N/A" + } + return humanNumber(n) +} + +// humanFloatOrNA formats a float with 1 decimal place, returning "N/A" for non-positive values. +// This is separate from humanNumberOrNA because it provides simple decimal formatting for +// continuous metrics (averages, rates) rather than SI unit formatting used for discrete counts. +func humanFloatOrNA(val float64) string { + if val <= 0 { + return "N/A" + } + return humanFull(val, 1) +} + +func humanSI[T constraints.Float | constraints.Integer](val T, decimals int) string { + v, unit := humanize.ComputeSI(float64(val)) + return fmt.Sprintf("%s%s", humanFull(v, decimals), unit) +} + +func humanInt[T constraints.Integer](val T) string { + return humanFull(float64(val), 0) +} + +func humanFull(val float64, decimals int) string { + return humanize.CommafWithDigits(val, decimals) +} + +// provideCIDSync performs a synchronous/blocking provide operation to announce +// the given CID to the DHT. 
+// +// - If the accelerated DHT client is used, a DHT lookup isn't needed, we +// directly allocate provider records to closest peers. +// - If Provide.DHT.SweepEnabled=true or OptimisticProvide=true, we make an +// optimistic provide call. +// - Else we make a standard provide call (much slower). +// +// IMPORTANT: The caller MUST verify DHT availability using HasActiveDHTClient() +// before calling this function. Calling with a nil or invalid router will cause +// a panic - this is the caller's responsibility to prevent. +func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) error { + return router.Provide(ctx, c, true) +} diff --git a/core/commands/pubsub.go b/core/commands/pubsub.go index d50e651b2..9e81ef281 100644 --- a/core/commands/pubsub.go +++ b/core/commands/pubsub.go @@ -6,7 +6,7 @@ import ( "fmt" "io" "net/http" - "sort" + "slices" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" mbase "github.com/multiformats/go-multibase" @@ -325,7 +325,7 @@ TOPIC AND DATA ENCODING for _, peer := range peers { list.Strings = append(list.Strings, peer.String()) } - sort.Strings(list.Strings) + slices.Sort(list.Strings) return cmds.EmitOnce(res, list) }, Type: stringList{}, diff --git a/core/commands/repo.go b/core/commands/repo.go index 77ce68590..14956ec7c 100644 --- a/core/commands/repo.go +++ b/core/commands/repo.go @@ -5,21 +5,22 @@ import ( "errors" "fmt" "io" - "os" "runtime" "strings" "sync" "text/tabwriter" + "time" oldcmds "github.com/ipfs/kubo/commands" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" + coreiface "github.com/ipfs/kubo/core/coreiface" corerepo "github.com/ipfs/kubo/core/corerepo" fsrepo "github.com/ipfs/kubo/repo/fsrepo" "github.com/ipfs/kubo/repo/fsrepo/migrations" - "github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher" humanize "github.com/dustin/go-humanize" bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/path" cid "github.com/ipfs/go-cid" cmds "github.com/ipfs/go-ipfs-cmds" ) @@ -57,6 +58,7 
@@ const ( repoQuietOptionName = "quiet" repoSilentOptionName = "silent" repoAllowDowngradeOptionName = "allow-downgrade" + repoToVersionOptionName = "to" ) var repoGcCmd = &cmds.Command{ @@ -226,45 +228,137 @@ Version string The repo version. }, } +// VerifyProgress reports verification progress to the user. +// It contains either a message about a corrupt block or a progress counter. type VerifyProgress struct { - Msg string - Progress int + Msg string // Message about a corrupt/healed block (empty for valid blocks) + Progress int // Number of blocks processed so far } -func verifyWorkerRun(ctx context.Context, wg *sync.WaitGroup, keys <-chan cid.Cid, results chan<- string, bs bstore.Blockstore) { +// verifyState represents the state of a block after verification. +// States track both the verification result and any remediation actions taken. +type verifyState int + +const ( + verifyStateValid verifyState = iota // Block is valid and uncorrupted + verifyStateCorrupt // Block is corrupt, no action taken + verifyStateCorruptRemoved // Block was corrupt and successfully removed + verifyStateCorruptRemoveFailed // Block was corrupt but removal failed + verifyStateCorruptHealed // Block was corrupt, removed, and successfully re-fetched + verifyStateCorruptHealFailed // Block was corrupt and removed, but re-fetching failed +) + +const ( + // verifyWorkerMultiplier determines worker pool size relative to CPU count. + // Since block verification is I/O-bound (disk reads + potential network fetches), + // we use more workers than CPU cores to maximize throughput. + verifyWorkerMultiplier = 2 +) + +// verifyResult contains the outcome of verifying a single block. +// It includes the block's CID, its verification state, and an optional +// human-readable message describing what happened. 
+type verifyResult struct { + cid cid.Cid // CID of the block that was verified + state verifyState // Final state after verification and any remediation + msg string // Human-readable message (empty for valid blocks) +} + +// verifyWorkerRun processes CIDs from the keys channel, verifying their integrity. +// If shouldDrop is true, corrupt blocks are removed from the blockstore. +// If shouldHeal is true (implies shouldDrop), removed blocks are re-fetched from the network. +// The api parameter must be non-nil when shouldHeal is true. +// healTimeout specifies the maximum time to wait for each block heal (0 = no timeout). +func verifyWorkerRun(ctx context.Context, wg *sync.WaitGroup, keys <-chan cid.Cid, results chan<- *verifyResult, bs bstore.Blockstore, api coreiface.CoreAPI, shouldDrop, shouldHeal bool, healTimeout time.Duration) { defer wg.Done() + sendResult := func(r *verifyResult) bool { + select { + case results <- r: + return true + case <-ctx.Done(): + return false + } + } + for k := range keys { _, err := bs.Get(ctx, k) if err != nil { - select { - case results <- fmt.Sprintf("block %s was corrupt (%s)", k, err): - case <-ctx.Done(): - return + // Block is corrupt + result := &verifyResult{cid: k, state: verifyStateCorrupt} + + if !shouldDrop { + result.msg = fmt.Sprintf("block %s was corrupt (%s)", k, err) + if !sendResult(result) { + return + } + continue } + // Try to delete + if delErr := bs.DeleteBlock(ctx, k); delErr != nil { + result.state = verifyStateCorruptRemoveFailed + result.msg = fmt.Sprintf("block %s was corrupt (%s), failed to remove (%s)", k, err, delErr) + if !sendResult(result) { + return + } + continue + } + + if !shouldHeal { + result.state = verifyStateCorruptRemoved + result.msg = fmt.Sprintf("block %s was corrupt (%s), removed", k, err) + if !sendResult(result) { + return + } + continue + } + + // Try to heal by re-fetching from network (api is guaranteed non-nil here) + healCtx := ctx + var healCancel context.CancelFunc + if 
healTimeout > 0 { + healCtx, healCancel = context.WithTimeout(ctx, healTimeout) + } + + if _, healErr := api.Block().Get(healCtx, path.FromCid(k)); healErr != nil { + result.state = verifyStateCorruptHealFailed + result.msg = fmt.Sprintf("block %s was corrupt (%s), removed, failed to heal (%s)", k, err, healErr) + } else { + result.state = verifyStateCorruptHealed + result.msg = fmt.Sprintf("block %s was corrupt (%s), removed, healed", k, err) + } + + if healCancel != nil { + healCancel() + } + + if !sendResult(result) { + return + } continue } - select { - case results <- "": - case <-ctx.Done(): + // Block is valid + if !sendResult(&verifyResult{cid: k, state: verifyStateValid}) { return } } } -func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blockstore) <-chan string { - results := make(chan string) +// verifyResultChan creates a channel of verification results by spawning multiple worker goroutines +// to process blocks in parallel. It returns immediately with a channel that will receive results. +func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blockstore, api coreiface.CoreAPI, shouldDrop, shouldHeal bool, healTimeout time.Duration) <-chan *verifyResult { + results := make(chan *verifyResult) go func() { defer close(results) var wg sync.WaitGroup - for i := 0; i < runtime.NumCPU()*2; i++ { + for i := 0; i < runtime.NumCPU()*verifyWorkerMultiplier; i++ { wg.Add(1) - go verifyWorkerRun(ctx, &wg, keys, results, bs) + go verifyWorkerRun(ctx, &wg, keys, results, bs, api, shouldDrop, shouldHeal, healTimeout) } wg.Wait() @@ -276,6 +370,45 @@ func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blocks var repoVerifyCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Verify all blocks in repo are not corrupted.", + ShortDescription: ` +'ipfs repo verify' checks integrity of all blocks in the local datastore. +Each block is read and validated against its CID to ensure data integrity. 
+ +Without any flags, this is a SAFE, read-only check that only reports corrupt +blocks without modifying the repository. This can be used as a "dry run" to +preview what --drop or --heal would do. + +Use --drop to remove corrupt blocks, or --heal to remove and re-fetch from +the network. + +Examples: + ipfs repo verify # safe read-only check, reports corrupt blocks + ipfs repo verify --drop # remove corrupt blocks + ipfs repo verify --heal # remove and re-fetch corrupt blocks + +Exit Codes: + 0: All blocks are valid, OR all corrupt blocks were successfully remediated + (with --drop or --heal) + 1: Corrupt blocks detected (without flags), OR remediation failed (block + removal or healing failed with --drop or --heal) + +Note: --heal requires the daemon to be running in online mode with network +connectivity to nodes that have the missing blocks. Make sure the daemon is +online and connected to other peers. Healing will attempt to re-fetch each +corrupt block from the network after removing it. If a block cannot be found +on the network, it will remain deleted. + +WARNING: Both --drop and --heal are DESTRUCTIVE operations that permanently +delete corrupt blocks from your repository. Once deleted, blocks cannot be +recovered unless --heal successfully fetches them from the network. Blocks +that cannot be healed will remain permanently deleted. Always backup your +repository before using these options. +`, + }, + Options: []cmds.Option{ + cmds.BoolOption("drop", "Remove corrupt blocks from datastore (destructive operation)."), + cmds.BoolOption("heal", "Remove corrupt blocks and re-fetch from network (destructive operation, implies --drop)."), + cmds.StringOption("heal-timeout", "Maximum time to wait for each block heal (e.g., \"30s\"). 
Only applies with --heal.").WithDefault("30s"), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { nd, err := cmdenv.GetNode(env) @@ -283,8 +416,39 @@ var repoVerifyCmd = &cmds.Command{ return err } - bs := bstore.NewBlockstore(nd.Repo.Datastore()) - bs.HashOnRead(true) + drop, _ := req.Options["drop"].(bool) + heal, _ := req.Options["heal"].(bool) + + if heal { + drop = true // heal implies drop + } + + // Parse and validate heal-timeout + timeoutStr, _ := req.Options["heal-timeout"].(string) + healTimeout, err := time.ParseDuration(timeoutStr) + if err != nil { + return fmt.Errorf("invalid heal-timeout: %w", err) + } + if healTimeout < 0 { + return errors.New("heal-timeout must be >= 0") + } + + // Check online mode and API availability for healing operation + var api coreiface.CoreAPI + if heal { + if !nd.IsOnline { + return ErrNotOnline + } + api, err = cmdenv.GetApi(env, req) + if err != nil { + return err + } + if api == nil { + return fmt.Errorf("healing requested but API is not available - make sure daemon is online and connected to other peers") + } + } + + bs := &bstore.ValidatingBlockstore{Blockstore: bstore.NewBlockstore(nd.Repo.Datastore())} keys, err := bs.AllKeysChan(req.Context) if err != nil { @@ -292,17 +456,47 @@ var repoVerifyCmd = &cmds.Command{ return err } - results := verifyResultChan(req.Context, keys, bs) + results := verifyResultChan(req.Context, keys, bs, api, drop, heal, healTimeout) - var fails int + // Track statistics for each type of outcome + var corrupted, removed, removeFailed, healed, healFailed int var i int - for msg := range results { - if msg != "" { - if err := res.Emit(&VerifyProgress{Msg: msg}); err != nil { + + for result := range results { + // Update counters based on the block's final state + switch result.state { + case verifyStateCorrupt: + // Block is corrupt but no action was taken (--drop not specified) + corrupted++ + case verifyStateCorruptRemoved: + // Block was corrupt and 
successfully removed (--drop specified) + corrupted++ + removed++ + case verifyStateCorruptRemoveFailed: + // Block was corrupt but couldn't be removed + corrupted++ + removeFailed++ + case verifyStateCorruptHealed: + // Block was corrupt, removed, and successfully re-fetched (--heal specified) + corrupted++ + removed++ + healed++ + case verifyStateCorruptHealFailed: + // Block was corrupt and removed, but re-fetching failed + corrupted++ + removed++ + healFailed++ + default: + // verifyStateValid blocks are not counted (they're the expected case) + } + + // Emit progress message for corrupt blocks + if result.state != verifyStateValid && result.msg != "" { + if err := res.Emit(&VerifyProgress{Msg: result.msg}); err != nil { return err } - fails++ } + i++ if err := res.Emit(&VerifyProgress{Progress: i}); err != nil { return err @@ -313,8 +507,42 @@ var repoVerifyCmd = &cmds.Command{ return err } - if fails != 0 { - return errors.New("verify complete, some blocks were corrupt") + if corrupted > 0 { + // Build a summary of what happened with corrupt blocks + summary := fmt.Sprintf("verify complete, %d blocks corrupt", corrupted) + if removed > 0 { + summary += fmt.Sprintf(", %d removed", removed) + } + if removeFailed > 0 { + summary += fmt.Sprintf(", %d failed to remove", removeFailed) + } + if healed > 0 { + summary += fmt.Sprintf(", %d healed", healed) + } + if healFailed > 0 { + summary += fmt.Sprintf(", %d failed to heal", healFailed) + } + + // Determine success/failure based on operation mode + shouldFail := false + + if !drop { + // Detection-only mode: always fail if corruption found + shouldFail = true + } else if heal { + // Heal mode: fail if any removal or heal failed + shouldFail = (removeFailed > 0 || healFailed > 0) + } else { + // Drop mode: fail if any removal failed + shouldFail = (removeFailed > 0) + } + + if shouldFail { + return errors.New(summary) + } + + // Success: emit summary as a message instead of error + return 
res.Emit(&VerifyProgress{Msg: summary}) } return res.Emit(&VerifyProgress{Msg: "verify complete, all blocks validated."}) @@ -323,7 +551,7 @@ var repoVerifyCmd = &cmds.Command{ Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, obj *VerifyProgress) error { if strings.Contains(obj.Msg, "was corrupt") { - fmt.Fprintln(os.Stdout, obj.Msg) + fmt.Fprintln(w, obj.Msg) return nil } @@ -374,63 +602,74 @@ var repoVersionCmd = &cmds.Command{ var repoMigrateCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Apply any outstanding migrations to the repo.", + Tagline: "Apply repository migrations to a specific version.", + ShortDescription: ` +'ipfs repo migrate' applies repository migrations to bring the repository +to a specific version. By default, migrates to the latest version supported +by this IPFS binary. + +Examples: + ipfs repo migrate # Migrate to latest version + ipfs repo migrate --to=17 # Migrate to version 17 + ipfs repo migrate --to=16 --allow-downgrade # Downgrade to version 16 + +WARNING: Downgrading a repository may cause data loss and requires using +an older IPFS binary that supports the target version. After downgrading, +you must use an IPFS implementation compatible with that repository version. + +Repository versions 16+ use embedded migrations for faster, more reliable +migration. Versions below 16 require external migration tools. +`, }, Options: []cmds.Option{ + cmds.IntOption(repoToVersionOptionName, "Target repository version").WithDefault(fsrepo.RepoVersion), cmds.BoolOption(repoAllowDowngradeOptionName, "Allow downgrading to a lower repo version"), }, NoRemote: true, + // SetDoesNotUseRepo(true) might seem counter-intuitive since migrations + // do access the repo, but it's correct - we need direct filesystem access + // without going through the daemon. Migrations handle their own locking. 
+ Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { cctx := env.(*oldcmds.Context) allowDowngrade, _ := req.Options[repoAllowDowngradeOptionName].(bool) + targetVersion, _ := req.Options[repoToVersionOptionName].(int) - _, err := fsrepo.Open(cctx.ConfigRoot) + // Get current repo version + currentVersion, err := migrations.RepoVersion(cctx.ConfigRoot) + if err != nil { + return fmt.Errorf("could not get current repo version: %w", err) + } - if err == nil { - fmt.Println("Repo does not require migration.") + // Check if migration is needed + if currentVersion == targetVersion { + fmt.Printf("Repository is already at version %d.\n", targetVersion) return nil - } else if err != fsrepo.ErrNeedMigration { - return err } - fmt.Println("Found outdated fs-repo, starting migration.") + // Validate downgrade request + if targetVersion < currentVersion && !allowDowngrade { + return fmt.Errorf("downgrade from version %d to %d requires --allow-downgrade flag", currentVersion, targetVersion) + } - // Read Migration section of IPFS config - configFileOpt, _ := req.Options[ConfigFileOption].(string) - migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot, configFileOpt) + fmt.Printf("Migrating repository from version %d to %d...\n", currentVersion, targetVersion) + + // Use hybrid migration strategy that intelligently combines external and embedded migrations + // Use req.Context instead of cctx.Context() to avoid opening the repo before migrations run, + // which would acquire the lock that migrations need + err = migrations.RunHybridMigrations(req.Context, targetVersion, cctx.ConfigRoot, allowDowngrade) if err != nil { - return err - } - - // Define function to create IPFS fetcher. Do not supply an - // already-constructed IPFS fetcher, because this may be expensive and - // not needed according to migration config. 
Instead, supply a function - // to construct the particular IPFS fetcher implementation used here, - // which is called only if an IPFS fetcher is needed. - newIpfsFetcher := func(distPath string) migrations.Fetcher { - return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot, configFileOpt) - } - - // Fetch migrations from current distribution, or location from environ - fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist) - - // Create fetchers according to migrationCfg.DownloadSources - fetcher, err := migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher) - if err != nil { - return err - } - defer fetcher.Close() - - err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", allowDowngrade) - if err != nil { - fmt.Println("The migrations of fs-repo failed:") + fmt.Println("Repository migration failed:") fmt.Printf(" %s\n", err) fmt.Println("If you think this is a bug, please file an issue and include this whole log output.") - fmt.Println(" https://github.com/ipfs/fs-repo-migrations") + fmt.Println(" https://github.com/ipfs/kubo") return err } - fmt.Printf("Success: fs-repo has been migrated to version %d.\n", fsrepo.RepoVersion) + fmt.Printf("Repository successfully migrated to version %d.\n", targetVersion) + if targetVersion < fsrepo.RepoVersion { + fmt.Println("WARNING: After downgrading, you must use an IPFS binary compatible with this repository version.") + } return nil }, } diff --git a/core/commands/repo_verify_test.go b/core/commands/repo_verify_test.go new file mode 100644 index 000000000..4b6b65a07 --- /dev/null +++ b/core/commands/repo_verify_test.go @@ -0,0 +1,371 @@ +//go:build go1.25 + +package commands + +// This file contains unit tests for the --heal-timeout flag functionality +// using testing/synctest to avoid waiting for real timeouts. 
+// +// End-to-end tests for the full 'ipfs repo verify' command (including --drop +// and --heal flags) are located in test/cli/repo_verify_test.go. + +import ( + "bytes" + "context" + "errors" + "io" + "sync" + "testing" + "testing/synctest" + "time" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + coreiface "github.com/ipfs/kubo/core/coreiface" + "github.com/ipfs/kubo/core/coreiface/options" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/ipfs/boxo/path" +) + +func TestVerifyWorkerHealTimeout(t *testing.T) { + t.Run("heal succeeds before timeout", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + const healTimeout = 5 * time.Second + testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + + // Setup channels + keys := make(chan cid.Cid, 1) + keys <- testCID + close(keys) + results := make(chan *verifyResult, 1) + + // Mock blockstore that returns error (simulating corruption) + mockBS := &mockBlockstore{ + getError: errors.New("corrupt block"), + } + + // Mock API where Block().Get() completes before timeout + mockAPI := &mockCoreAPI{ + blockAPI: &mockBlockAPI{ + getDelay: 2 * time.Second, // Less than healTimeout + data: []byte("healed data"), + }, + } + + var wg sync.WaitGroup + wg.Add(1) + + // Run worker + go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout) + + // Advance time past the mock delay but before timeout + time.Sleep(3 * time.Second) + synctest.Wait() + + wg.Wait() + close(results) + + // Verify heal succeeded + result := <-results + require.NotNil(t, result) + assert.Equal(t, verifyStateCorruptHealed, result.state) + assert.Contains(t, result.msg, "healed") + }) + }) + + t.Run("heal fails due to timeout", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + const healTimeout = 2 * time.Second + testCID := 
cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + + // Setup channels + keys := make(chan cid.Cid, 1) + keys <- testCID + close(keys) + results := make(chan *verifyResult, 1) + + // Mock blockstore that returns error (simulating corruption) + mockBS := &mockBlockstore{ + getError: errors.New("corrupt block"), + } + + // Mock API where Block().Get() takes longer than healTimeout + mockAPI := &mockCoreAPI{ + blockAPI: &mockBlockAPI{ + getDelay: 5 * time.Second, // More than healTimeout + data: []byte("healed data"), + }, + } + + var wg sync.WaitGroup + wg.Add(1) + + // Run worker + go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout) + + // Advance time past timeout + time.Sleep(3 * time.Second) + synctest.Wait() + + wg.Wait() + close(results) + + // Verify heal failed due to timeout + result := <-results + require.NotNil(t, result) + assert.Equal(t, verifyStateCorruptHealFailed, result.state) + assert.Contains(t, result.msg, "failed to heal") + assert.Contains(t, result.msg, "context deadline exceeded") + }) + }) + + t.Run("heal with zero timeout still attempts heal", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + const healTimeout = 0 // Zero timeout means no timeout + testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + + // Setup channels + keys := make(chan cid.Cid, 1) + keys <- testCID + close(keys) + results := make(chan *verifyResult, 1) + + // Mock blockstore that returns error (simulating corruption) + mockBS := &mockBlockstore{ + getError: errors.New("corrupt block"), + } + + // Mock API that succeeds quickly + mockAPI := &mockCoreAPI{ + blockAPI: &mockBlockAPI{ + getDelay: 100 * time.Millisecond, + data: []byte("healed data"), + }, + } + + var wg sync.WaitGroup + wg.Add(1) + + // Run worker + go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout) + + // Advance time to let heal complete + 
time.Sleep(200 * time.Millisecond) + synctest.Wait() + + wg.Wait() + close(results) + + // Verify heal succeeded even with zero timeout + result := <-results + require.NotNil(t, result) + assert.Equal(t, verifyStateCorruptHealed, result.state) + assert.Contains(t, result.msg, "healed") + }) + }) + + t.Run("multiple blocks with different timeout outcomes", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + const healTimeout = 3 * time.Second + testCID1 := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + testCID2 := cid.MustParse("bafybeihvvulpp4evxj7x7armbqcyg6uezzuig6jp3lktpbovlqfkjtgyby") + + // Setup channels + keys := make(chan cid.Cid, 2) + keys <- testCID1 + keys <- testCID2 + close(keys) + results := make(chan *verifyResult, 2) + + // Mock blockstore that always returns error (all blocks corrupt) + mockBS := &mockBlockstore{ + getError: errors.New("corrupt block"), + } + + // Create two mock block APIs with different delays + // We'll need to alternate which one gets used + // For simplicity, use one that succeeds fast + mockAPI := &mockCoreAPI{ + blockAPI: &mockBlockAPI{ + getDelay: 1 * time.Second, // Less than healTimeout - will succeed + data: []byte("healed data"), + }, + } + + var wg sync.WaitGroup + wg.Add(2) // Two workers + + // Run two workers + go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout) + go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout) + + // Advance time to let both complete + time.Sleep(2 * time.Second) + synctest.Wait() + + wg.Wait() + close(results) + + // Collect results + var healedCount int + for result := range results { + if result.state == verifyStateCorruptHealed { + healedCount++ + } + } + + // Both should heal successfully (both under timeout) + assert.Equal(t, 2, healedCount) + }) + }) + + t.Run("valid block is not healed", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + const 
healTimeout = 5 * time.Second + testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") + + // Setup channels + keys := make(chan cid.Cid, 1) + keys <- testCID + close(keys) + results := make(chan *verifyResult, 1) + + // Mock blockstore that returns valid block (no error) + mockBS := &mockBlockstore{ + block: blocks.NewBlock([]byte("valid data")), + } + + // Mock API (won't be called since block is valid) + mockAPI := &mockCoreAPI{ + blockAPI: &mockBlockAPI{}, + } + + var wg sync.WaitGroup + wg.Add(1) + + // Run worker with heal enabled + go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, false, true, healTimeout) + + synctest.Wait() + + wg.Wait() + close(results) + + // Verify block is marked valid, not healed + result := <-results + require.NotNil(t, result) + assert.Equal(t, verifyStateValid, result.state) + assert.Empty(t, result.msg) + }) + }) +} + +// mockBlockstore implements a minimal blockstore for testing +type mockBlockstore struct { + getError error + block blocks.Block +} + +func (m *mockBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + if m.getError != nil { + return nil, m.getError + } + return m.block, nil +} + +func (m *mockBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error { + return nil +} + +func (m *mockBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + return m.block != nil, nil +} + +func (m *mockBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + if m.block != nil { + return len(m.block.RawData()), nil + } + return 0, errors.New("block not found") +} + +func (m *mockBlockstore) Put(ctx context.Context, b blocks.Block) error { + return nil +} + +func (m *mockBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { + return nil +} + +func (m *mockBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, errors.New("not implemented") +} + +func (m *mockBlockstore) HashOnRead(enabled bool) { 
+} + +// mockBlockAPI implements BlockAPI for testing +type mockBlockAPI struct { + getDelay time.Duration + getError error + data []byte +} + +func (m *mockBlockAPI) Get(ctx context.Context, p path.Path) (io.Reader, error) { + if m.getDelay > 0 { + select { + case <-time.After(m.getDelay): + // Delay completed + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if m.getError != nil { + return nil, m.getError + } + return bytes.NewReader(m.data), nil +} + +func (m *mockBlockAPI) Put(ctx context.Context, r io.Reader, opts ...options.BlockPutOption) (coreiface.BlockStat, error) { + return nil, errors.New("not implemented") +} + +func (m *mockBlockAPI) Rm(ctx context.Context, p path.Path, opts ...options.BlockRmOption) error { + return errors.New("not implemented") +} + +func (m *mockBlockAPI) Stat(ctx context.Context, p path.Path) (coreiface.BlockStat, error) { + return nil, errors.New("not implemented") +} + +// mockCoreAPI implements minimal CoreAPI for testing +type mockCoreAPI struct { + blockAPI *mockBlockAPI +} + +func (m *mockCoreAPI) Block() coreiface.BlockAPI { + return m.blockAPI +} + +func (m *mockCoreAPI) Unixfs() coreiface.UnixfsAPI { return nil } +func (m *mockCoreAPI) Dag() coreiface.APIDagService { return nil } +func (m *mockCoreAPI) Name() coreiface.NameAPI { return nil } +func (m *mockCoreAPI) Key() coreiface.KeyAPI { return nil } +func (m *mockCoreAPI) Pin() coreiface.PinAPI { return nil } +func (m *mockCoreAPI) Object() coreiface.ObjectAPI { return nil } +func (m *mockCoreAPI) Swarm() coreiface.SwarmAPI { return nil } +func (m *mockCoreAPI) PubSub() coreiface.PubSubAPI { return nil } +func (m *mockCoreAPI) Routing() coreiface.RoutingAPI { return nil } + +func (m *mockCoreAPI) ResolvePath(ctx context.Context, p path.Path) (path.ImmutablePath, []string, error) { + return path.ImmutablePath{}, nil, errors.New("not implemented") +} + +func (m *mockCoreAPI) ResolveNode(ctx context.Context, p path.Path) (ipld.Node, error) { + return nil, 
errors.New("not implemented") +} + +func (m *mockCoreAPI) WithOptions(...options.ApiOption) (coreiface.CoreAPI, error) { + return nil, errors.New("not implemented") +} diff --git a/core/commands/root.go b/core/commands/root.go index d062e75b4..d70a49376 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -10,7 +10,7 @@ import ( "github.com/ipfs/kubo/core/commands/pin" cmds "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("core/commands") @@ -65,6 +65,7 @@ ADVANCED COMMANDS p2p Libp2p stream mounting (experimental) filestore Manage the filestore (experimental) mount Mount an IPFS read-only mount point (experimental) + provide Control providing operations NETWORK COMMANDS id Show info about IPFS peers @@ -133,6 +134,7 @@ var rootSubcommands = map[string]*cmds.Command{ "files": FilesCmd, "filestore": FileStoreCmd, "get": GetCmd, + "provide": ProvideCmd, "pubsub": PubsubCmd, "repo": RepoCmd, "stats": StatsCmd, diff --git a/core/commands/routing.go b/core/commands/routing.go index 3e503b014..5e1d5334d 100644 --- a/core/commands/routing.go +++ b/core/commands/routing.go @@ -9,7 +9,11 @@ import ( "strings" "time" + "github.com/ipfs/kubo/config" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/ipfs/kubo/core/commands/cmdutils" + "github.com/ipfs/kubo/core/node" + mh "github.com/multiformats/go-multihash" dag "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/boxo/ipns" @@ -42,6 +46,7 @@ var RoutingCmd = &cmds.Command{ "get": getValueRoutingCmd, "put": putValueRoutingCmd, "provide": provideRefRoutingCmd, + "reprovide": reprovideRoutingCmd, }, } @@ -70,7 +75,7 @@ var findProvidersRoutingCmd = &cmds.Command{ numProviders, _ := req.Options[numProvidersOptionName].(int) if numProviders < 1 { - return fmt.Errorf("number of providers must be greater than 0") + return errors.New("number of providers must be greater than 0") } c, err := cid.Parse(req.Arguments[0]) @@ 
-85,7 +90,7 @@ var findProvidersRoutingCmd = &cmds.Command{ defer cancel() pchan := n.Routing.FindProvidersAsync(ctx, c, numProviders) for p := range pchan { - np := p + np := cmdutils.CloneAddrInfo(p) routing.PublishQueryEvent(ctx, &routing.QueryEvent{ Type: routing.Provider, Responses: []*peer.AddrInfo{&np}, @@ -157,11 +162,24 @@ var provideRefRoutingCmd = &cmds.Command{ if !nd.IsOnline { return ErrNotOnline } + // respect global config + cfg, err := nd.Repo.Config() + if err != nil { + return err + } + if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) { + return errors.New("invalid configuration: Provide.Enabled is set to 'false'") + } - if len(nd.PeerHost.Network().Conns()) == 0 { + if len(nd.PeerHost.Network().Conns()) == 0 && !cfg.HasHTTPProviderConfigured() { + // Node is depending on DHT for providing (no custom HTTP provider + // configured) and currently has no connected peers. return errors.New("cannot provide, no connected peers") } + // If we reach here with no connections but HTTP provider configured, + // we proceed with the provide operation via HTTP + // Needed to parse stdin args. // TODO: Lazy Load err = req.ParseBodyArgs() @@ -194,12 +212,16 @@ var provideRefRoutingCmd = &cmds.Command{ ctx, events := routing.RegisterForQueryEvents(ctx) var provideErr error + // TODO: not sure if necessary to call StartProviding for `ipfs routing + // provide `, since either cid is already being provided, or it will + // be garbage collected and not reprovided anyway. So we may simply stick + // with a single (optimistic) provide, and skip StartProviding call. 
go func() { defer cancel() if rec { - provideErr = provideKeysRec(ctx, nd.Routing, nd.DAG, cids) + provideErr = provideCidsRec(ctx, nd.Provider, nd.DAG, cids) } else { - provideErr = provideKeys(ctx, nd.Routing, cids) + provideErr = provideCids(nd.Provider, cids) } if provideErr != nil { routing.PublishQueryEvent(ctx, &routing.QueryEvent{ @@ -209,6 +231,16 @@ var provideRefRoutingCmd = &cmds.Command{ } }() + if nd.HasActiveDHTClient() { + // If node has a DHT client, provide immediately the supplied cids before + // returning. + for _, c := range cids { + if err = provideCIDSync(req.Context, nd.DHTClient, c); err != nil { + return fmt.Errorf("error providing cid: %w", err) + } + } + } + for e := range events { if err := res.Emit(e); err != nil { return err @@ -235,39 +267,69 @@ var provideRefRoutingCmd = &cmds.Command{ Type: routing.QueryEvent{}, } -func provideKeys(ctx context.Context, r routing.Routing, cids []cid.Cid) error { - for _, c := range cids { - err := r.Provide(ctx, c, true) +var reprovideRoutingCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Trigger reprovider.", + ShortDescription: ` +Trigger reprovider to announce our data to network. 
+`, + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) if err != nil { return err } - } - return nil + + if !nd.IsOnline { + return ErrNotOnline + } + + // respect global config + cfg, err := nd.Repo.Config() + if err != nil { + return err + } + if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) { + return errors.New("invalid configuration: Provide.Enabled is set to 'false'") + } + if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 { + return errors.New("invalid configuration: Provide.DHT.Interval is set to '0'") + } + provideSys, ok := nd.Provider.(*node.LegacyProvider) + if !ok { + return errors.New("manual reprovide not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)") + } + + err = provideSys.Reprovide(req.Context) + if err != nil { + return err + } + + return nil + }, } -func provideKeysRec(ctx context.Context, r routing.Routing, dserv ipld.DAGService, cids []cid.Cid) error { - provided := cid.NewSet() +func provideCids(prov node.DHTProvider, cids []cid.Cid) error { + mhs := make([]mh.Multihash, len(cids)) + for i, c := range cids { + mhs[i] = c.Hash() + } + // providing happens asynchronously + return prov.StartProviding(true, mhs...) 
+} + +func provideCidsRec(ctx context.Context, prov node.DHTProvider, dserv ipld.DAGService, cids []cid.Cid) error { for _, c := range cids { kset := cid.NewSet() - err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, kset.Visit) if err != nil { return err } - - for _, k := range kset.Keys() { - if provided.Has(k) { - continue - } - - err = r.Provide(ctx, k, true) - if err != nil { - return err - } - provided.Add(k) + if err = provideCids(prov, kset.Keys()); err != nil { + return err } } - return nil } diff --git a/core/commands/stat.go b/core/commands/stat.go index 4ceb95f13..2b4485a95 100644 --- a/core/commands/stat.go +++ b/core/commands/stat.go @@ -1,6 +1,7 @@ package commands import ( + "errors" "fmt" "io" "os" @@ -26,11 +27,12 @@ for your IPFS node.`, }, Subcommands: map[string]*cmds.Command{ - "bw": statBwCmd, - "repo": repoStatCmd, - "bitswap": bitswapStatCmd, - "dht": statDhtCmd, - "provide": statProvideCmd, + "bw": statBwCmd, + "repo": repoStatCmd, + "bitswap": bitswapStatCmd, + "dht": statDhtCmd, + "provide": statProvideCmd, + "reprovide": statReprovideCmd, }, } @@ -55,7 +57,7 @@ to a particular peer, use the 'peer' option along with that peer's multihash id. To specify a specific protocol, use the 'proto' option. The 'peer' and 'proto' options cannot be specified simultaneously. 
The protocols that are queried using this method are outlined in the specification: -https://github.com/libp2p/specs/blob/master/7-properties.md#757-protocol-multicodecs +https://github.com/libp2p/specs/blob/master/_archive/7-properties.md#757-protocol-multicodecs Example protocol options: - /ipfs/id/1.0.0 @@ -100,7 +102,7 @@ Example: } if nd.Reporter == nil { - return fmt.Errorf("bandwidth reporter disabled in config") + return errors.New("bandwidth reporter disabled in config") } pstr, pfound := req.Options[statPeerOptionName].(string) diff --git a/core/commands/stat_dht.go b/core/commands/stat_dht.go index e6006e439..4c63b1355 100644 --- a/core/commands/stat_dht.go +++ b/core/commands/stat_dht.go @@ -7,6 +7,7 @@ import ( "time" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/ipfs/kubo/core/commands/cmdutils" cmds "github.com/ipfs/go-ipfs-cmds" dht "github.com/libp2p/go-libp2p-kad-dht" @@ -74,7 +75,8 @@ This interface is not stable and may change from release to release. var dht *dht.IpfsDHT var separateClient bool - if nd.DHTClient != nd.DHT { + // Check if using separate DHT client (e.g., accelerated DHT) + if nd.HasActiveDHTClient() && nd.DHTClient != nd.DHT { separateClient = true } @@ -92,7 +94,9 @@ This interface is not stable and may change from release to release. info := dhtPeerInfo{ID: p.String()} if ver, err := nd.Peerstore.Get(p, "AgentVersion"); err == nil { - info.AgentVersion, _ = ver.(string) + if vs, ok := ver.(string); ok { + info.AgentVersion = cmdutils.CleanAndTrim(vs) + } } else if err == pstore.ErrNotFound { // ignore } else { @@ -143,7 +147,9 @@ This interface is not stable and may change from release to release. 
info := dhtPeerInfo{ID: pi.Id.String()} if ver, err := nd.Peerstore.Get(pi.Id, "AgentVersion"); err == nil { - info.AgentVersion, _ = ver.(string) + if vs, ok := ver.(string); ok { + info.AgentVersion = cmdutils.CleanAndTrim(vs) + } } else if err == pstore.ErrNotFound { // ignore } else { diff --git a/core/commands/stat_provide.go b/core/commands/stat_provide.go index 6ee51e516..56a0f3dc4 100644 --- a/core/commands/stat_provide.go +++ b/core/commands/stat_provide.go @@ -1,84 +1,22 @@ package commands import ( - "fmt" - "io" - "text/tabwriter" - "time" - - humanize "github.com/dustin/go-humanize" - "github.com/ipfs/boxo/provider" cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/ipfs/kubo/core/commands/cmdenv" - "golang.org/x/exp/constraints" ) var statProvideCmd = &cmds.Command{ + Status: cmds.Deprecated, Helptext: cmds.HelpText{ - Tagline: "Returns statistics about the node's (re)provider system.", + Tagline: "Deprecated command, use 'ipfs provide stat' instead.", ShortDescription: ` -Returns statistics about the content the node is advertising. - -This interface is not stable and may change from release to release. +'ipfs stats provide' is deprecated because provide and reprovide operations +are now distinct. This command may be replaced by provide only stats in the +future. 
`, }, - Arguments: []cmds.Argument{}, - Options: []cmds.Option{}, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - nd, err := cmdenv.GetNode(env) - if err != nil { - return err - } - - if !nd.IsOnline { - return ErrNotOnline - } - - stats, err := nd.Provider.Stat() - if err != nil { - return err - } - - if err := res.Emit(stats); err != nil { - return err - } - - return nil - }, - Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s *provider.ReproviderStats) error { - wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0) - defer wtr.Flush() - - fmt.Fprintf(wtr, "TotalProvides:\t%s\n", humanNumber(s.TotalProvides)) - fmt.Fprintf(wtr, "AvgProvideDuration:\t%s\n", humanDuration(s.AvgProvideDuration)) - fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration)) - fmt.Fprintf(wtr, "LastReprovideBatchSize:\t%s\n", humanNumber(s.LastReprovideBatchSize)) - return nil - }), - }, - Type: provider.ReproviderStats{}, -} - -func humanDuration(val time.Duration) string { - return val.Truncate(time.Microsecond).String() -} - -func humanNumber[T constraints.Float | constraints.Integer](n T) string { - nf := float64(n) - str := humanSI(nf, 0) - fullStr := humanFull(nf, 0) - if str != fullStr { - return fmt.Sprintf("%s\t(%s)", str, fullStr) - } - return str -} - -func humanSI(val float64, decimals int) string { - v, unit := humanize.ComputeSI(val) - return fmt.Sprintf("%s%s", humanFull(v, decimals), unit) -} - -func humanFull(val float64, decimals int) string { - return humanize.CommafWithDigits(val, decimals) + Arguments: provideStatCmd.Arguments, + Options: provideStatCmd.Options, + Run: provideStatCmd.Run, + Encoders: provideStatCmd.Encoders, + Type: provideStatCmd.Type, } diff --git a/core/commands/stat_reprovide.go b/core/commands/stat_reprovide.go new file mode 100644 index 000000000..87893d1b5 --- /dev/null +++ b/core/commands/stat_reprovide.go @@ -0,0 +1,21 @@ 
+package commands + +import ( + cmds "github.com/ipfs/go-ipfs-cmds" +) + +var statReprovideCmd = &cmds.Command{ + Status: cmds.Deprecated, + Helptext: cmds.HelpText{ + Tagline: "Deprecated command, use 'ipfs provide stat' instead.", + ShortDescription: ` +'ipfs stats reprovide' is deprecated because provider stats are now +available from 'ipfs provide stat'. +`, + }, + Arguments: provideStatCmd.Arguments, + Options: provideStatCmd.Options, + Run: provideStatCmd.Run, + Encoders: provideStatCmd.Encoders, + Type: provideStatCmd.Type, +} diff --git a/core/commands/swarm.go b/core/commands/swarm.go index 252c48d33..533ccc078 100644 --- a/core/commands/swarm.go +++ b/core/commands/swarm.go @@ -8,8 +8,9 @@ import ( "fmt" "io" "path" - "sort" + "slices" "strconv" + "strings" "sync" "text/tabwriter" "time" @@ -17,6 +18,7 @@ import ( "github.com/ipfs/kubo/commands" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/ipfs/kubo/core/commands/cmdutils" "github.com/ipfs/kubo/core/node/libp2p" "github.com/ipfs/kubo/repo" "github.com/ipfs/kubo/repo/fsrepo" @@ -26,6 +28,7 @@ import ( inet "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" pstore "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" @@ -289,7 +292,7 @@ var swarmPeersCmd = &cmds.Command{ } for _, s := range strs { - ci.Streams = append(ci.Streams, streamInfo{Protocol: string(s)}) + ci.Streams = append(ci.Streams, streamInfo{Protocol: cmdutils.CleanAndTrim(string(s))}) } } @@ -301,11 +304,11 @@ var swarmPeersCmd = &cmds.Command{ identifyResult, _ := ci.identifyPeer(n.Peerstore, c.ID()) ci.Identify = identifyResult } - sort.Sort(&ci) + ci.Sort() out.Peers = append(out.Peers, ci) } - sort.Sort(&out) + out.Sort() return cmds.EmitOnce(res, &out) }, Encoders: 
cmds.EncoderMap{ @@ -435,32 +438,20 @@ type connInfo struct { Identify IdOutput `json:",omitempty"` } -func (ci *connInfo) Less(i, j int) bool { - return ci.Streams[i].Protocol < ci.Streams[j].Protocol -} - -func (ci *connInfo) Len() int { - return len(ci.Streams) -} - -func (ci *connInfo) Swap(i, j int) { - ci.Streams[i], ci.Streams[j] = ci.Streams[j], ci.Streams[i] +func (ci *connInfo) Sort() { + slices.SortFunc(ci.Streams, func(a, b streamInfo) int { + return strings.Compare(a.Protocol, b.Protocol) + }) } type connInfos struct { Peers []connInfo } -func (ci connInfos) Less(i, j int) bool { - return ci.Peers[i].Addr < ci.Peers[j].Addr -} - -func (ci connInfos) Len() int { - return len(ci.Peers) -} - -func (ci connInfos) Swap(i, j int) { - ci.Peers[i], ci.Peers[j] = ci.Peers[j], ci.Peers[i] +func (ci *connInfos) Sort() { + slices.SortFunc(ci.Peers, func(a, b connInfo) int { + return strings.Compare(a.Addr, b.Addr) + }) } func (ci *connInfo) identifyPeer(ps pstore.Peerstore, p peer.ID) (IdOutput, error) { @@ -484,16 +475,18 @@ func (ci *connInfo) identifyPeer(ps pstore.Peerstore, p peer.ID) (IdOutput, erro for _, a := range addrs { info.Addresses = append(info.Addresses, a.String()) } - sort.Strings(info.Addresses) + slices.Sort(info.Addresses) if protocols, err := ps.GetProtocols(p); err == nil { - info.Protocols = append(info.Protocols, protocols...) 
- sort.Slice(info.Protocols, func(i, j int) bool { return info.Protocols[i] < info.Protocols[j] }) + for _, proto := range protocols { + info.Protocols = append(info.Protocols, protocol.ID(cmdutils.CleanAndTrim(string(proto)))) + } + slices.Sort(info.Protocols) } if v, err := ps.Get(p, "AgentVersion"); err == nil { if vs, ok := v.(string); ok { - info.AgentVersion = vs + info.AgentVersion = cmdutils.CleanAndTrim(vs) } } @@ -551,7 +544,7 @@ var swarmAddrsCmd = &cmds.Command{ for p := range am.Addrs { ids = append(ids, p) } - sort.Strings(ids) + slices.Sort(ids) for _, p := range ids { paddrs := am.Addrs[p] @@ -603,7 +596,7 @@ var swarmAddrsLocalCmd = &cmds.Command{ } addrs = append(addrs, saddr) } - sort.Strings(addrs) + slices.Sort(addrs) return cmds.EmitOnce(res, &stringList{addrs}) }, Type: stringList{}, @@ -634,7 +627,7 @@ var swarmAddrsListenCmd = &cmds.Command{ for _, addr := range maddrs { addrs = append(addrs, addr.String()) } - sort.Strings(addrs) + slices.Sort(addrs) return cmds.EmitOnce(res, &stringList{addrs}) }, diff --git a/core/commands/sysdiag.go b/core/commands/sysdiag.go index 123dcb973..5a7c41ce9 100644 --- a/core/commands/sysdiag.go +++ b/core/commands/sysdiag.go @@ -2,14 +2,13 @@ package commands import ( "os" - "path" "runtime" + "github.com/ipfs/go-ipfs-cmds" version "github.com/ipfs/kubo" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core" cmdenv "github.com/ipfs/kubo/core/commands/cmdenv" - - cmds "github.com/ipfs/go-ipfs-cmds" manet "github.com/multiformats/go-multiaddr/net" sysi "github.com/whyrusleeping/go-sysinfo" ) @@ -84,32 +83,28 @@ func runtimeInfo(out map[string]interface{}) error { func envVarInfo(out map[string]interface{}) error { ev := make(map[string]interface{}) ev["GOPATH"] = os.Getenv("GOPATH") - ev["IPFS_PATH"] = os.Getenv("IPFS_PATH") + ev[config.EnvDir] = os.Getenv(config.EnvDir) out["environment"] = ev return nil } -func ipfsPath() string { - p := os.Getenv("IPFS_PATH") - if p == "" { - p = 
path.Join(os.Getenv("HOME"), ".ipfs") - } - return p -} - func diskSpaceInfo(out map[string]interface{}) error { - di := make(map[string]interface{}) - dinfo, err := sysi.DiskUsage(ipfsPath()) + pathRoot, err := config.PathRoot() + if err != nil { + return err + } + dinfo, err := sysi.DiskUsage(pathRoot) if err != nil { return err } - di["fstype"] = dinfo.FsType - di["total_space"] = dinfo.Total - di["free_space"] = dinfo.Free + out["diskinfo"] = map[string]interface{}{ + "fstype": dinfo.FsType, + "total_space": dinfo.Total, + "free_space": dinfo.Free, + } - out["diskinfo"] = di return nil } diff --git a/core/commands/version.go b/core/commands/version.go index 221726881..86f566ab1 100644 --- a/core/commands/version.go +++ b/core/commands/version.go @@ -227,7 +227,7 @@ func DetectNewKuboVersion(nd *core.IpfsNode, minPercent int64) (VersionCheckOutp return } - // Ignore prerelases and development releases (-dev, -rcX) + // Ignore prereleases and development releases (-dev, -rcX) if peerVersion.Metadata() != "" || peerVersion.Prerelease() != "" { return } @@ -255,7 +255,7 @@ func DetectNewKuboVersion(nd *core.IpfsNode, minPercent int64) (VersionCheckOutp } // Amino DHT client keeps information about previously seen peers - if nd.DHTClient != nd.DHT && nd.DHTClient != nil { + if nd.HasActiveDHTClient() && nd.DHTClient != nd.DHT { client, ok := nd.DHTClient.(*fullrt.FullRT) if !ok { return VersionCheckOutput{}, errors.New("could not perform version check due to missing or incompatible DHT configuration") diff --git a/core/core.go b/core/core.go index 0c9333e06..5f37c2871 100644 --- a/core/core.go +++ b/core/core.go @@ -19,6 +19,7 @@ import ( pin "github.com/ipfs/boxo/pinning/pinner" "github.com/ipfs/go-datastore" + bitswap "github.com/ipfs/boxo/bitswap" bserv "github.com/ipfs/boxo/blockservice" bstore "github.com/ipfs/boxo/blockstore" exchange "github.com/ipfs/boxo/exchange" @@ -27,12 +28,13 @@ import ( pathresolver "github.com/ipfs/boxo/path/resolver" provider 
"github.com/ipfs/boxo/provider" ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log" - goprocess "github.com/jbenet/goprocess" + logging "github.com/ipfs/go-log/v2" ddht "github.com/libp2p/go-libp2p-kad-dht/dual" + "github.com/libp2p/go-libp2p-kad-dht/fullrt" pubsub "github.com/libp2p/go-libp2p-pubsub" psrouter "github.com/libp2p/go-libp2p-pubsub-router" record "github.com/libp2p/go-libp2p-record" + routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" connmgr "github.com/libp2p/go-libp2p/core/connmgr" ic "github.com/libp2p/go-libp2p/core/crypto" p2phost "github.com/libp2p/go-libp2p/core/host" @@ -92,32 +94,35 @@ type IpfsNode struct { RecordValidator record.Validator // Online - PeerHost p2phost.Host `optional:"true"` // the network host (server+client) - Peering *peering.PeeringService `optional:"true"` - Filters *ma.Filters `optional:"true"` - Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper - Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. 
recommend ipfs-dht - DNSResolver *madns.Resolver // the DNS resolver - IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver - UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver - OfflineIPLDPathResolver pathresolver.Resolver `name:"offlineIpldPathResolver"` // The IPLD path resolver that uses only locally available blocks - OfflineUnixFSPathResolver pathresolver.Resolver `name:"offlineUnixFSPathResolver"` // The UnixFS path resolver that uses only locally available blocks - Exchange exchange.Interface // the block exchange + strategy (bitswap) - Namesys namesys.NameSystem // the name system, resolves paths to hashes - Provider provider.System // the value provider system - IpnsRepub *ipnsrp.Republisher `optional:"true"` - ResourceManager network.ResourceManager `optional:"true"` + PeerHost p2phost.Host `optional:"true"` // the network host (server+client) + Peering *peering.PeeringService `optional:"true"` + Filters *ma.Filters `optional:"true"` + Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper + ContentDiscovery routing.ContentDiscovery `optional:"true"` // the discovery part of the routing system + DNSResolver *madns.Resolver // the DNS resolver + IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver + UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver + OfflineIPLDPathResolver pathresolver.Resolver `name:"offlineIpldPathResolver"` // The IPLD path resolver that uses only locally available blocks + OfflineUnixFSPathResolver pathresolver.Resolver `name:"offlineUnixFSPathResolver"` // The UnixFS path resolver that uses only locally available blocks + Exchange exchange.Interface // the block exchange + strategy + Bitswap *bitswap.Bitswap `optional:"true"` // The Bitswap instance + Namesys namesys.NameSystem // the name system, resolves paths to hashes + ProvidingStrategy 
config.ProvideStrategy `optional:"true"` + ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"` + IpnsRepub *ipnsrp.Republisher `optional:"true"` + ResourceManager network.ResourceManager `optional:"true"` PubSub *pubsub.PubSub `optional:"true"` PSRouter *psrouter.PubsubValueStore `optional:"true"` - DHT *ddht.DHT `optional:"true"` - DHTClient routing.Routing `name:"dhtc" optional:"true"` + Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht + Provider node.DHTProvider // the value provider system + DHT *ddht.DHT `optional:"true"` + DHTClient routing.Routing `name:"dhtc" optional:"true"` P2P *p2p.P2P `optional:"true"` - Process goprocess.Process - ctx context.Context + ctx context.Context stop func() error @@ -132,6 +137,7 @@ type IpfsNode struct { type Mounts struct { Ipfs mount.Mount Ipns mount.Mount + Mfs mount.Mount } // Close calls Close() on the App object @@ -139,6 +145,42 @@ func (n *IpfsNode) Close() error { return n.stop() } +// HasActiveDHTClient checks if the node's DHT client is active and usable for DHT operations. +// +// Returns false for: +// - nil DHTClient +// - typed nil pointers (e.g., (*ddht.DHT)(nil)) +// - no-op routers (routinghelpers.Null) +// +// Note: This method only checks for known DHT client types (ddht.DHT, fullrt.FullRT). +// Custom routing.Routing implementations are not explicitly validated. +// +// This method prevents the "typed nil interface" bug where an interface contains +// a nil pointer of a concrete type, which passes nil checks but panics when methods +// are called. 
+func (n *IpfsNode) HasActiveDHTClient() bool { + if n.DHTClient == nil { + return false + } + + // Check for no-op router (Routing.Type=none) + if _, ok := n.DHTClient.(routinghelpers.Null); ok { + return false + } + + // Check for typed nil *ddht.DHT (common when Routing.Type=delegated or HTTP-only) + if d, ok := n.DHTClient.(*ddht.DHT); ok && d == nil { + return false + } + + // Check for typed nil *fullrt.FullRT (accelerated DHT client) + if f, ok := n.DHTClient.(*fullrt.FullRT); ok && f == nil { + return false + } + + return true +} + // Context returns the IpfsNode context func (n *IpfsNode) Context() context.Context { if n.ctx == nil { @@ -209,7 +251,8 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.AddrInfo, error) { return nil, err } - return cfg.BootstrapPeers() + // Use auto-config resolution for actual bootstrap connectivity + return cfg.BootstrapPeersWithAutoConf() } func (n *IpfsNode) saveTempBootstrapPeers(ctx context.Context, peerList []peer.AddrInfo) error { diff --git a/core/core_test.go b/core/core_test.go index 5d004937a..a7849a077 100644 --- a/core/core_test.go +++ b/core/core_test.go @@ -1,15 +1,28 @@ package core import ( + "os" + "path/filepath" "testing" context "context" "github.com/ipfs/kubo/repo" + "github.com/ipfs/boxo/filestore" + "github.com/ipfs/boxo/keystore" datastore "github.com/ipfs/go-datastore" syncds "github.com/ipfs/go-datastore/sync" config "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core/node/libp2p" + golib "github.com/libp2p/go-libp2p" + ddht "github.com/libp2p/go-libp2p-kad-dht/dual" + "github.com/libp2p/go-libp2p-kad-dht/fullrt" + routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + pstore "github.com/libp2p/go-libp2p/core/peerstore" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ) func TestInitialization(t *testing.T) { @@ -65,3 +78,151 @@ var testIdentity = config.Identity{ PeerID: 
"QmNgdzLieYi8tgfo2WfTUzNVH5hQK9oAYGVf6dxN12NrHt", PrivKey: "CAASrRIwggkpAgEAAoICAQCwt67GTUQ8nlJhks6CgbLKOx7F5tl1r9zF4m3TUrG3Pe8h64vi+ILDRFd7QJxaJ/n8ux9RUDoxLjzftL4uTdtv5UXl2vaufCc/C0bhCRvDhuWPhVsD75/DZPbwLsepxocwVWTyq7/ZHsCfuWdoh/KNczfy+Gn33gVQbHCnip/uhTVxT7ARTiv8Qa3d7qmmxsR+1zdL/IRO0mic/iojcb3Oc/PRnYBTiAZFbZdUEit/99tnfSjMDg02wRayZaT5ikxa6gBTMZ16Yvienq7RwSELzMQq2jFA4i/TdiGhS9uKywltiN2LrNDBcQJSN02pK12DKoiIy+wuOCRgs2NTQEhU2sXCk091v7giTTOpFX2ij9ghmiRfoSiBFPJA5RGwiH6ansCHtWKY1K8BS5UORM0o3dYk87mTnKbCsdz4bYnGtOWafujYwzueGx8r+IWiys80IPQKDeehnLW6RgoyjszKgL/2XTyP54xMLSW+Qb3BPgDcPaPO0hmop1hW9upStxKsefW2A2d46Ds4HEpJEry7PkS5M4gKL/zCKHuxuXVk14+fZQ1rstMuvKjrekpAC2aVIKMI9VRA3awtnje8HImQMdj+r+bPmv0N8rTTr3eS4J8Yl7k12i95LLfK+fWnmUh22oTNzkRlaiERQrUDyE4XNCtJc0xs1oe1yXGqazCIAQIDAQABAoICAQCk1N/ftahlRmOfAXk//8wNl7FvdJD3le6+YSKBj0uWmN1ZbUSQk64chr12iGCOM2WY180xYjy1LOS44PTXaeW5bEiTSnb3b3SH+HPHaWCNM2EiSogHltYVQjKW+3tfH39vlOdQ9uQ+l9Gh6iTLOqsCRyszpYPqIBwi1NMLY2Ej8PpVU7ftnFWouHZ9YKS7nAEiMoowhTu/7cCIVwZlAy3AySTuKxPMVj9LORqC32PVvBHZaMPJ+X1Xyijqg6aq39WyoztkXg3+Xxx5j5eOrK6vO/Lp6ZUxaQilHDXoJkKEJjgIBDZpluss08UPfOgiWAGkW+L4fgUxY0qDLDAEMhyEBAn6KOKVL1JhGTX6GjhWziI94bddSpHKYOEIDzUy4H8BXnKhtnyQV6ELS65C2hj9D0IMBTj7edCF1poJy0QfdK0cuXgMvxHLeUO5uc2YWfbNosvKxqygB9rToy4b22YvNwsZUXsTY6Jt+p9V2OgXSKfB5VPeRbjTJL6xqvvUJpQytmII/C9JmSDUtCbYceHj6X9jgigLk20VV6nWHqCTj3utXD6NPAjoycVpLKDlnWEgfVELDIk0gobxUqqSm3jTPEKRPJgxkgPxbwxYumtw++1UY2y35w3WRDc2xYPaWKBCQeZy+mL6ByXp9bWlNvxS3Knb6oZp36/ovGnf2pGvdQKCAQEAyKpipz2lIUySDyE0avVWAmQb2tWGKXALPohzj7AwkcfEg2GuwoC6GyVE2sTJD1HRazIjOKn3yQORg2uOPeG7sx7EKHxSxCKDrbPawkvLCq8JYSy9TLvhqKUVVGYPqMBzu2POSLEA81QXas+aYjKOFWA2Zrjq26zV9ey3+6Lc6WULePgRQybU8+RHJc6fdjUCCfUxgOrUO2IQOuTJ+FsDpVnrMUGlokmWn23OjL4qTL9wGDnWGUs2pjSzNbj3qA0d8iqaiMUyHX/D/VS0wpeT1osNBSm8suvSibYBn+7wbIApbwXUxZaxMv2OHGz3empae4ckvNZs7r8wsI9UwFt8mwKCAQEA4XK6gZkv9t+3YCcSPw2ensLvL/xU7i2bkC9tfTGdjnQfzZXIf5KNdVuj/SerOl2S1s45NMs3ysJbADwRb4ahElD/V71nGzV8fpFTitC20ro9fuX4J0+twmBolHqeH9pmeGTjAeL1rvt6vxs4FkeG/yNft7GdXpXTtEGaObn8Mt0tPY+aB3UnKrnC
QoQAlPyGHFrVRX0UEcp6wyyNGhJCNKeNOvqCHTFObhbhO+KWpWSN0MkVHnqaIBnIn1Te8FtvP/iTwXGnKc0YXJUG6+LM6LmOguW6tg8ZqiQeYyyR+e9eCFH4csLzkrTl1GxCxwEsoSLIMm7UDcjttW6tYEghkwKCAQEAmeCO5lCPYImnN5Lu71ZTLmI2OgmjaANTnBBnDbi+hgv61gUCToUIMejSdDCTPfwv61P3TmyIZs0luPGxkiKYHTNqmOE9Vspgz8Mr7fLRMNApESuNvloVIY32XVImj/GEzh4rAfM6F15U1sN8T/EUo6+0B/Glp+9R49QzAfRSE2g48/rGwgf1JVHYfVWFUtAzUA+GdqWdOixo5cCsYJbqpNHfWVZN/bUQnBFIYwUwysnC29D+LUdQEQQ4qOm+gFAOtrWU62zMkXJ4iLt8Ify6kbrvsRXgbhQIzzGS7WH9XDarj0eZciuslr15TLMC1Azadf+cXHLR9gMHA13mT9vYIQKCAQA/DjGv8cKCkAvf7s2hqROGYAs6Jp8yhrsN1tYOwAPLRhtnCs+rLrg17M2vDptLlcRuI/vIElamdTmylRpjUQpX7yObzLO73nfVhpwRJVMdGU394iBIDncQ+JoHfUwgqJskbUM40dvZdyjbrqc/Q/4z+hbZb+oN/GXb8sVKBATPzSDMKQ/xqgisYIw+wmDPStnPsHAaIWOtni47zIgilJzD0WEk78/YjmPbUrboYvWziK5JiRRJFA1rkQqV1c0M+OXixIm+/yS8AksgCeaHr0WUieGcJtjT9uE8vyFop5ykhRiNxy9wGaq6i7IEecsrkd6DqxDHWkwhFuO1bSE83q/VAoIBAEA+RX1i/SUi08p71ggUi9WFMqXmzELp1L3hiEjOc2AklHk2rPxsaTh9+G95BvjhP7fRa/Yga+yDtYuyjO99nedStdNNSg03aPXILl9gs3r2dPiQKUEXZJ3FrH6tkils/8BlpOIRfbkszrdZIKTO9GCdLWQ30dQITDACs8zV/1GFGrHFrqnnMe/NpIFHWNZJ0/WZMi8wgWO6Ik8jHEpQtVXRiXLqy7U6hk170pa4GHOzvftfPElOZZjy9qn7KjdAQqy6spIrAE94OEL+fBgbHQZGLpuTlj6w6YGbMtPU8uo7sXKoc6WOCb68JWft3tejGLDa1946HAWqVM9B/UcneNc=", } + +// mockHostOption creates a HostOption that uses the provided mocknet. +// Inlined to avoid import cycle with core/mock package. +func mockHostOption(mn mocknet.Mocknet) libp2p.HostOption { + return func(id peer.ID, ps pstore.Peerstore, opts ...golib.Option) (host.Host, error) { + var cfg golib.Config + if err := cfg.Apply(opts...); err != nil { + return nil, err + } + + // The mocknet does not use the provided libp2p.Option. This options include + // the listening addresses we want our peer listening on. Therefore, we have + // to manually parse the configuration and add them here. 
+ ps.AddAddrs(id, cfg.ListenAddrs, pstore.PermanentAddrTTL) + return mn.AddPeerWithPeerstore(id, ps) + } +} + +func TestHasActiveDHTClient(t *testing.T) { + // Test 1: nil DHTClient + t.Run("nil DHTClient", func(t *testing.T) { + node := &IpfsNode{ + DHTClient: nil, + } + if node.HasActiveDHTClient() { + t.Error("Expected false for nil DHTClient") + } + }) + + // Test 2: Typed nil *ddht.DHT (common case when Routing.Type=delegated) + t.Run("typed nil ddht.DHT", func(t *testing.T) { + node := &IpfsNode{ + DHTClient: (*ddht.DHT)(nil), + } + if node.HasActiveDHTClient() { + t.Error("Expected false for typed nil *ddht.DHT") + } + }) + + // Test 3: Typed nil *fullrt.FullRT (accelerated DHT client) + t.Run("typed nil fullrt.FullRT", func(t *testing.T) { + node := &IpfsNode{ + DHTClient: (*fullrt.FullRT)(nil), + } + if node.HasActiveDHTClient() { + t.Error("Expected false for typed nil *fullrt.FullRT") + } + }) + + // Test 4: routinghelpers.Null no-op router (Routing.Type=none) + t.Run("routinghelpers.Null", func(t *testing.T) { + node := &IpfsNode{ + DHTClient: routinghelpers.Null{}, + } + if node.HasActiveDHTClient() { + t.Error("Expected false for routinghelpers.Null") + } + }) + + // Test 5: Valid standard dual DHT (Routing.Type=auto/dht/dhtclient) + t.Run("valid standard dual DHT", func(t *testing.T) { + ctx := context.Background() + mn := mocknet.New() + defer mn.Close() + + ds := syncds.MutexWrap(datastore.NewMapDatastore()) + c := config.Config{} + c.Identity = testIdentity + c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"} + + r := &repo.Mock{ + C: c, + D: ds, + K: keystore.NewMemKeystore(), + F: filestore.NewFileManager(ds, filepath.Dir(os.TempDir())), + } + + node, err := NewNode(ctx, &BuildCfg{ + Routing: libp2p.DHTServerOption, + Repo: r, + Host: mockHostOption(mn), + Online: true, + }) + if err != nil { + t.Fatalf("Failed to create node with DHT: %v", err) + } + defer node.Close() + + // First verify test setup created the expected DHT type + if 
node.DHTClient == nil { + t.Fatalf("Test setup failed: DHTClient is nil") + } + + if _, ok := node.DHTClient.(*ddht.DHT); !ok { + t.Fatalf("Test setup failed: expected DHTClient to be *ddht.DHT, got %T", node.DHTClient) + } + + // Now verify HasActiveDHTClient() correctly identifies it as active + if !node.HasActiveDHTClient() { + t.Error("Expected true for valid dual DHT client") + } + }) + + // Test 6: Valid accelerated DHT client (Routing.Type=autoclient) + t.Run("valid accelerated DHT client", func(t *testing.T) { + ctx := context.Background() + mn := mocknet.New() + defer mn.Close() + + ds := syncds.MutexWrap(datastore.NewMapDatastore()) + c := config.Config{} + c.Identity = testIdentity + c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"} + c.Routing.AcceleratedDHTClient = config.True + + r := &repo.Mock{ + C: c, + D: ds, + K: keystore.NewMemKeystore(), + F: filestore.NewFileManager(ds, filepath.Dir(os.TempDir())), + } + + node, err := NewNode(ctx, &BuildCfg{ + Routing: libp2p.DHTOption, + Repo: r, + Host: mockHostOption(mn), + Online: true, + }) + if err != nil { + t.Fatalf("Failed to create node with accelerated DHT: %v", err) + } + defer node.Close() + + // First verify test setup created the expected accelerated DHT type + if node.DHTClient == nil { + t.Fatalf("Test setup failed: DHTClient is nil") + } + + if _, ok := node.DHTClient.(*fullrt.FullRT); !ok { + t.Fatalf("Test setup failed: expected DHTClient to be *fullrt.FullRT, got %T", node.DHTClient) + } + + // Now verify HasActiveDHTClient() correctly identifies it as active + if !node.HasActiveDHTClient() { + t.Error("Expected true for valid accelerated DHT client") + } + }) +} diff --git a/core/coreapi/coreapi.go b/core/coreapi/coreapi.go index b757929a2..eca9fd989 100644 --- a/core/coreapi/coreapi.go +++ b/core/coreapi/coreapi.go @@ -23,7 +23,6 @@ import ( dag "github.com/ipfs/boxo/ipld/merkledag" pathresolver "github.com/ipfs/boxo/path/resolver" pin "github.com/ipfs/boxo/pinning/pinner" - 
provider "github.com/ipfs/boxo/provider" offlineroute "github.com/ipfs/boxo/routing/offline" ipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/kubo/config" @@ -70,7 +69,8 @@ type CoreAPI struct { ipldPathResolver pathresolver.Resolver unixFSPathResolver pathresolver.Resolver - provider provider.System + provider node.DHTProvider + providingStrategy config.ProvideStrategy pubSub *pubsub.PubSub @@ -185,7 +185,8 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e ipldPathResolver: n.IPLDPathResolver, unixFSPathResolver: n.UnixFSPathResolver, - provider: n.Provider, + provider: n.Provider, + providingStrategy: n.ProvidingStrategy, pubSub: n.PubSub, @@ -207,18 +208,18 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e return nil } - if settings.Offline { - cfg, err := n.Repo.Config() - if err != nil { - return nil, err - } + cfg, err := n.Repo.Config() + if err != nil { + return nil, err + } + if settings.Offline { cs := cfg.Ipns.ResolveCacheSize if cs == 0 { cs = node.DefaultIpnsCacheSize } if cs < 0 { - return nil, fmt.Errorf("cannot specify negative resolve cache size") + return nil, errors.New("cannot specify negative resolve cache size") } nsOptions := []namesys.Option{ @@ -235,8 +236,6 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e return nil, fmt.Errorf("error constructing namesys: %w", err) } - subAPI.provider = provider.NewNoopProvider() - subAPI.peerstore = nil subAPI.peerHost = nil subAPI.recordValidator = nil @@ -244,7 +243,9 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e if settings.Offline || !settings.FetchBlocks { subAPI.exchange = offlinexch.Exchange(subAPI.blockstore) - subAPI.blocks = bserv.New(subAPI.blockstore, subAPI.exchange) + subAPI.blocks = bserv.New(subAPI.blockstore, subAPI.exchange, + bserv.WriteThrough(cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough)), + ) subAPI.dag = 
dag.NewDAGService(subAPI.blocks) } diff --git a/core/coreapi/key.go b/core/coreapi/key.go index a6101dae8..e779c773f 100644 --- a/core/coreapi/key.go +++ b/core/coreapi/key.go @@ -29,7 +29,7 @@ type key struct { func newKey(name string, pid peer.ID) (*key, error) { p, err := path.NewPath("/ipns/" + ipns.NameFromPeer(pid).String()) if err != nil { - return nil, err + return nil, fmt.Errorf("cannot create new key: %w", err) } return &key{ name: name, @@ -65,7 +65,7 @@ func (api *KeyAPI) Generate(ctx context.Context, name string, opts ...caopts.Key } if name == "self" { - return nil, fmt.Errorf("cannot create key with name 'self'") + return nil, errors.New("cannot create key with name 'self'") } _, err = api.repo.Keystore().Get(name) @@ -121,34 +121,37 @@ func (api *KeyAPI) List(ctx context.Context) ([]coreiface.Key, error) { keys, err := api.repo.Keystore().List() if err != nil { - return nil, err + return nil, fmt.Errorf("cannot list keys in keystore: %w", err) } sort.Strings(keys) - out := make([]coreiface.Key, len(keys)+1) + out := make([]coreiface.Key, 1, len(keys)+1) out[0], err = newKey("self", api.identity) if err != nil { return nil, err } - for n, k := range keys { + for _, k := range keys { privKey, err := api.repo.Keystore().Get(k) if err != nil { - return nil, err + log.Errorf("cannot get key from keystore: %s", err) + continue } pubKey := privKey.GetPublic() pid, err := peer.IDFromPublicKey(pubKey) if err != nil { - return nil, err + log.Errorf("cannot decode public key: %s", err) + continue } - out[n+1], err = newKey(k, pid) + k, err := newKey(k, pid) if err != nil { return nil, err } + out = append(out, k) } return out, nil } @@ -168,11 +171,11 @@ func (api *KeyAPI) Rename(ctx context.Context, oldName string, newName string, o ks := api.repo.Keystore() if oldName == "self" { - return nil, false, fmt.Errorf("cannot rename key with name 'self'") + return nil, false, errors.New("cannot rename key with name 'self'") } if newName == "self" { - return nil, 
false, fmt.Errorf("cannot overwrite key with name 'self'") + return nil, false, errors.New("cannot overwrite key with name 'self'") } oldKey, err := ks.Get(oldName) @@ -232,7 +235,7 @@ func (api *KeyAPI) Remove(ctx context.Context, name string) (coreiface.Key, erro ks := api.repo.Keystore() if name == "self" { - return nil, fmt.Errorf("cannot remove key with name 'self'") + return nil, errors.New("cannot remove key with name 'self'") } removed, err := ks.Get(name) diff --git a/core/coreapi/name.go b/core/coreapi/name.go index 3c4145ed5..5e7971698 100644 --- a/core/coreapi/name.go +++ b/core/coreapi/name.go @@ -2,6 +2,7 @@ package coreapi import ( "context" + "errors" "fmt" "strings" "time" @@ -44,9 +45,25 @@ func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.Nam span.SetAttributes(attribute.Float64("ttl", options.TTL.Seconds())) } - err = api.checkOnline(options.AllowOffline) - if err != nil { - return ipns.Name{}, err + // Handle different publishing modes + if options.AllowDelegated { + // AllowDelegated mode: check if delegated publishers are configured + cfg, err := api.repo.Config() + if err != nil { + return ipns.Name{}, fmt.Errorf("failed to read config: %w", err) + } + delegatedPublishers := cfg.DelegatedPublishersWithAutoConf() + if len(delegatedPublishers) == 0 { + return ipns.Name{}, errors.New("no delegated publishers configured: add Ipns.DelegatedPublishers or use --allow-offline for local-only publishing") + } + // For allow-delegated mode, we only require that we have delegated publishers configured + // The node doesn't need P2P connectivity since we're using HTTP publishing + } else { + // Normal mode: check online status with allow-offline flag + err = api.checkOnline(options.AllowOffline) + if err != nil { + return ipns.Name{}, err + } } k, err := keylookup(api.privateKey, api.repo.Keystore(), options.Key) @@ -65,6 +82,10 @@ func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.Nam publishOptions = 
append(publishOptions, namesys.PublishWithTTL(*options.TTL)) } + if options.Sequence != nil { + publishOptions = append(publishOptions, namesys.PublishWithSequence(*options.Sequence)) + } + err = api.namesys.Publish(ctx, k, p, publishOptions...) if err != nil { return ipns.Name{}, err @@ -214,5 +235,5 @@ func keylookup(self ci.PrivKey, kstore keystore.Keystore, k string) (ci.PrivKey, } } - return nil, fmt.Errorf("no key by the given name or PeerID was found") + return nil, errors.New("no key by the given name or PeerID was found") } diff --git a/core/coreapi/pin.go b/core/coreapi/pin.go index 22b3aa25c..9bb44bac5 100644 --- a/core/coreapi/pin.go +++ b/core/coreapi/pin.go @@ -44,20 +44,17 @@ func (api *PinAPI) Add(ctx context.Context, p path.Path, opts ...caopts.PinAddOp return fmt.Errorf("pin: %s", err) } - if err := api.provider.Provide(dagNode.Cid()); err != nil { - return err - } - return api.pinning.Flush(ctx) } -func (api *PinAPI) Ls(ctx context.Context, opts ...caopts.PinLsOption) (<-chan coreiface.Pin, error) { +func (api *PinAPI) Ls(ctx context.Context, pins chan<- coreiface.Pin, opts ...caopts.PinLsOption) error { ctx, span := tracing.Span(ctx, "CoreAPI.PinAPI", "Ls") defer span.End() settings, err := caopts.PinLsOptions(opts...) 
if err != nil { - return nil, err + close(pins) + return err } span.SetAttributes(attribute.String("type", settings.Type)) @@ -65,10 +62,11 @@ func (api *PinAPI) Ls(ctx context.Context, opts ...caopts.PinLsOption) (<-chan c switch settings.Type { case "all", "direct", "indirect", "recursive": default: - return nil, fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", settings.Type) + close(pins) + return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", settings.Type) } - return api.pinLsAll(ctx, settings.Type, settings.Detailed, settings.Name), nil + return api.pinLsAll(ctx, settings.Type, settings.Detailed, settings.Name, pins) } func (api *PinAPI) IsPinned(ctx context.Context, p path.Path, opts ...caopts.PinIsPinnedOption) (string, bool, error) { @@ -230,6 +228,7 @@ func (api *PinAPI) Verify(ctx context.Context) (<-chan coreiface.PinStatus, erro } out := make(chan coreiface.PinStatus) + go func() { defer close(out) for p := range api.pinning.RecursiveKeys(ctx, false) { @@ -254,7 +253,6 @@ type pinInfo struct { pinType string path path.ImmutablePath name string - err error } func (p *pinInfo) Path() path.ImmutablePath { @@ -269,17 +267,12 @@ func (p *pinInfo) Name() string { return p.name } -func (p *pinInfo) Err() error { - return p.err -} - // pinLsAll is an internal function for returning a list of pins // // The caller must keep reading results until the channel is closed to prevent // leaking the goroutine that is fetching pins. 
-func (api *PinAPI) pinLsAll(ctx context.Context, typeStr string, detailed bool, name string) <-chan coreiface.Pin { - out := make(chan coreiface.Pin, 1) - +func (api *PinAPI) pinLsAll(ctx context.Context, typeStr string, detailed bool, name string, out chan<- coreiface.Pin) error { + defer close(out) emittedSet := cid.NewSet() AddToResultKeys := func(c cid.Cid, pinName, typeStr string) error { @@ -297,87 +290,79 @@ func (api *PinAPI) pinLsAll(ctx context.Context, typeStr string, detailed bool, return nil } - go func() { - defer close(out) - - var rkeys []cid.Cid - var err error - if typeStr == "recursive" || typeStr == "all" { - for streamedCid := range api.pinning.RecursiveKeys(ctx, detailed) { - if streamedCid.Err != nil { - out <- &pinInfo{err: streamedCid.Err} - return - } - if err = AddToResultKeys(streamedCid.Pin.Key, streamedCid.Pin.Name, "recursive"); err != nil { - out <- &pinInfo{err: err} - return - } - rkeys = append(rkeys, streamedCid.Pin.Key) + var rkeys []cid.Cid + var err error + if typeStr == "recursive" || typeStr == "all" { + for streamedCid := range api.pinning.RecursiveKeys(ctx, detailed) { + if streamedCid.Err != nil { + return streamedCid.Err + } + if err = AddToResultKeys(streamedCid.Pin.Key, streamedCid.Pin.Name, "recursive"); err != nil { + return err + } + rkeys = append(rkeys, streamedCid.Pin.Key) + } + } + if typeStr == "direct" || typeStr == "all" { + for streamedCid := range api.pinning.DirectKeys(ctx, detailed) { + if streamedCid.Err != nil { + return streamedCid.Err + } + if err = AddToResultKeys(streamedCid.Pin.Key, streamedCid.Pin.Name, "direct"); err != nil { + return err } } - if typeStr == "direct" || typeStr == "all" { - for streamedCid := range api.pinning.DirectKeys(ctx, detailed) { - if streamedCid.Err != nil { - out <- &pinInfo{err: streamedCid.Err} - return - } - if err = AddToResultKeys(streamedCid.Pin.Key, streamedCid.Pin.Name, "direct"); err != nil { - out <- &pinInfo{err: err} - return - } + } + if typeStr == 
"indirect" { + // We need to first visit the direct pins that have priority + // without emitting them + + for streamedCid := range api.pinning.DirectKeys(ctx, detailed) { + if streamedCid.Err != nil { + return streamedCid.Err + } + emittedSet.Add(streamedCid.Pin.Key) + } + + for streamedCid := range api.pinning.RecursiveKeys(ctx, detailed) { + if streamedCid.Err != nil { + return streamedCid.Err + } + emittedSet.Add(streamedCid.Pin.Key) + rkeys = append(rkeys, streamedCid.Pin.Key) + } + } + if typeStr == "indirect" || typeStr == "all" { + if len(rkeys) == 0 { + return nil + } + var addErr error + walkingSet := cid.NewSet() + for _, k := range rkeys { + err = merkledag.Walk( + ctx, merkledag.GetLinksWithDAG(api.dag), k, + func(c cid.Cid) bool { + if !walkingSet.Visit(c) { + return false + } + if emittedSet.Has(c) { + return true // skipped + } + addErr = AddToResultKeys(c, "", "indirect") + return addErr == nil + }, + merkledag.SkipRoot(), merkledag.Concurrent(), + ) + if err != nil { + return err + } + if addErr != nil { + return addErr } } - if typeStr == "indirect" { - // We need to first visit the direct pins that have priority - // without emitting them + } - for streamedCid := range api.pinning.DirectKeys(ctx, detailed) { - if streamedCid.Err != nil { - out <- &pinInfo{err: streamedCid.Err} - return - } - emittedSet.Add(streamedCid.Pin.Key) - } - - for streamedCid := range api.pinning.RecursiveKeys(ctx, detailed) { - if streamedCid.Err != nil { - out <- &pinInfo{err: streamedCid.Err} - return - } - emittedSet.Add(streamedCid.Pin.Key) - rkeys = append(rkeys, streamedCid.Pin.Key) - } - } - if typeStr == "indirect" || typeStr == "all" { - walkingSet := cid.NewSet() - for _, k := range rkeys { - err = merkledag.Walk( - ctx, merkledag.GetLinksWithDAG(api.dag), k, - func(c cid.Cid) bool { - if !walkingSet.Visit(c) { - return false - } - if emittedSet.Has(c) { - return true // skipped - } - err := AddToResultKeys(c, "", "indirect") - if err != nil { - out <- 
&pinInfo{err: err} - return false - } - return true - }, - merkledag.SkipRoot(), merkledag.Concurrent(), - ) - if err != nil { - out <- &pinInfo{err: err} - return - } - } - } - }() - - return out + return nil } func (api *PinAPI) core() coreiface.CoreAPI { diff --git a/core/coreapi/routing.go b/core/coreapi/routing.go index fe273158e..b9c258056 100644 --- a/core/coreapi/routing.go +++ b/core/coreapi/routing.go @@ -15,9 +15,10 @@ import ( cidutil "github.com/ipfs/go-cidutil" coreiface "github.com/ipfs/kubo/core/coreiface" caopts "github.com/ipfs/kubo/core/coreiface/options" + "github.com/ipfs/kubo/core/node" "github.com/ipfs/kubo/tracing" peer "github.com/libp2p/go-libp2p/core/peer" - routing "github.com/libp2p/go-libp2p/core/routing" + mh "github.com/multiformats/go-multihash" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -109,7 +110,7 @@ func (api *RoutingAPI) FindProviders(ctx context.Context, p path.Path, opts ...c numProviders := settings.NumProviders if numProviders < 1 { - return nil, fmt.Errorf("number of providers must be greater than 0") + return nil, errors.New("number of providers must be greater than 0") } pchan := api.routing.FindProvidersAsync(ctx, rp.RootCid(), numProviders) @@ -148,9 +149,9 @@ func (api *RoutingAPI) Provide(ctx context.Context, path path.Path, opts ...caop } if settings.Recursive { - err = provideKeysRec(ctx, api.routing, api.blockstore, []cid.Cid{c}) + err = provideKeysRec(ctx, api.provider, api.blockstore, []cid.Cid{c}) } else { - err = provideKeys(ctx, api.routing, []cid.Cid{c}) + err = api.provider.StartProviding(false, c.Hash()) } if err != nil { return err @@ -159,41 +160,64 @@ func (api *RoutingAPI) Provide(ctx context.Context, path path.Path, opts ...caop return nil } -func provideKeys(ctx context.Context, r routing.Routing, cids []cid.Cid) error { - for _, c := range cids { - err := r.Provide(ctx, c, true) - if err != nil { - return err - } - } - return nil -} - -func provideKeysRec(ctx 
context.Context, r routing.Routing, bs blockstore.Blockstore, cids []cid.Cid) error { +func provideKeysRec(ctx context.Context, prov node.DHTProvider, bs blockstore.Blockstore, cids []cid.Cid) error { provided := cidutil.NewStreamingSet() - errCh := make(chan error) + // Error channel with buffer size 1 to avoid blocking the goroutine + errCh := make(chan error, 1) go func() { + // Always close provided.New to signal completion + defer close(provided.New) + // Also close error channel to distinguish between "no error" and "pending error" + defer close(errCh) + dserv := dag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) for _, c := range cids { - err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, provided.Visitor(ctx)) - if err != nil { - errCh <- err + if err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, provided.Visitor(ctx)); err != nil { + // Send error to channel. If context is cancelled while trying to send, + // exit immediately as the main loop will return ctx.Err() + select { + case errCh <- err: + // Error sent successfully, exit goroutine + case <-ctx.Done(): + // Context cancelled, exit without sending error + return + } + return } } + // All CIDs walked successfully, goroutine will exit and channels will close }() + keys := make([]mh.Multihash, 0) for { select { - case k := <-provided.New: - err := r.Provide(ctx, k, true) - if err != nil { - return err - } - case err := <-errCh: - return err case <-ctx.Done(): + // Context cancelled, return immediately return ctx.Err() + case err := <-errCh: + // Received error from DAG walk, return it + return err + case c, ok := <-provided.New: + if !ok { + // Channel closed means goroutine finished. + // CRITICAL: Check for any error that was sent just before channel closure. + // This handles the race where error is sent to errCh but main loop + // sees provided.New close first. 
+ select { + case err := <-errCh: + if err != nil { + return err + } + // errCh closed with nil, meaning success + default: + // No pending error in errCh + } + // All CIDs successfully processed, start providing + return prov.StartProviding(true, keys...) + } + // Accumulate the CID for providing + keys = append(keys, c.Hash()) } } } diff --git a/core/coreapi/test/api_test.go b/core/coreapi/test/api_test.go index d647a32c8..bf80686f1 100644 --- a/core/coreapi/test/api_test.go +++ b/core/coreapi/test/api_test.go @@ -69,6 +69,10 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity c.Addresses.Swarm = []string{fmt.Sprintf("/ip4/18.0.%d.1/tcp/4001", i)} c.Identity = ident c.Experimental.FilestoreEnabled = true + c.AutoTLS.Enabled = config.False // disable so no /ws listener is added + // For provider tests, avoid that content gets + // auto-provided without calling "provide" (unless pinned). + c.Provide.Strategy = config.NewOptionalString("roots") ds := syncds.MutexWrap(datastore.NewMapDatastore()) r := &repo.Mock{ diff --git a/core/coreapi/test/path_test.go b/core/coreapi/test/path_test.go index 692853a9a..c4a6e8a04 100644 --- a/core/coreapi/test/path_test.go +++ b/core/coreapi/test/path_test.go @@ -15,8 +15,7 @@ import ( ) func TestPathUnixFSHAMTPartial(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() // Create a node apis, err := NodeProvider{}.MakeAPISwarm(t, ctx, true, true, 1) @@ -39,7 +38,7 @@ func TestPathUnixFSHAMTPartial(t *testing.T) { dir[strconv.Itoa(i)] = files.NewBytesFile([]byte(strconv.Itoa(i))) } - r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false)) + r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false, "")) if err != nil { t.Fatal(err) } diff --git a/core/coreapi/unixfs.go b/core/coreapi/unixfs.go index e175488f3..7f068a227 100644 --- a/core/coreapi/unixfs.go +++ b/core/coreapi/unixfs.go @@ 
-2,6 +2,7 @@ package coreapi import ( "context" + "errors" "fmt" blockservice "github.com/ipfs/boxo/blockservice" @@ -15,19 +16,25 @@ import ( uio "github.com/ipfs/boxo/ipld/unixfs/io" "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/provider" cid "github.com/ipfs/go-cid" cidutil "github.com/ipfs/go-cidutil" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/kubo/config" coreiface "github.com/ipfs/kubo/core/coreiface" options "github.com/ipfs/kubo/core/coreiface/options" "github.com/ipfs/kubo/core/coreunix" "github.com/ipfs/kubo/tracing" + mh "github.com/multiformats/go-multihash" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) +var log = logging.Logger("coreapi") + type UnixfsAPI CoreAPI // Add builds a merkledag node from a reader, adds it to the blockstore, @@ -48,8 +55,15 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options attribute.Int("inlinelimit", settings.InlineLimit), attribute.Bool("rawleaves", settings.RawLeaves), attribute.Bool("rawleavesset", settings.RawLeavesSet), + attribute.Int("maxfilelinks", settings.MaxFileLinks), + attribute.Bool("maxfilelinksset", settings.MaxFileLinksSet), + attribute.Int("maxdirectorylinks", settings.MaxDirectoryLinks), + attribute.Bool("maxdirectorylinksset", settings.MaxDirectoryLinksSet), + attribute.Int("maxhamtfanout", settings.MaxHAMTFanout), + attribute.Bool("maxhamtfanoutset", settings.MaxHAMTFanoutSet), attribute.Int("layout", int(settings.Layout)), attribute.Bool("pin", settings.Pin), + attribute.String("pin-name", settings.PinName), attribute.Bool("onlyhash", settings.OnlyHash), attribute.Bool("fscache", settings.FsCache), attribute.Bool("nocopy", settings.NoCopy), @@ -71,7 +85,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options //} if settings.NoCopy && 
!(cfg.Experimental.FilestoreEnabled || cfg.Experimental.UrlstoreEnabled) { - return path.ImmutablePath{}, fmt.Errorf("either the filestore or the urlstore must be enabled to use nocopy, see: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore") + return path.ImmutablePath{}, errors.New("either the filestore or the urlstore must be enabled to use nocopy, see: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore") } addblockstore := api.blockstore @@ -84,14 +98,31 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options if settings.OnlyHash { // setup a /dev/null pipeline to simulate adding the data dstore := dssync.MutexWrap(ds.NewNullDatastore()) - bs := bstore.NewBlockstore(dstore, bstore.WriteThrough()) - addblockstore = bstore.NewGCBlockstore(bs, nil) // gclocker will never be used - exch = nil // exchange will never be used - pinning = nil // pinner will never be used + bs := bstore.NewBlockstore(dstore, bstore.WriteThrough(true)) // we use NewNullDatastore, so ok to always WriteThrough when OnlyHash + addblockstore = bstore.NewGCBlockstore(bs, nil) // gclocker will never be used + exch = nil // exchange will never be used + pinning = nil // pinner will never be used } - bserv := blockservice.New(addblockstore, exch) // hash security 001 - dserv := merkledag.NewDAGService(bserv) + bserv := blockservice.New(addblockstore, exch, + blockservice.WriteThrough(cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough)), + ) // hash security 001 + + var dserv ipld.DAGService = merkledag.NewDAGService(bserv) + + // wrap the DAGService in a providingDAG service which provides every block written. + // note about strategies: + // - "all" gets handled directly at the blockstore so no need to provide + // - "roots" gets handled in the pinner + // - "mfs" gets handled in mfs + // We need to provide the "pinned" cases only. 
Added blocks are not + // going to be provided by the blockstore (wrong strategy for that), + // nor by the pinner (the pinner doesn't traverse the pinned DAG itself, it only + // handles roots). This wrapping ensures all blocks of pinned content get provided. + if settings.Pin && !settings.OnlyHash && + (api.providingStrategy&config.ProvideStrategyPinned) != 0 { + dserv = &providingDagService{dserv, api.provider} + } // add a sync call to the DagService // this ensures that data written to the DagService is persisted to the underlying datastore @@ -115,6 +146,11 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options } } + // Note: the dag service gets wrapped multiple times: + // 1. providingDagService (if pinned strategy) - provides blocks as they're added + // 2. syncDagService - ensures data persistence + // 3. batchingDagService (in coreunix.Adder) - batches operations for efficiency + fileAdder, err := coreunix.NewAdder(ctx, pinning, addblockstore, syncDserv) if err != nil { return path.ImmutablePath{}, err @@ -126,8 +162,21 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options fileAdder.Progress = settings.Progress } fileAdder.Pin = settings.Pin && !settings.OnlyHash + if settings.Pin { + fileAdder.PinName = settings.PinName + } fileAdder.Silent = settings.Silent fileAdder.RawLeaves = settings.RawLeaves + if settings.MaxFileLinksSet { + fileAdder.MaxLinks = settings.MaxFileLinks + } + if settings.MaxDirectoryLinksSet { + fileAdder.MaxDirectoryLinks = settings.MaxDirectoryLinks + } + + if settings.MaxHAMTFanoutSet { + fileAdder.MaxHAMTFanout = settings.MaxHAMTFanout + } fileAdder.NoCopy = settings.NoCopy fileAdder.CidBuilder = prefix fileAdder.PreserveMode = settings.PreserveMode @@ -159,7 +208,8 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options if err != nil { return path.ImmutablePath{}, err } - mr, err := mfs.NewRoot(ctx, md, emptyDirNode, nil) + // MFS root for 
OnlyHash mode: provider is nil since we're not storing/providing anything + mr, err := mfs.NewRoot(ctx, md, emptyDirNode, nil, nil) if err != nil { return path.ImmutablePath{}, err } @@ -172,12 +222,6 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options return path.ImmutablePath{}, err } - if !settings.OnlyHash { - if err := api.provider.Provide(nd.Cid()); err != nil { - return path.ImmutablePath{}, err - } - } - return path.FromCid(nd.Cid()), nil } @@ -197,13 +241,15 @@ func (api *UnixfsAPI) Get(ctx context.Context, p path.Path) (files.Node, error) // Ls returns the contents of an IPFS or IPNS object(s) at path p, with the format: // ` ` -func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, opts ...options.UnixfsLsOption) (<-chan coreiface.DirEntry, error) { +func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, out chan<- coreiface.DirEntry, opts ...options.UnixfsLsOption) error { ctx, span := tracing.Span(ctx, "CoreAPI.UnixfsAPI", "Ls", trace.WithAttributes(attribute.String("path", p.String()))) defer span.End() + defer close(out) + settings, err := options.UnixfsLsOptions(opts...) 
if err != nil { - return nil, err + return err } span.SetAttributes(attribute.Bool("resolvechildren", settings.ResolveChildren)) @@ -213,21 +259,21 @@ func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, opts ...options.Unixf dagnode, err := ses.ResolveNode(ctx, p) if err != nil { - return nil, err + return err } dir, err := uio.NewDirectoryFromNode(ses.dag, dagnode) - if err == uio.ErrNotADir { - return uses.lsFromLinks(ctx, dagnode.Links(), settings) - } if err != nil { - return nil, err + if errors.Is(err, uio.ErrNotADir) { + return uses.lsFromLinks(ctx, dagnode.Links(), settings, out) + } + return err } - return uses.lsFromLinksAsync(ctx, dir, settings) + return uses.lsFromDirLinks(ctx, dir, settings, out) } -func (api *UnixfsAPI) processLink(ctx context.Context, linkres ft.LinkResult, settings *options.UnixfsLsSettings) coreiface.DirEntry { +func (api *UnixfsAPI) processLink(ctx context.Context, linkres ft.LinkResult, settings *options.UnixfsLsSettings) (coreiface.DirEntry, error) { ctx, span := tracing.Span(ctx, "CoreAPI.UnixfsAPI", "ProcessLink") defer span.End() if linkres.Link != nil { @@ -235,7 +281,7 @@ func (api *UnixfsAPI) processLink(ctx context.Context, linkres ft.LinkResult, se } if linkres.Err != nil { - return coreiface.DirEntry{Err: linkres.Err} + return coreiface.DirEntry{}, linkres.Err } lnk := coreiface.DirEntry{ @@ -252,15 +298,13 @@ func (api *UnixfsAPI) processLink(ctx context.Context, linkres ft.LinkResult, se if settings.ResolveChildren { linkNode, err := linkres.Link.GetNode(ctx, api.dag) if err != nil { - lnk.Err = err - break + return coreiface.DirEntry{}, err } if pn, ok := linkNode.(*merkledag.ProtoNode); ok { d, err := ft.FSNodeFromBytes(pn.Data()) if err != nil { - lnk.Err = err - break + return coreiface.DirEntry{}, err } switch d.Type() { case ft.TFile, ft.TRaw: @@ -284,35 +328,50 @@ func (api *UnixfsAPI) processLink(ctx context.Context, linkres ft.LinkResult, se } } - return lnk + return lnk, nil } -func (api *UnixfsAPI) 
lsFromLinksAsync(ctx context.Context, dir uio.Directory, settings *options.UnixfsLsSettings) (<-chan coreiface.DirEntry, error) { - out := make(chan coreiface.DirEntry, uio.DefaultShardWidth) +func (api *UnixfsAPI) lsFromDirLinks(ctx context.Context, dir uio.Directory, settings *options.UnixfsLsSettings, out chan<- coreiface.DirEntry) error { + for l := range dir.EnumLinksAsync(ctx) { + dirEnt, err := api.processLink(ctx, l, settings) // TODO: perf: processing can be done in background and in parallel + if err != nil { + return err + } + select { + case out <- dirEnt: + case <-ctx.Done(): + return nil + } + } + return nil +} +func (api *UnixfsAPI) lsFromLinks(ctx context.Context, ndlinks []*ipld.Link, settings *options.UnixfsLsSettings, out chan<- coreiface.DirEntry) error { + // Create links channel large enough to not block when writing to out is slower. + links := make(chan coreiface.DirEntry, len(ndlinks)) + errs := make(chan error, 1) go func() { - defer close(out) - for l := range dir.EnumLinksAsync(ctx) { + defer close(links) + defer close(errs) + for _, l := range ndlinks { + lr := ft.LinkResult{Link: &ipld.Link{Name: l.Name, Size: l.Size, Cid: l.Cid}} + lnk, err := api.processLink(ctx, lr, settings) // TODO: can be parallel if settings.Async + if err != nil { + errs <- err + return + } select { - case out <- api.processLink(ctx, l, settings): // TODO: perf: processing can be done in background and in parallel + case links <- lnk: case <-ctx.Done(): return } } }() - return out, nil -} - -func (api *UnixfsAPI) lsFromLinks(ctx context.Context, ndlinks []*ipld.Link, settings *options.UnixfsLsSettings) (<-chan coreiface.DirEntry, error) { - links := make(chan coreiface.DirEntry, len(ndlinks)) - for _, l := range ndlinks { - lr := ft.LinkResult{Link: &ipld.Link{Name: l.Name, Size: l.Size, Cid: l.Cid}} - - links <- api.processLink(ctx, lr, settings) // TODO: can be parallel if settings.Async + for lnk := range links { + out <- lnk } - close(links) - return links, 
nil + return <-errs } func (api *UnixfsAPI) core() *CoreAPI { @@ -328,3 +387,39 @@ type syncDagService struct { func (s *syncDagService) Sync() error { return s.syncFn() } + +type providingDagService struct { + ipld.DAGService + provider.MultihashProvider +} + +func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error { + if err := pds.DAGService.Add(ctx, n); err != nil { + return err + } + // Provider errors are logged but not propagated. + // We don't want DAG operations to fail due to providing issues. + // The user's data is still stored successfully even if the + // announcement to the routing system fails temporarily. + if err := pds.StartProviding(false, n.Cid().Hash()); err != nil { + log.Errorf("failed to provide new block: %s", err) + } + return nil +} + +func (pds *providingDagService) AddMany(ctx context.Context, nds []ipld.Node) error { + if err := pds.DAGService.AddMany(ctx, nds); err != nil { + return err + } + keys := make([]mh.Multihash, len(nds)) + for i, n := range nds { + keys[i] = n.Cid().Hash() + } + // Same error handling philosophy as Add(): log but don't fail. + if err := pds.StartProviding(false, keys...); err != nil { + log.Errorf("failed to provide new blocks: %s", err) + } + return nil +} + +var _ ipld.DAGService = (*providingDagService)(nil) diff --git a/core/corehttp/corehttp.go b/core/corehttp/corehttp.go index 6a9f43b51..6749c738b 100644 --- a/core/corehttp/corehttp.go +++ b/core/corehttp/corehttp.go @@ -11,10 +11,8 @@ import ( "net/http" "time" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" core "github.com/ipfs/kubo/core" - "github.com/jbenet/goprocess" - periodicproc "github.com/jbenet/goprocess/periodic" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) @@ -80,9 +78,23 @@ func ListenAndServe(n *core.IpfsNode, listeningMultiAddr string, options ...Serv return Serve(n, manet.NetListener(list), options...) 
} -// Serve accepts incoming HTTP connections on the listener and pass them +// Serve accepts incoming HTTP connections on the listener and passes them // to ServeOption handlers. func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error { + return ServeWithReady(node, lis, nil, options...) +} + +// ServeWithReady is like Serve but signals on the ready channel when the +// server is about to accept connections. The channel is closed right before +// server.Serve() is called. +// +// This is useful for callers that need to perform actions (like writing +// address files) only after the server is guaranteed to be accepting +// connections, avoiding race conditions where clients see the file before +// the server is ready. +// +// Passing nil for ready is equivalent to calling Serve(). +func ServeWithReady(node *core.IpfsNode, lis net.Listener, ready chan<- struct{}, options ...ServeOption) error { // make sure we close this no matter what. defer lis.Close() @@ -97,7 +109,7 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error } select { - case <-node.Process.Closing(): + case <-node.Context().Done(): return fmt.Errorf("failed to start server, process closing") default: } @@ -107,20 +119,34 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error } var serverError error - serverProc := node.Process.Go(func(p goprocess.Process) { + serverClosed := make(chan struct{}) + go func() { + if ready != nil { + close(ready) + } serverError = server.Serve(lis) - }) + close(serverClosed) + }() // wait for server to exit. 
select { - case <-serverProc.Closed(): + case <-serverClosed: // if node being closed before server exits, close server - case <-node.Process.Closing(): + case <-node.Context().Done(): log.Infof("server at %s terminating...", addr) - warnProc := periodicproc.Tick(5*time.Second, func(_ goprocess.Process) { - log.Infof("waiting for server at %s to terminate...", addr) - }) + go func() { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + log.Infof("waiting for server at %s to terminate...", addr) + case <-serverClosed: + return + } + } + }() // This timeout shouldn't be necessary if all of our commands // are obeying their contexts but we should have *some* timeout. @@ -130,10 +156,8 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error // Should have already closed but we still need to wait for it // to set the error. - <-serverProc.Closed() + <-serverClosed serverError = err - - warnProc.Close() } log.Infof("server at %s terminated", addr) diff --git a/core/corehttp/gateway.go b/core/corehttp/gateway.go index 6ac381885..393a668bf 100644 --- a/core/corehttp/gateway.go +++ b/core/corehttp/gateway.go @@ -97,11 +97,24 @@ func Libp2pGatewayOption() ServeOption { return nil, err } + // Get gateway configuration from the node's config + cfg, err := n.Repo.Config() + if err != nil { + return nil, err + } + gwConfig := gateway.Config{ - DeserializedResponses: false, - NoDNSLink: true, + // Keep these constraints for security + DeserializedResponses: false, // Trustless-only + NoDNSLink: true, // No DNS resolution + DisableHTMLErrors: true, // Plain text errors only PublicGateways: nil, Menu: nil, + // Apply timeout and concurrency limits from user config + RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout), + MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))), + MaxRangeRequestFileSize: 
int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))), + DiagnosticServiceURL: "", // Not used since DisableHTMLErrors=true } handler := gateway.NewHandler(gwConfig, &offlineGatewayErrWrapper{gwimpl: backend}) @@ -254,10 +267,14 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er // Initialize gateway configuration, with empty PublicGateways, handled after. gwCfg := gateway.Config{ - DeserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses), - DisableHTMLErrors: cfg.Gateway.DisableHTMLErrors.WithDefault(config.DefaultDisableHTMLErrors), - NoDNSLink: cfg.Gateway.NoDNSLink, - PublicGateways: map[string]*gateway.PublicGateway{}, + DeserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses), + DisableHTMLErrors: cfg.Gateway.DisableHTMLErrors.WithDefault(config.DefaultDisableHTMLErrors), + NoDNSLink: cfg.Gateway.NoDNSLink, + PublicGateways: map[string]*gateway.PublicGateway{}, + RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout), + MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))), + MaxRangeRequestFileSize: int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))), + DiagnosticServiceURL: cfg.Gateway.DiagnosticServiceURL.WithDefault(config.DefaultDiagnosticServiceURL), } // Add default implicit known gateways, such as subdomain gateway on localhost. 
diff --git a/core/corehttp/logs.go b/core/corehttp/logs.go index 944e62c5b..fbdc94f6f 100644 --- a/core/corehttp/logs.go +++ b/core/corehttp/logs.go @@ -1,57 +1,68 @@ package corehttp import ( - "io" + "bufio" + "fmt" "net" "net/http" - lwriter "github.com/ipfs/go-log/writer" + logging "github.com/ipfs/go-log/v2" core "github.com/ipfs/kubo/core" ) -type writeErrNotifier struct { - w io.Writer - errs chan error -} - -func newWriteErrNotifier(w io.Writer) (io.WriteCloser, <-chan error) { - ch := make(chan error, 1) - return &writeErrNotifier{ - w: w, - errs: ch, - }, ch -} - -func (w *writeErrNotifier) Write(b []byte) (int, error) { - n, err := w.w.Write(b) - if err != nil { - select { - case w.errs <- err: - default: - } - } - if f, ok := w.w.(http.Flusher); ok { - f.Flush() - } - return n, err -} - -func (w *writeErrNotifier) Close() error { - select { - case w.errs <- io.EOF: - default: - } - return nil -} - func LogOption() ServeOption { return func(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) { mux.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - wnf, errs := newWriteErrNotifier(w) - lwriter.WriterGroup.AddWriter(wnf) - log.Event(n.Context(), "log API client connected") //nolint deprecated - <-errs + // The log data comes from an io.Reader, and we need to constantly + // read from it and then write to the HTTP response. + pipeReader := logging.NewPipeReader() + done := make(chan struct{}) + + // Close the pipe reader if the request context is canceled. This + // is necessary to avoiding blocking on reading from the pipe + // reader when the client terminates the request. 
+ go func() { + select { + case <-r.Context().Done(): // Client canceled request + case <-n.Context().Done(): // Node shutdown + case <-done: // log reader goroutine exitex + } + pipeReader.Close() + }() + + errs := make(chan error, 1) + + go func() { + defer close(errs) + defer close(done) + + rdr := bufio.NewReader(pipeReader) + for { + // Read a line of log data and send it to the client. + line, err := rdr.ReadString('\n') + if err != nil { + errs <- fmt.Errorf("error reading log message: %s", err) + return + } + _, err = w.Write([]byte(line)) + if err != nil { + // Failed to write to client, probably disconnected. + return + } + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + if r.Context().Err() != nil { + return + } + } + }() + log.Info("log API client connected") + err := <-errs + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } }) return mux, nil } diff --git a/core/corehttp/metrics.go b/core/corehttp/metrics.go index f43362ff7..be1031513 100644 --- a/core/corehttp/metrics.go +++ b/core/corehttp/metrics.go @@ -87,6 +87,7 @@ func MetricsCollectionOption(handlerName string) ServeOption { Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, } + // Legacy metric - new metrics are provided by boxo/gateway as gw_http_responses_total reqCnt := prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: opts.Namespace, diff --git a/core/corehttp/p2p_proxy_test.go b/core/corehttp/p2p_proxy_test.go index 969bc31e1..e915c0822 100644 --- a/core/corehttp/p2p_proxy_test.go +++ b/core/corehttp/p2p_proxy_test.go @@ -5,9 +5,8 @@ import ( "strings" "testing" - "github.com/ipfs/kubo/thirdparty/assert" - protocol "github.com/libp2p/go-libp2p/core/protocol" + "github.com/stretchr/testify/require" ) type TestCase struct { @@ -29,12 +28,10 @@ func TestParseRequest(t *testing.T) { req, _ := http.NewRequest(http.MethodGet, url, strings.NewReader("")) parsed, err := parseRequest(req) - if err != nil { - t.Fatal(err) - } 
- assert.True(parsed.httpPath == tc.path, t, "proxy request path") - assert.True(parsed.name == protocol.ID(tc.name), t, "proxy request name") - assert.True(parsed.target == tc.target, t, "proxy request peer-id") + require.NoError(t, err) + require.Equal(t, tc.path, parsed.httpPath, "proxy request path") + require.Equal(t, protocol.ID(tc.name), parsed.name, "proxy request name") + require.Equal(t, tc.target, parsed.target, "proxy request peer-id") } } @@ -49,8 +46,6 @@ func TestParseRequestInvalidPath(t *testing.T) { req, _ := http.NewRequest(http.MethodGet, url, strings.NewReader("")) _, err := parseRequest(req) - if err == nil { - t.Fail() - } + require.Error(t, err) } } diff --git a/core/corehttp/routing.go b/core/corehttp/routing.go index 9a2591d32..239f8737b 100644 --- a/core/corehttp/routing.go +++ b/core/corehttp/routing.go @@ -2,6 +2,8 @@ package corehttp import ( "context" + "errors" + "fmt" "net" "net/http" "time" @@ -13,6 +15,9 @@ import ( "github.com/ipfs/boxo/routing/http/types/iter" cid "github.com/ipfs/go-cid" core "github.com/ipfs/kubo/core" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/dual" + "github.com/libp2p/go-libp2p-kad-dht/fullrt" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" ) @@ -96,6 +101,60 @@ func (r *contentRouter) PutIPNS(ctx context.Context, name ipns.Name, record *ipn return r.n.Routing.PutValue(ctx, string(name.RoutingKey()), raw) } +func (r *contentRouter) GetClosestPeers(ctx context.Context, key cid.Cid) (iter.ResultIter[*types.PeerRecord], error) { + // Per the spec, if the peer ID is empty, we should use self. 
+ if key == cid.Undef { + return nil, errors.New("GetClosestPeers key is undefined") + } + + keyStr := string(key.Hash()) + var peers []peer.ID + var err error + + if r.n.DHTClient == nil { + return nil, fmt.Errorf("GetClosestPeers not supported: DHT is not available") + } + + switch dhtClient := r.n.DHTClient.(type) { + case *dual.DHT: + // Only use WAN DHT for public HTTP Routing API. + // LAN DHT contains private network peers that should not be exposed publicly. + if dhtClient.WAN == nil { + return nil, fmt.Errorf("GetClosestPeers not supported: WAN DHT is not available") + } + peers, err = dhtClient.WAN.GetClosestPeers(ctx, keyStr) + case *fullrt.FullRT: + peers, err = dhtClient.GetClosestPeers(ctx, keyStr) + case *dht.IpfsDHT: + peers, err = dhtClient.GetClosestPeers(ctx, keyStr) + default: + return nil, fmt.Errorf("GetClosestPeers not supported for DHT type %T", r.n.DHTClient) + } + + if err != nil { + return nil, err + } + + // We have some DHT-closest peers. Find addresses for them. + // The addresses should be in the peerstore. 
+ records := make([]*types.PeerRecord, 0, len(peers)) + for _, p := range peers { + addrs := r.n.Peerstore.Addrs(p) + rAddrs := make([]types.Multiaddr, len(addrs)) + for i, addr := range addrs { + rAddrs[i] = types.Multiaddr{Multiaddr: addr} + } + record := types.PeerRecord{ + ID: &p, + Schema: types.SchemaPeer, + Addrs: rAddrs, + } + records = append(records, &record) + } + + return iter.ToResultIter(iter.FromSlice(records)), nil +} + type peerChanIter struct { ch <-chan peer.AddrInfo cancel context.CancelFunc diff --git a/core/corehttp/webui.go b/core/corehttp/webui.go index 2117d78af..da0ba9860 100644 --- a/core/corehttp/webui.go +++ b/core/corehttp/webui.go @@ -1,11 +1,32 @@ package corehttp +import ( + "fmt" + "net" + "net/http" + "strings" + + "github.com/ipfs/go-cid" + "github.com/ipfs/kubo/config" + core "github.com/ipfs/kubo/core" +) + // WebUI version confirmed to work with this Kubo version -const WebUIPath = "/ipfs/bafybeif6abowqcavbkz243biyh7pde7ick5kkwwytrh7pd2hkbtuqysjxy" // v4.3.2 +const WebUIPath = "/ipfs/bafybeidsjptidvb6wf6benznq2pxgnt5iyksgtecpmjoimlmswhtx2u5ua" // v4.10.0 // WebUIPaths is a list of all past webUI paths. 
var WebUIPaths = []string{ WebUIPath, + "/ipfs/bafybeicg7e6o2eszkfdzxg5233gmuip2a7kfzoloh7voyvt2r6ivdet54u", // v4.9.1 + "/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu", // v4.8.0 + "/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y", // v4.7.0 + "/ipfs/bafybeibpaa5kqrj4gkemiswbwndjqiryl65cks64ypwtyerxixu56gnvvm", // v4.6.0 + "/ipfs/bafybeiata4qg7xjtwgor6r5dw63jjxyouenyromrrb4lrewxrlvav7gzgi", // v4.5.0 + "/ipfs/bafybeigp3zm7cqoiciqk5anlheenqjsgovp7j7zq6hah4nu6iugdgb4nby", // v4.4.2 + "/ipfs/bafybeiatztgdllxnp5p6zu7bdwhjmozsmd7jprff4bdjqjljxtylitvss4", // v4.4.1 + "/ipfs/bafybeibgic2ex3fvzkinhy6k6aqyv3zy2o7bkbsmrzvzka24xetv7eeadm", // v4.4.0 + "/ipfs/bafybeid4uxz7klxcu3ffsnmn64r7ihvysamlj4ohl5h2orjsffuegcpaeq", // v4.3.3 + "/ipfs/bafybeif6abowqcavbkz243biyh7pde7ick5kkwwytrh7pd2hkbtuqysjxy", // v4.3.2 "/ipfs/bafybeihatzsgposbr3hrngo42yckdyqcc56yean2rynnwpzxstvdlphxf4", "/ipfs/bafybeigggyffcf6yfhx5irtwzx3cgnk6n3dwylkvcpckzhqqrigsxowjwe", "/ipfs/bafybeidf7cpkwsjkq6xs3r6fbbxghbugilx3jtezbza7gua3k5wjixpmba", @@ -14,18 +35,18 @@ var WebUIPaths = []string{ "/ipfs/bafybeicyp7ssbnj3hdzehcibmapmpuc3atrsc4ch3q6acldfh4ojjdbcxe", "/ipfs/bafybeigs6d53gpgu34553mbi5bbkb26e4ikruoaaar75jpfdywpup2r3my", "/ipfs/bafybeic4gops3d3lyrisqku37uio33nvt6fqxvkxihrwlqsuvf76yln4fm", - "/ipfs/bafybeifeqt7mvxaniphyu2i3qhovjaf3sayooxbh5enfdqtiehxjv2ldte", + "/ipfs/bafybeifeqt7mvxaniphyu2i3qhovjaf3sayooxbh5enfdqtiehxjv2ldte", // v2.22.0 "/ipfs/bafybeiequgo72mrvuml56j4gk7crewig5bavumrrzhkqbim6b3s2yqi7ty", - "/ipfs/bafybeibjbq3tmmy7wuihhhwvbladjsd3gx3kfjepxzkq6wylik6wc3whzy", - "/ipfs/bafybeiavrvt53fks6u32n5p2morgblcmck4bh4ymf4rrwu7ah5zsykmqqa", - "/ipfs/bafybeiageaoxg6d7npaof6eyzqbwvbubyler7bq44hayik2hvqcggg7d2y", - "/ipfs/bafybeidb5eryh72zajiokdggzo7yct2d6hhcflncji5im2y5w26uuygdsm", - "/ipfs/bafybeibozpulxtpv5nhfa2ue3dcjx23ndh3gwr5vwllk7ptoyfwnfjjr4q", - "/ipfs/bafybeiednzu62vskme5wpoj4bjjikeg3xovfpp4t7vxk5ty2jxdi4mv4bu", - 
"/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva", + "/ipfs/bafybeibjbq3tmmy7wuihhhwvbladjsd3gx3kfjepxzkq6wylik6wc3whzy", // v2.20.0 + "/ipfs/bafybeiavrvt53fks6u32n5p2morgblcmck4bh4ymf4rrwu7ah5zsykmqqa", // v2.19.0 + "/ipfs/bafybeiageaoxg6d7npaof6eyzqbwvbubyler7bq44hayik2hvqcggg7d2y", // v2.18.1 + "/ipfs/bafybeidb5eryh72zajiokdggzo7yct2d6hhcflncji5im2y5w26uuygdsm", // v2.18.0 + "/ipfs/bafybeibozpulxtpv5nhfa2ue3dcjx23ndh3gwr5vwllk7ptoyfwnfjjr4q", // v2.15.1 + "/ipfs/bafybeiednzu62vskme5wpoj4bjjikeg3xovfpp4t7vxk5ty2jxdi4mv4bu", // v2.15.0 + "/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva", // v2.13.0 "/ipfs/bafybeiflkjt66aetfgcrgvv75izymd5kc47g6luepqmfq6zsf5w6ueth6y", "/ipfs/bafybeid26vjplsejg7t3nrh7mxmiaaxriebbm4xxrxxdunlk7o337m5sqq", - "/ipfs/bafybeif4zkmu7qdhkpf3pnhwxipylqleof7rl6ojbe7mq3fzogz6m4xk3i", + "/ipfs/bafybeif4zkmu7qdhkpf3pnhwxipylqleof7rl6ojbe7mq3fzogz6m4xk3i", // v2.11.4 "/ipfs/bafybeianwe4vy7sprht5sm3hshvxjeqhwcmvbzq73u55sdhqngmohkjgs4", "/ipfs/bafybeicitin4p7ggmyjaubqpi3xwnagrwarsy6hiihraafk5rcrxqxju6m", "/ipfs/bafybeihpetclqvwb4qnmumvcn7nh4pxrtugrlpw4jgjpqicdxsv7opdm6e", @@ -64,4 +85,85 @@ var WebUIPaths = []string{ "/ipfs/Qmexhq2sBHnXQbvyP2GfUdbnY7HCagH2Mw5vUNSBn2nxip", } -var WebUIOption = RedirectOption("webui", WebUIPath) +// WebUIOption provides the WebUI handler for the RPC API. 
+func WebUIOption(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) { + cfg, err := n.Repo.Config() + if err != nil { + return nil, err + } + + handler := &webUIHandler{ + headers: cfg.API.HTTPHeaders, + node: n, + noFetch: cfg.Gateway.NoFetch, + deserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses), + } + + mux.Handle("/webui/", handler) + return mux, nil +} + +type webUIHandler struct { + headers map[string][]string + node *core.IpfsNode + noFetch bool + deserializedResponses bool +} + +func (h *webUIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + for k, v := range h.headers { + w.Header()[http.CanonicalHeaderKey(k)] = v + } + + // Check if WebUI is incompatible with current configuration + if !h.deserializedResponses { + h.writeIncompatibleError(w) + return + } + + // Check if WebUI is available locally when Gateway.NoFetch is true + if h.noFetch { + cidStr := strings.TrimPrefix(WebUIPath, "/ipfs/") + webUICID, err := cid.Parse(cidStr) + if err != nil { + // This should never happen with hardcoded constant + log.Errorf("failed to parse WebUI CID: %v", err) + } else { + has, err := h.node.Blockstore.Has(r.Context(), webUICID) + if err != nil { + log.Debugf("error checking WebUI availability: %v", err) + } else if !has { + h.writeNotAvailableError(w) + return + } + } + } + + // Default behavior: redirect to the WebUI path + http.Redirect(w, r, WebUIPath, http.StatusFound) +} + +func (h *webUIHandler) writeIncompatibleError(w http.ResponseWriter) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, `IPFS WebUI Incompatible + +WebUI is not compatible with Gateway.DeserializedResponses=false. + +The WebUI requires deserializing IPFS responses to render the interface. +To use the WebUI, set Gateway.DeserializedResponses=true in your config. 
+`) +} + +func (h *webUIHandler) writeNotAvailableError(w http.ResponseWriter) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, `IPFS WebUI Not Available + +WebUI at %s is not in your local node due to Gateway.NoFetch=true. + +To use the WebUI, either: +1. Run: ipfs pin add --progress --name ipfs-webui %s +2. Download from https://github.com/ipfs/ipfs-webui/releases and import with: ipfs dag import ipfs-webui.car +`, WebUIPath, WebUIPath) +} diff --git a/core/coreiface/options/name.go b/core/coreiface/options/name.go index 7b4b6a8fd..8fc4f552a 100644 --- a/core/coreiface/options/name.go +++ b/core/coreiface/options/name.go @@ -16,6 +16,8 @@ type NamePublishSettings struct { TTL *time.Duration CompatibleWithV1 bool AllowOffline bool + AllowDelegated bool + Sequence *uint64 } type NameResolveSettings struct { @@ -34,7 +36,8 @@ func NamePublishOptions(opts ...NamePublishOption) (*NamePublishSettings, error) ValidTime: DefaultNameValidTime, Key: "self", - AllowOffline: false, + AllowOffline: false, + AllowDelegated: false, } for _, opt := range opts { @@ -96,6 +99,16 @@ func (nameOpts) AllowOffline(allow bool) NamePublishOption { } } +// AllowDelegated is an option for Name.Publish which allows publishing without +// DHT connectivity, using local datastore and HTTP delegated publishers only. +// Default value is false +func (nameOpts) AllowDelegated(allowDelegated bool) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.AllowDelegated = allowDelegated + return nil + } +} + // TTL is an option for Name.Publish which specifies the time duration the // published record should be cached for (caution: experimental). func (nameOpts) TTL(ttl time.Duration) NamePublishOption { @@ -105,6 +118,15 @@ func (nameOpts) TTL(ttl time.Duration) NamePublishOption { } } +// Sequence is an option for Name.Publish which specifies the sequence number of +// a namesys record. 
+func (nameOpts) Sequence(seq uint64) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.Sequence = &seq + return nil + } +} + // CompatibleWithV1 is an option for [Name.Publish] which specifies if the // created record should be backwards compatible with V1 IPNS Records. func (nameOpts) CompatibleWithV1(compatible bool) NamePublishOption { diff --git a/core/coreiface/options/unixfs.go b/core/coreiface/options/unixfs.go index c837ec1b2..45e880ed1 100644 --- a/core/coreiface/options/unixfs.go +++ b/core/coreiface/options/unixfs.go @@ -7,6 +7,8 @@ import ( "time" dag "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" + "github.com/ipfs/boxo/ipld/unixfs/io" cid "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" ) @@ -22,15 +24,22 @@ type UnixfsAddSettings struct { CidVersion int MhType uint64 - Inline bool - InlineLimit int - RawLeaves bool - RawLeavesSet bool + Inline bool + InlineLimit int + RawLeaves bool + RawLeavesSet bool + MaxFileLinks int + MaxFileLinksSet bool + MaxDirectoryLinks int + MaxDirectoryLinksSet bool + MaxHAMTFanout int + MaxHAMTFanoutSet bool Chunker string Layout Layout Pin bool + PinName string OnlyHash bool FsCache bool NoCopy bool @@ -60,15 +69,22 @@ func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix, CidVersion: -1, MhType: mh.SHA2_256, - Inline: false, - InlineLimit: 32, - RawLeaves: false, - RawLeavesSet: false, + Inline: false, + InlineLimit: 32, + RawLeaves: false, + RawLeavesSet: false, + MaxFileLinks: helpers.DefaultLinksPerBlock, + MaxFileLinksSet: false, + MaxDirectoryLinks: 0, + MaxDirectoryLinksSet: false, + MaxHAMTFanout: io.DefaultShardWidth, + MaxHAMTFanoutSet: false, Chunker: "size-262144", Layout: BalancedLayout, Pin: false, + PinName: "", OnlyHash: false, FsCache: false, NoCopy: false, @@ -190,6 +206,35 @@ func (unixfsOpts) RawLeaves(enable bool) UnixfsAddOption { } } +// MaxFileLinks specifies the maximum 
number of children for UnixFS file +// nodes. +func (unixfsOpts) MaxFileLinks(n int) UnixfsAddOption { + return func(settings *UnixfsAddSettings) error { + settings.MaxFileLinks = n + settings.MaxFileLinksSet = true + return nil + } +} + +// MaxDirectoryLinks specifies the maximum number of children for UnixFS basic +// directory nodes. +func (unixfsOpts) MaxDirectoryLinks(n int) UnixfsAddOption { + return func(settings *UnixfsAddSettings) error { + settings.MaxDirectoryLinks = n + settings.MaxDirectoryLinksSet = true + return nil + } +} + +// MaxHAMTFanout specifies the maximum width of the HAMT directory shards. +func (unixfsOpts) MaxHAMTFanout(n int) UnixfsAddOption { + return func(settings *UnixfsAddSettings) error { + settings.MaxHAMTFanout = n + settings.MaxHAMTFanoutSet = true + return nil + } +} + // Inline tells the adder to inline small blocks into CIDs func (unixfsOpts) Inline(enable bool) UnixfsAddOption { return func(settings *UnixfsAddSettings) error { @@ -237,9 +282,12 @@ func (unixfsOpts) Layout(layout Layout) UnixfsAddOption { } // Pin tells the adder to pin the file root recursively after adding -func (unixfsOpts) Pin(pin bool) UnixfsAddOption { +func (unixfsOpts) Pin(pin bool, pinName string) UnixfsAddOption { return func(settings *UnixfsAddSettings) error { settings.Pin = pin + if pin { + settings.PinName = pinName + } return nil } } diff --git a/core/coreiface/pin.go b/core/coreiface/pin.go index ed837fc9c..e0fd2fb90 100644 --- a/core/coreiface/pin.go +++ b/core/coreiface/pin.go @@ -18,9 +18,6 @@ type Pin interface { // Type of the pin Type() string - - // if not nil, an error happened. Everything else should be ignored. 
- Err() error } // PinStatus holds information about pin health @@ -50,8 +47,9 @@ type PinAPI interface { // tree Add(context.Context, path.Path, ...options.PinAddOption) error - // Ls returns list of pinned objects on this node - Ls(context.Context, ...options.PinLsOption) (<-chan Pin, error) + // Ls returns this node's pinned objects on the provided channel. The + // channel is closed when there are no more pins and an error is returned. + Ls(context.Context, chan<- Pin, ...options.PinLsOption) error // IsPinned returns whether or not the given cid is pinned // and an explanation of why its pinned diff --git a/core/coreiface/tests/block.go b/core/coreiface/tests/block.go index 3b4ca0bc0..71953609b 100644 --- a/core/coreiface/tests/block.go +++ b/core/coreiface/tests/block.go @@ -2,7 +2,6 @@ package tests import ( "bytes" - "context" "io" "strings" "testing" @@ -55,8 +54,7 @@ func (tp *TestSuite) TestBlock(t *testing.T) { // when no opts are passed, produced CID has 'raw' codec func (tp *TestSuite) TestBlockPut(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -75,8 +73,7 @@ func (tp *TestSuite) TestBlockPut(t *testing.T) { // Format is deprecated, it used invalid codec names. // Confirm 'cbor' gets fixed to 'dag-cbor' func (tp *TestSuite) TestBlockPutFormatDagCbor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -95,8 +92,7 @@ func (tp *TestSuite) TestBlockPutFormatDagCbor(t *testing.T) { // Format is deprecated, it used invalid codec names. 
// Confirm 'protobuf' got fixed to 'dag-pb' func (tp *TestSuite) TestBlockPutFormatDagPb(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -115,8 +111,7 @@ func (tp *TestSuite) TestBlockPutFormatDagPb(t *testing.T) { // Format is deprecated, it used invalid codec names. // Confirm fake codec 'v0' got fixed to CIDv0 (with implicit dag-pb codec) func (tp *TestSuite) TestBlockPutFormatV0(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -133,8 +128,7 @@ func (tp *TestSuite) TestBlockPutFormatV0(t *testing.T) { } func (tp *TestSuite) TestBlockPutCidCodecDagCbor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -151,8 +145,7 @@ func (tp *TestSuite) TestBlockPutCidCodecDagCbor(t *testing.T) { } func (tp *TestSuite) TestBlockPutCidCodecDagPb(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -169,8 +162,7 @@ func (tp *TestSuite) TestBlockPutCidCodecDagPb(t *testing.T) { } func (tp *TestSuite) TestBlockPutHash(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -192,8 +184,7 @@ func (tp *TestSuite) TestBlockPutHash(t *testing.T) { } func (tp *TestSuite) TestBlockGet(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -230,8 +221,7 @@ func (tp *TestSuite) TestBlockGet(t *testing.T) { } func (tp *TestSuite) TestBlockRm(t *testing.T) { - ctx, 
cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -284,8 +274,7 @@ func (tp *TestSuite) TestBlockRm(t *testing.T) { } func (tp *TestSuite) TestBlockStat(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -311,8 +300,7 @@ func (tp *TestSuite) TestBlockStat(t *testing.T) { } func (tp *TestSuite) TestBlockPin(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -323,9 +311,17 @@ func (tp *TestSuite) TestBlockPin(t *testing.T) { t.Fatal(err) } - if pins, err := api.Pin().Ls(ctx); err != nil || len(pins) != 0 { + pinCh := make(chan coreiface.Pin) + go func() { + err = api.Pin().Ls(ctx, pinCh) + }() + + for range pinCh { t.Fatal("expected 0 pins") } + if err != nil { + t.Fatal(err) + } res, err := api.Block().Put( ctx, @@ -337,7 +333,7 @@ func (tp *TestSuite) TestBlockPin(t *testing.T) { t.Fatal(err) } - pins, err := accPins(api.Pin().Ls(ctx)) + pins, err := accPins(ctx, api) if err != nil { t.Fatal(err) } diff --git a/core/coreiface/tests/dag.go b/core/coreiface/tests/dag.go index 3a388c556..955125967 100644 --- a/core/coreiface/tests/dag.go +++ b/core/coreiface/tests/dag.go @@ -1,7 +1,6 @@ package tests import ( - "context" "math" "strings" "testing" @@ -38,8 +37,7 @@ var treeExpected = map[string]struct{}{ } func (tp *TestSuite) TestPut(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -61,8 +59,7 @@ func (tp *TestSuite) TestPut(t *testing.T) { } func (tp *TestSuite) TestPutWithHash(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := 
tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -84,8 +81,7 @@ func (tp *TestSuite) TestPutWithHash(t *testing.T) { } func (tp *TestSuite) TestDagPath(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -132,8 +128,7 @@ func (tp *TestSuite) TestDagPath(t *testing.T) { } func (tp *TestSuite) TestTree(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -167,8 +162,7 @@ func (tp *TestSuite) TestTree(t *testing.T) { } func (tp *TestSuite) TestBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) diff --git a/core/coreiface/tests/key.go b/core/coreiface/tests/key.go index 90936b0e2..ed97719b1 100644 --- a/core/coreiface/tests/key.go +++ b/core/coreiface/tests/key.go @@ -1,7 +1,6 @@ package tests import ( - "context" "strings" "testing" @@ -43,8 +42,7 @@ func (tp *TestSuite) TestKey(t *testing.T) { } func (tp *TestSuite) TestListSelf(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -60,8 +58,7 @@ func (tp *TestSuite) TestListSelf(t *testing.T) { } func (tp *TestSuite) TestRenameSelf(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -74,8 +71,7 @@ func (tp *TestSuite) TestRenameSelf(t *testing.T) { } func (tp *TestSuite) TestRemoveSelf(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -85,8 +81,7 @@ func (tp *TestSuite) TestRemoveSelf(t *testing.T) { } 
func (tp *TestSuite) TestGenerate(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -113,8 +108,7 @@ func verifyIPNSPath(t *testing.T, p string) { } func (tp *TestSuite) TestGenerateSize(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -129,8 +123,7 @@ func (tp *TestSuite) TestGenerateSize(t *testing.T) { func (tp *TestSuite) TestGenerateType(t *testing.T) { t.Skip("disabled until libp2p/specs#111 is fixed") - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -143,8 +136,7 @@ func (tp *TestSuite) TestGenerateType(t *testing.T) { } func (tp *TestSuite) TestGenerateExisting(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -160,8 +152,7 @@ func (tp *TestSuite) TestGenerateExisting(t *testing.T) { } func (tp *TestSuite) TestList(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -180,8 +171,7 @@ func (tp *TestSuite) TestList(t *testing.T) { } func (tp *TestSuite) TestRename(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -196,8 +186,7 @@ func (tp *TestSuite) TestRename(t *testing.T) { } func (tp *TestSuite) TestRenameToSelf(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -210,8 +199,7 @@ func (tp *TestSuite) TestRenameToSelf(t *testing.T) { } func (tp 
*TestSuite) TestRenameToSelfForce(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -224,8 +212,7 @@ func (tp *TestSuite) TestRenameToSelfForce(t *testing.T) { } func (tp *TestSuite) TestRenameOverwriteNoForce(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -241,8 +228,7 @@ func (tp *TestSuite) TestRenameOverwriteNoForce(t *testing.T) { } func (tp *TestSuite) TestRenameOverwrite(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -261,8 +247,7 @@ func (tp *TestSuite) TestRenameOverwrite(t *testing.T) { } func (tp *TestSuite) TestRenameSameNameNoForce(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -277,8 +262,7 @@ func (tp *TestSuite) TestRenameSameNameNoForce(t *testing.T) { } func (tp *TestSuite) TestRenameSameName(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -293,8 +277,7 @@ func (tp *TestSuite) TestRenameSameName(t *testing.T) { } func (tp *TestSuite) TestRemove(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -317,8 +300,7 @@ func (tp *TestSuite) TestRemove(t *testing.T) { } func (tp *TestSuite) TestSign(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -348,8 +330,7 @@ func (tp *TestSuite) TestVerify(t *testing.T) { 
t.Run("Verify Own Key", func(t *testing.T) { t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -370,8 +351,7 @@ func (tp *TestSuite) TestVerify(t *testing.T) { t.Run("Verify Self", func(t *testing.T) { t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPIWithIdentityAndOffline(t, ctx) require.NoError(t, err) @@ -390,8 +370,7 @@ func (tp *TestSuite) TestVerify(t *testing.T) { t.Parallel() // Spin some node and get signature out. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -411,8 +390,7 @@ func (tp *TestSuite) TestVerify(t *testing.T) { {"Prefixed IPNS Path", ipns.NameFromPeer(key.ID()).AsPath().String()}, } { t.Run(testCase[0], func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() // Spin new node. 
api, err := tp.makeAPI(t, ctx) diff --git a/core/coreiface/tests/name.go b/core/coreiface/tests/name.go index 1e739fdd0..96c9c5bfc 100644 --- a/core/coreiface/tests/name.go +++ b/core/coreiface/tests/name.go @@ -35,8 +35,7 @@ func addTestObject(ctx context.Context, api coreiface.CoreAPI) (path.Path, error } func (tp *TestSuite) TestPublishResolve(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() init := func() (coreiface.CoreAPI, path.Path) { apis, err := tp.MakeAPISwarm(t, ctx, 5) require.NoError(t, err) @@ -120,8 +119,7 @@ func (tp *TestSuite) TestPublishResolve(t *testing.T) { } func (tp *TestSuite) TestBasicPublishResolveKey(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() apis, err := tp.MakeAPISwarm(t, ctx, 5) require.NoError(t, err) api := apis[0] @@ -142,10 +140,7 @@ func (tp *TestSuite) TestBasicPublishResolveKey(t *testing.T) { } func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) { - t.Skip("ValidTime doesn't appear to work at this time resolution") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() apis, err := tp.MakeAPISwarm(t, ctx, 5) require.NoError(t, err) api := apis[0] @@ -155,14 +150,25 @@ func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) { self, err := api.Key().Self(ctx) require.NoError(t, err) - name, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Millisecond*100)) + name, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Second*1)) require.NoError(t, err) require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String()) - time.Sleep(time.Second) - - _, err = api.Name().Resolve(ctx, name.String()) + // First resolve should succeed (before expiration) + resPath, err := api.Name().Resolve(ctx, name.String()) require.NoError(t, err) + require.Equal(t, p.String(), resPath.String()) + + // Wait for record to expire (1 second 
ValidTime + buffer) + time.Sleep(time.Second * 2) + + // Second resolve should now fail after ValidTime expiration (cached) + _, err = api.Name().Resolve(ctx, name.String()) + require.Error(t, err, "IPNS resolution should fail after ValidTime expires (cached)") + + // Third resolve should also fail after ValidTime expiration (non-cached) + _, err = api.Name().Resolve(ctx, name.String(), opt.Name.Cache(false)) + require.Error(t, err, "IPNS resolution should fail after ValidTime expires (non-cached)") } // TODO: When swarm api is created, add multinode tests diff --git a/core/coreiface/tests/object.go b/core/coreiface/tests/object.go index 239b022e1..bbc7180e4 100644 --- a/core/coreiface/tests/object.go +++ b/core/coreiface/tests/object.go @@ -45,8 +45,7 @@ func putDagPbNode(t *testing.T, ctx context.Context, api iface.CoreAPI, data str } func (tp *TestSuite) TestObjectAddLink(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -72,8 +71,7 @@ func (tp *TestSuite) TestObjectAddLink(t *testing.T) { } func (tp *TestSuite) TestObjectAddLinkCreate(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -102,8 +100,7 @@ func (tp *TestSuite) TestObjectAddLinkCreate(t *testing.T) { } func (tp *TestSuite) TestObjectRmLink(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -127,8 +124,7 @@ func (tp *TestSuite) TestObjectRmLink(t *testing.T) { } func (tp *TestSuite) TestDiffTest(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) diff --git a/core/coreiface/tests/path.go b/core/coreiface/tests/path.go index 
87dce2c91..80a1c0e22 100644 --- a/core/coreiface/tests/path.go +++ b/core/coreiface/tests/path.go @@ -1,7 +1,6 @@ package tests import ( - "context" "fmt" "math" "strings" @@ -32,8 +31,7 @@ func (tp *TestSuite) TestPath(t *testing.T) { } func (tp *TestSuite) TestMutablePath(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -49,8 +47,7 @@ func (tp *TestSuite) TestMutablePath(t *testing.T) { } func (tp *TestSuite) TestPathRemainder(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -71,8 +68,7 @@ func (tp *TestSuite) TestPathRemainder(t *testing.T) { } func (tp *TestSuite) TestEmptyPathRemainder(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -90,8 +86,7 @@ func (tp *TestSuite) TestEmptyPathRemainder(t *testing.T) { } func (tp *TestSuite) TestInvalidPathRemainder(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) @@ -112,8 +107,7 @@ func (tp *TestSuite) TestInvalidPathRemainder(t *testing.T) { } func (tp *TestSuite) TestPathRoot(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) require.NoError(t, err) diff --git a/core/coreiface/tests/pin.go b/core/coreiface/tests/pin.go index fdd7c15cc..04f812ee0 100644 --- a/core/coreiface/tests/pin.go +++ b/core/coreiface/tests/pin.go @@ -12,6 +12,7 @@ import ( ipld "github.com/ipfs/go-ipld-format" iface "github.com/ipfs/kubo/core/coreiface" opt "github.com/ipfs/kubo/core/coreiface/options" + "github.com/stretchr/testify/require" ) func (tp *TestSuite) TestPin(t 
*testing.T) { @@ -28,11 +29,11 @@ func (tp *TestSuite) TestPin(t *testing.T) { t.Run("TestPinLsIndirect", tp.TestPinLsIndirect) t.Run("TestPinLsPrecedence", tp.TestPinLsPrecedence) t.Run("TestPinIsPinned", tp.TestPinIsPinned) + t.Run("TestPinNames", tp.TestPinNames) } func (tp *TestSuite) TestPinAdd(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -50,8 +51,7 @@ func (tp *TestSuite) TestPinAdd(t *testing.T) { } func (tp *TestSuite) TestPinSimple(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -67,7 +67,7 @@ func (tp *TestSuite) TestPinSimple(t *testing.T) { t.Fatal(err) } - list, err := accPins(api.Pin().Ls(ctx)) + list, err := accPins(ctx, api) if err != nil { t.Fatal(err) } @@ -91,7 +91,7 @@ func (tp *TestSuite) TestPinSimple(t *testing.T) { t.Fatal(err) } - list, err = accPins(api.Pin().Ls(ctx)) + list, err = accPins(ctx, api) if err != nil { t.Fatal(err) } @@ -102,8 +102,7 @@ func (tp *TestSuite) TestPinSimple(t *testing.T) { } func (tp *TestSuite) TestPinRecursive(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -143,7 +142,7 @@ func (tp *TestSuite) TestPinRecursive(t *testing.T) { t.Fatal(err) } - list, err := accPins(api.Pin().Ls(ctx)) + list, err := accPins(ctx, api) if err != nil { t.Fatal(err) } @@ -152,7 +151,7 @@ func (tp *TestSuite) TestPinRecursive(t *testing.T) { t.Errorf("unexpected pin list len: %d", len(list)) } - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Direct())) + list, err = accPins(ctx, api, opt.Pin.Ls.Direct()) if err != nil { t.Fatal(err) } @@ -165,7 +164,7 @@ func (tp *TestSuite) TestPinRecursive(t *testing.T) { t.Errorf("unexpected path, %s != %s", 
list[0].Path().String(), path.FromCid(nd3.Cid()).String()) } - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Recursive())) + list, err = accPins(ctx, api, opt.Pin.Ls.Recursive()) if err != nil { t.Fatal(err) } @@ -178,7 +177,7 @@ func (tp *TestSuite) TestPinRecursive(t *testing.T) { t.Errorf("unexpected path, %s != %s", list[0].Path().String(), path.FromCid(nd2.Cid()).String()) } - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Indirect())) + list, err = accPins(ctx, api, opt.Pin.Ls.Indirect()) if err != nil { t.Fatal(err) } @@ -249,8 +248,7 @@ func (tp *TestSuite) TestPinRecursive(t *testing.T) { // TestPinLsIndirect verifies that indirect nodes are listed by pin ls even if a parent node is directly pinned func (tp *TestSuite) TestPinLsIndirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -282,8 +280,7 @@ func (tp *TestSuite) TestPinLsPrecedence(t *testing.T) { } func (tp *TestSuite) TestPinLsPredenceRecursiveIndirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -306,8 +303,7 @@ func (tp *TestSuite) TestPinLsPredenceRecursiveIndirect(t *testing.T) { } func (tp *TestSuite) TestPinLsPrecedenceDirectIndirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -330,8 +326,7 @@ func (tp *TestSuite) TestPinLsPrecedenceDirectIndirect(t *testing.T) { } func (tp *TestSuite) TestPinLsPrecedenceRecursiveDirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -366,8 +361,7 @@ func (tp *TestSuite) TestPinLsPrecedenceRecursiveDirect(t *testing.T) { } func (tp *TestSuite) 
TestPinIsPinned(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -433,24 +427,24 @@ func getThreeChainedNodes(t *testing.T, ctx context.Context, api iface.CoreAPI, return immutablePathCidContainer{leaf}, parent, grandparent } -func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recusive, direct, indirect []cidContainer) { +func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recursive, direct, indirect []cidContainer) { assertPinLsAllConsistency(t, ctx, api) - list, err := accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Recursive())) + list, err := accPins(ctx, api, opt.Pin.Ls.Recursive()) if err != nil { t.Fatal(err) } - assertPinCids(t, list, recusive...) + assertPinCids(t, list, recursive...) - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Direct())) + list, err = accPins(ctx, api, opt.Pin.Ls.Direct()) if err != nil { t.Fatal(err) } assertPinCids(t, list, direct...) 
- list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Indirect())) + list, err = accPins(ctx, api, opt.Pin.Ls.Indirect()) if err != nil { t.Fatal(err) } @@ -500,7 +494,7 @@ func assertPinCids(t *testing.T, pins []iface.Pin, cids ...cidContainer) { // assertPinLsAllConsistency verifies that listing all pins gives the same result as listing the pin types individually func assertPinLsAllConsistency(t *testing.T, ctx context.Context, api iface.CoreAPI) { t.Helper() - allPins, err := accPins(api.Pin().Ls(ctx)) + allPins, err := accPins(ctx, api) if err != nil { t.Fatal(err) } @@ -531,7 +525,7 @@ func assertPinLsAllConsistency(t *testing.T, ctx context.Context, api iface.Core } for typeStr, pinProps := range typeMap { - pins, err := accPins(api.Pin().Ls(ctx, pinProps.PinLsOption)) + pins, err := accPins(ctx, api, pinProps.PinLsOption) if err != nil { t.Fatal(err) } @@ -580,6 +574,144 @@ func assertIsPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path } } +func (tp *TestSuite) TestPinNames(t *testing.T) { + ctx := t.Context() + api, err := tp.makeAPI(t, ctx) + require.NoError(t, err) + + // Create test content + p1, err := api.Unixfs().Add(ctx, strFile("content1")()) + require.NoError(t, err) + + p2, err := api.Unixfs().Add(ctx, strFile("content2")()) + require.NoError(t, err) + + p3, err := api.Unixfs().Add(ctx, strFile("content3")()) + require.NoError(t, err) + + p4, err := api.Unixfs().Add(ctx, strFile("content4")()) + require.NoError(t, err) + + // Test 1: Pin with name + err = api.Pin().Add(ctx, p1, opt.Pin.Name("test-pin-1")) + require.NoError(t, err, "failed to add pin with name") + + // Test 2: Pin without name + err = api.Pin().Add(ctx, p2) + require.NoError(t, err, "failed to add pin without name") + + // Test 3: List pins with detailed option to get names + pins := make(chan iface.Pin) + go func() { + err = api.Pin().Ls(ctx, pins, opt.Pin.Ls.Detailed(true)) + }() + + pinMap := make(map[string]string) + for pin := range pins { + 
pinMap[pin.Path().String()] = pin.Name() + } + require.NoError(t, err, "failed to list pins with names") + + // Verify pin names + name1, ok := pinMap[p1.String()] + require.True(t, ok, "pin for %s not found", p1) + require.Equal(t, "test-pin-1", name1, "unexpected pin name for %s", p1) + + name2, ok := pinMap[p2.String()] + require.True(t, ok, "pin for %s not found", p2) + require.Empty(t, name2, "expected empty pin name for %s, got '%s'", p2, name2) + + // Test 4: Pin update preserves name + err = api.Pin().Add(ctx, p3, opt.Pin.Name("updatable-pin")) + require.NoError(t, err, "failed to add pin with name for update test") + + err = api.Pin().Update(ctx, p3, p4) + require.NoError(t, err, "failed to update pin") + + // Verify name was preserved after update + pins2 := make(chan iface.Pin) + go func() { + err = api.Pin().Ls(ctx, pins2, opt.Pin.Ls.Detailed(true)) + }() + + updatedPinMap := make(map[string]string) + for pin := range pins2 { + updatedPinMap[pin.Path().String()] = pin.Name() + } + require.NoError(t, err, "failed to list pins after update") + + // Old pin should not exist + _, oldExists := updatedPinMap[p3.String()] + require.False(t, oldExists, "old pin %s should not exist after update", p3) + + // New pin should have the preserved name + name4, ok := updatedPinMap[p4.String()] + require.True(t, ok, "updated pin for %s not found", p4) + require.Equal(t, "updatable-pin", name4, "pin name not preserved after update from %s to %s", p3, p4) + + // Test 5: Re-pinning with different name updates the name + err = api.Pin().Add(ctx, p1, opt.Pin.Name("new-name-for-p1")) + require.NoError(t, err, "failed to re-pin with new name") + + // Verify name was updated + pins3 := make(chan iface.Pin) + go func() { + err = api.Pin().Ls(ctx, pins3, opt.Pin.Ls.Detailed(true)) + }() + + repinMap := make(map[string]string) + for pin := range pins3 { + repinMap[pin.Path().String()] = pin.Name() + } + require.NoError(t, err, "failed to list pins after re-pin") + + rePinnedName, 
ok := repinMap[p1.String()] + require.True(t, ok, "re-pinned content %s not found", p1) + require.Equal(t, "new-name-for-p1", rePinnedName, "pin name not updated after re-pinning %s", p1) + + // Test 6: Direct pin with name + p5, err := api.Unixfs().Add(ctx, strFile("direct-content")()) + require.NoError(t, err) + + err = api.Pin().Add(ctx, p5, opt.Pin.Recursive(false), opt.Pin.Name("direct-pin-name")) + require.NoError(t, err, "failed to add direct pin with name") + + // Verify direct pin has name + directPins := make(chan iface.Pin) + typeOpt, err := opt.Pin.Ls.Type("direct") + require.NoError(t, err, "failed to create type option") + go func() { + err = api.Pin().Ls(ctx, directPins, typeOpt, opt.Pin.Ls.Detailed(true)) + }() + + directPinMap := make(map[string]string) + for pin := range directPins { + directPinMap[pin.Path().String()] = pin.Name() + } + require.NoError(t, err, "failed to list direct pins") + + directName, ok := directPinMap[p5.String()] + require.True(t, ok, "direct pin %s not found", p5) + require.Equal(t, "direct-pin-name", directName, "unexpected name for direct pin %s", p5) + + // Test 7: List without detailed option doesn't return names + pinsNoDetails := make(chan iface.Pin) + go func() { + err = api.Pin().Ls(ctx, pinsNoDetails) + }() + + noDetailsMap := make(map[string]string) + for pin := range pinsNoDetails { + noDetailsMap[pin.Path().String()] = pin.Name() + } + require.NoError(t, err, "failed to list pins without detailed option") + + // All names should be empty without detailed option + for path, name := range noDetailsMap { + require.Empty(t, name, "expected empty name for %s without detailed option, got '%s'", path, name) + } +} + func assertNotPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path.Path) { t.Helper() @@ -593,19 +725,19 @@ func assertNotPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p pat } } -func accPins(pins <-chan iface.Pin, err error) ([]iface.Pin, error) { +func accPins(ctx 
context.Context, api iface.CoreAPI, opts ...opt.PinLsOption) ([]iface.Pin, error) { + var err error + pins := make(chan iface.Pin) + go func() { + err = api.Pin().Ls(ctx, pins, opts...) + }() + + var results []iface.Pin + for pin := range pins { + results = append(results, pin) + } if err != nil { return nil, err } - - var result []iface.Pin - - for pin := range pins { - if pin.Err() != nil { - return nil, pin.Err() - } - result = append(result, pin) - } - - return result, nil + return results, nil } diff --git a/core/coreiface/tests/routing.go b/core/coreiface/tests/routing.go index 753d49550..fe529c9b4 100644 --- a/core/coreiface/tests/routing.go +++ b/core/coreiface/tests/routing.go @@ -41,8 +41,7 @@ func (tp *TestSuite) testRoutingPublishKey(t *testing.T, ctx context.Context, ap } func (tp *TestSuite) TestRoutingGet(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() apis, err := tp.MakeAPISwarm(t, ctx, 2) require.NoError(t, err) @@ -63,8 +62,7 @@ func (tp *TestSuite) TestRoutingGet(t *testing.T) { } func (tp *TestSuite) TestRoutingPut(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() apis, err := tp.MakeAPISwarm(t, ctx, 2) require.NoError(t, err) @@ -81,8 +79,7 @@ func (tp *TestSuite) TestRoutingPut(t *testing.T) { } func (tp *TestSuite) TestRoutingPutOffline(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() // init a swarm & publish an IPNS entry to get a valid payload apis, err := tp.MakeAPISwarm(t, ctx, 2) @@ -104,8 +101,7 @@ func (tp *TestSuite) TestRoutingPutOffline(t *testing.T) { } func (tp *TestSuite) TestRoutingFindPeer(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() apis, err := tp.MakeAPISwarm(t, ctx, 5) if err != nil { t.Fatal(err) @@ -159,8 +155,7 @@ func (tp *TestSuite) TestRoutingFindPeer(t *testing.T) 
{ } func (tp *TestSuite) TestRoutingFindProviders(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() apis, err := tp.MakeAPISwarm(t, ctx, 5) if err != nil { t.Fatal(err) @@ -171,6 +166,13 @@ func (tp *TestSuite) TestRoutingFindProviders(t *testing.T) { t.Fatal(err) } + // Pin so that it is provided, given that providing strategy is + // "roots" and addTestObject does not pin. + err = apis[0].Pin().Add(ctx, p) + if err != nil { + t.Fatal(err) + } + time.Sleep(3 * time.Second) out, err := apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1)) @@ -191,8 +193,7 @@ func (tp *TestSuite) TestRoutingFindProviders(t *testing.T) { } func (tp *TestSuite) TestRoutingProvide(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() apis, err := tp.MakeAPISwarm(t, ctx, 5) if err != nil { t.Fatal(err) @@ -233,14 +234,27 @@ func (tp *TestSuite) TestRoutingProvide(t *testing.T) { t.Fatal(err) } - out, err = apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1)) - if err != nil { - t.Fatal(err) + maxAttempts := 5 + success := false + for range maxAttempts { + // We may need to try again as Provide() doesn't block until the CID is + // actually provided. 
+ out, err = apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1)) + if err != nil { + t.Fatal(err) + } + provider := <-out + + if provider.ID.String() == self0.ID().String() { + success = true + break + } + if len(provider.ID.String()) > 0 { + t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String()) + } + time.Sleep(time.Second) } - - provider := <-out - - if provider.ID.String() != self0.ID().String() { - t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String()) + if !success { + t.Errorf("missing provider after %d attempts", maxAttempts) } } diff --git a/core/coreiface/tests/unixfs.go b/core/coreiface/tests/unixfs.go index 9d3362b9a..84db01721 100644 --- a/core/coreiface/tests/unixfs.go +++ b/core/coreiface/tests/unixfs.go @@ -98,8 +98,7 @@ func wrapped(names ...string) func(f files.Node) files.Node { } func (tp *TestSuite) TestAdd(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -532,19 +531,18 @@ func (tp *TestSuite) TestAdd(t *testing.T) { } func (tp *TestSuite) TestAddPinned(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) } - _, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true)) + _, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true, "")) if err != nil { t.Fatal(err) } - pins, err := accPins(api.Pin().Ls(ctx)) + pins, err := accPins(ctx, api) if err != nil { t.Fatal(err) } @@ -558,8 +556,7 @@ func (tp *TestSuite) TestAddPinned(t *testing.T) { } func (tp *TestSuite) TestAddHashOnly(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -584,8 +581,7 @@ func (tp *TestSuite) 
TestAddHashOnly(t *testing.T) { } func (tp *TestSuite) TestGetEmptyFile(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -617,8 +613,7 @@ func (tp *TestSuite) TestGetEmptyFile(t *testing.T) { } func (tp *TestSuite) TestGetDir(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -645,8 +640,7 @@ func (tp *TestSuite) TestGetDir(t *testing.T) { } func (tp *TestSuite) TestGetNonUnixfs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -659,14 +653,13 @@ func (tp *TestSuite) TestGetNonUnixfs(t *testing.T) { } _, err = api.Unixfs().Get(ctx, path.FromCid(nd.Cid())) - if !strings.Contains(err.Error(), "proto: required field") { - t.Fatalf("expected protobuf error, got: %s", err) + if !strings.Contains(err.Error(), "proto:") || !strings.Contains(err.Error(), "required field") { + t.Fatalf("expected \"proto: required field\", got: %q", err) } } func (tp *TestSuite) TestLs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -681,14 +674,15 @@ func (tp *TestSuite) TestLs(t *testing.T) { t.Fatal(err) } - entries, err := api.Unixfs().Ls(ctx, p) - if err != nil { - t.Fatal(err) - } + errCh := make(chan error, 1) + entries := make(chan coreiface.DirEntry) + go func() { + errCh <- api.Unixfs().Ls(ctx, p, entries) + }() - entry := <-entries - if entry.Err != nil { - t.Fatal(entry.Err) + entry, ok := <-entries + if !ok { + t.Fatal("expected another entry") } if entry.Size != 15 { t.Errorf("expected size = 15, got %d", entry.Size) @@ -702,9 +696,9 @@ func (tp *TestSuite) TestLs(t *testing.T) { if 
entry.Cid.String() != "QmX3qQVKxDGz3URVC3861Z3CKtQKGBn6ffXRBBWGMFz9Lr" { t.Errorf("expected cid = QmX3qQVKxDGz3URVC3861Z3CKtQKGBn6ffXRBBWGMFz9Lr, got %s", entry.Cid) } - entry = <-entries - if entry.Err != nil { - t.Fatal(entry.Err) + entry, ok = <-entries + if !ok { + t.Fatal("expected another entry") } if entry.Type != coreiface.TSymlink { t.Errorf("wrong type %s", entry.Type) @@ -716,11 +710,12 @@ func (tp *TestSuite) TestLs(t *testing.T) { t.Errorf("expected symlink target to be /foo/bar, got %s", entry.Target) } - if l, ok := <-entries; ok { - t.Errorf("didn't expect a second link") - if l.Err != nil { - t.Error(l.Err) - } + _, ok = <-entries + if ok { + t.Errorf("didn't expect a another link") + } + if err = <-errCh; err != nil { + t.Error(err) } } @@ -767,8 +762,7 @@ func (tp *TestSuite) TestEntriesExpired(t *testing.T) { } func (tp *TestSuite) TestLsEmptyDir(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -779,20 +773,28 @@ func (tp *TestSuite) TestLsEmptyDir(t *testing.T) { t.Fatal(err) } - links, err := api.Unixfs().Ls(ctx, p) - if err != nil { + errCh := make(chan error, 1) + links := make(chan coreiface.DirEntry) + go func() { + errCh <- api.Unixfs().Ls(ctx, p, links) + }() + + var count int + for range links { + count++ + } + if err = <-errCh; err != nil { t.Fatal(err) } - if len(links) != 0 { - t.Fatalf("expected 0 links, got %d", len(links)) + if count != 0 { + t.Fatalf("expected 0 links, got %d", count) } } // TODO(lgierth) this should test properly, with len(links) > 0 func (tp *TestSuite) TestLsNonUnixfs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -808,13 +810,22 @@ func (tp *TestSuite) TestLsNonUnixfs(t *testing.T) { t.Fatal(err) } - links, err := api.Unixfs().Ls(ctx, path.FromCid(nd.Cid())) - 
if err != nil { + errCh := make(chan error, 1) + links := make(chan coreiface.DirEntry) + go func() { + errCh <- api.Unixfs().Ls(ctx, path.FromCid(nd.Cid()), links) + }() + + var count int + for range links { + count++ + } + if err = <-errCh; err != nil { t.Fatal(err) } - if len(links) != 0 { - t.Fatalf("expected 0 links, got %d", len(links)) + if count != 0 { + t.Fatalf("expected 0 links, got %d", count) } } @@ -850,8 +861,7 @@ func (f *closeTestF) Close() error { } func (tp *TestSuite) TestAddCloses(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -888,8 +898,7 @@ func (tp *TestSuite) TestAddCloses(t *testing.T) { } func (tp *TestSuite) TestGetSeek(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) @@ -994,8 +1003,7 @@ func (tp *TestSuite) TestGetSeek(t *testing.T) { } func (tp *TestSuite) TestGetReadAt(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() api, err := tp.makeAPI(t, ctx) if err != nil { t.Fatal(err) diff --git a/core/coreiface/unixfs.go b/core/coreiface/unixfs.go index c0150bd12..a8740e2b3 100644 --- a/core/coreiface/unixfs.go +++ b/core/coreiface/unixfs.go @@ -2,6 +2,7 @@ package iface import ( "context" + "iter" "os" "time" @@ -63,8 +64,6 @@ type DirEntry struct { Mode os.FileMode ModTime time.Time - - Err error } // UnixfsAPI is the basic interface to immutable files in IPFS @@ -81,7 +80,56 @@ type UnixfsAPI interface { // to operations performed on the returned file Get(context.Context, path.Path) (files.Node, error) - // Ls returns the list of links in a directory. 
Links aren't guaranteed to be - // returned in order - Ls(context.Context, path.Path, ...options.UnixfsLsOption) (<-chan DirEntry, error) + // Ls writes the links in a directory to the DirEntry channel. Links aren't + // guaranteed to be returned in order. If an error occurs or the context is + // canceled, the DirEntry channel is closed and an error is returned. + // + // Example: + // + // dirs := make(chan DirEntry) + // lsErr := make(chan error, 1) + // go func() { + // lsErr <- Ls(ctx, p, dirs) + // }() + // for dirEnt := range dirs { + // fmt.Println("Dir name:", dirEnt.Name) + // } + // err := <-lsErr + // if err != nil { + // return fmt.Errorf("error listing directory: %w", err) + // } + Ls(context.Context, path.Path, chan<- DirEntry, ...options.UnixfsLsOption) error +} + +// LsIter returns a go iterator that allows ranging over DirEntry results. +// Iteration stops if the context is canceled or if the iterator yields an +// error. +// +// Example: +// +// for dirEnt, err := LsIter(ctx, ufsAPI, p) { +// if err != nil { +// return fmt.Errorf("error listing directory: %w", err) +// } +// fmt.Println("Dir name:", dirEnt.Name) +// } +func LsIter(ctx context.Context, api UnixfsAPI, p path.Path, opts ...options.UnixfsLsOption) iter.Seq2[DirEntry, error] { + return func(yield func(DirEntry, error) bool) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() // cancel Ls if done iterating early + + dirs := make(chan DirEntry) + lsErr := make(chan error, 1) + go func() { + lsErr <- api.Ls(ctx, p, dirs, opts...) 
+ }() + for dirEnt := range dirs { + if !yield(dirEnt, nil) { + return + } + } + if err := <-lsErr; err != nil { + yield(DirEntry{}, err) + } + } } diff --git a/core/corerepo/gc.go b/core/corerepo/gc.go index cf89587d6..bf285e3d9 100644 --- a/core/corerepo/gc.go +++ b/core/corerepo/gc.go @@ -13,7 +13,7 @@ import ( "github.com/dustin/go-humanize" "github.com/ipfs/boxo/mfs" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("corerepo") diff --git a/core/coreunix/add.go b/core/coreunix/add.go index 5f7cbb610..55a9d5bec 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -19,12 +19,13 @@ import ( "github.com/ipfs/boxo/ipld/unixfs/importer/balanced" ihelper "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" "github.com/ipfs/boxo/ipld/unixfs/importer/trickle" + uio "github.com/ipfs/boxo/ipld/unixfs/io" "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/path" pin "github.com/ipfs/boxo/pinning/pinner" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" coreiface "github.com/ipfs/kubo/core/coreiface" "github.com/ipfs/kubo/tracing" @@ -51,38 +52,44 @@ func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCLocker, ds ipld.DAG bufferedDS := ipld.NewBufferedDAG(ctx, ds) return &Adder{ - ctx: ctx, - pinning: p, - gcLocker: bs, - dagService: ds, - bufferedDS: bufferedDS, - Progress: false, - Pin: true, - Trickle: false, - Chunker: "", + ctx: ctx, + pinning: p, + gcLocker: bs, + dagService: ds, + bufferedDS: bufferedDS, + Progress: false, + Pin: true, + Trickle: false, + MaxLinks: ihelper.DefaultLinksPerBlock, + MaxHAMTFanout: uio.DefaultShardWidth, + Chunker: "", }, nil } // Adder holds the switches passed to the `add` command. 
type Adder struct { - ctx context.Context - pinning pin.Pinner - gcLocker bstore.GCLocker - dagService ipld.DAGService - bufferedDS *ipld.BufferedDAG - Out chan<- interface{} - Progress bool - Pin bool - Trickle bool - RawLeaves bool - Silent bool - NoCopy bool - Chunker string - mroot *mfs.Root - unlocker bstore.Unlocker - tempRoot cid.Cid - CidBuilder cid.Builder - liveNodes uint64 + ctx context.Context + pinning pin.Pinner + gcLocker bstore.GCLocker + dagService ipld.DAGService + bufferedDS *ipld.BufferedDAG + Out chan<- interface{} + Progress bool + Pin bool + PinName string + Trickle bool + RawLeaves bool + MaxLinks int + MaxDirectoryLinks int + MaxHAMTFanout int + Silent bool + NoCopy bool + Chunker string + mroot *mfs.Root + unlocker bstore.Unlocker + tempRoot cid.Cid + CidBuilder cid.Builder + liveNodes uint64 PreserveMode bool PreserveMtime bool @@ -94,12 +101,13 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) { if adder.mroot != nil { return adder.mroot, nil } - rnode := unixfs.EmptyDirNode() - err := rnode.SetCidBuilder(adder.CidBuilder) - if err != nil { - return nil, err - } - mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil) + + // Note, this adds it to DAGService already. 
+ mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, nil, mfs.MkdirOpts{ + CidBuilder: adder.CidBuilder, + MaxLinks: adder.MaxDirectoryLinks, + MaxHAMTFanout: adder.MaxHAMTFanout, + }) if err != nil { return nil, err } @@ -119,10 +127,15 @@ func (adder *Adder) add(reader io.Reader) (ipld.Node, error) { return nil, err } + maxLinks := ihelper.DefaultLinksPerBlock + if adder.MaxLinks > 0 { + maxLinks = adder.MaxLinks + } + params := ihelper.DagBuilderParams{ Dagserv: adder.bufferedDS, RawLeaves: adder.RawLeaves, - Maxlinks: ihelper.DefaultLinksPerBlock, + Maxlinks: maxLinks, NoCopy: adder.NoCopy, CidBuilder: adder.CidBuilder, FileMode: adder.FileMode, @@ -170,9 +183,10 @@ func (adder *Adder) curRootNode() (ipld.Node, error) { return root, err } -// Recursively pins the root node of Adder and -// writes the pin state to the backing datastore. -func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node) error { +// PinRoot recursively pins the root node of Adder with an optional name and +// writes the pin state to the backing datastore. If name is empty, the pin +// will be created without a name. +func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node, name string) error { ctx, span := tracing.Span(ctx, "CoreUnix.Adder", "PinRoot") defer span.End() @@ -195,7 +209,7 @@ func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node) error { adder.tempRoot = rnk } - err = adder.pinning.PinWithMode(ctx, rnk, pin.Recursive, "") + err = adder.pinning.PinWithMode(ctx, rnk, pin.Recursive, name) if err != nil { return err } @@ -252,12 +266,15 @@ func (adder *Adder) addNode(node ipld.Node, path string) error { if err != nil { return err } + dir := gopath.Dir(path) if dir != "." 
{ opts := mfs.MkdirOpts{ - Mkparents: true, - Flush: false, - CidBuilder: adder.CidBuilder, + Mkparents: true, + Flush: false, + CidBuilder: adder.CidBuilder, + MaxLinks: adder.MaxDirectoryLinks, + MaxHAMTFanout: adder.MaxHAMTFanout, } if err := mfs.Mkdir(mr, dir, opts); err != nil { return err @@ -354,7 +371,12 @@ func (adder *Adder) AddAllAndPin(ctx context.Context, file files.Node) (ipld.Nod if !adder.Pin { return nd, nil } - return nd, adder.PinRoot(ctx, nd) + + if err := adder.PinRoot(ctx, nd, adder.PinName); err != nil { + return nil, err + } + + return nd, nil } func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Node, toplevel bool) error { @@ -394,7 +416,7 @@ func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Nod case files.Directory: return adder.addDir(ctx, path, f, toplevel) case *files.Symlink: - return adder.addSymlink(path, f) + return adder.addSymlink(ctx, path, f) case files.File: return adder.addFile(path, f) default: @@ -402,7 +424,7 @@ func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Nod } } -func (adder *Adder) addSymlink(path string, l *files.Symlink) error { +func (adder *Adder) addSymlink(ctx context.Context, path string, l *files.Symlink) error { sdata, err := unixfs.SymlinkData(l.Target) if err != nil { return err @@ -460,12 +482,14 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory // if we need to store mode or modification time then create a new root which includes that data if toplevel && (adder.FileMode != 0 || !adder.FileMtime.IsZero()) { - nd := unixfs.EmptyDirNodeWithStat(adder.FileMode, adder.FileMtime) - err := nd.SetCidBuilder(adder.CidBuilder) - if err != nil { - return err - } - mr, err := mfs.NewRoot(ctx, adder.dagService, nd, nil) + mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, nil, + mfs.MkdirOpts{ + CidBuilder: adder.CidBuilder, + MaxLinks: adder.MaxDirectoryLinks, + MaxHAMTFanout: adder.MaxHAMTFanout, 
+ ModTime: adder.FileMtime, + Mode: adder.FileMode, + }) if err != nil { return err } @@ -478,11 +502,13 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory return err } err = mfs.Mkdir(mr, path, mfs.MkdirOpts{ - Mkparents: true, - Flush: false, - CidBuilder: adder.CidBuilder, - Mode: adder.FileMode, - ModTime: adder.FileMtime, + Mkparents: true, + Flush: false, + CidBuilder: adder.CidBuilder, + Mode: adder.FileMode, + ModTime: adder.FileMtime, + MaxLinks: adder.MaxDirectoryLinks, + MaxHAMTFanout: adder.MaxHAMTFanout, }) if err != nil { return err @@ -511,7 +537,7 @@ func (adder *Adder) maybePauseForGC(ctx context.Context) error { return err } - err = adder.PinRoot(ctx, rn) + err = adder.PinRoot(ctx, rn, "") if err != nil { return err } diff --git a/core/coreunix/add_test.go b/core/coreunix/add_test.go index 1eb050ee9..d5b06176a 100644 --- a/core/coreunix/add_test.go +++ b/core/coreunix/add_test.go @@ -30,6 +30,7 @@ import ( const testPeerID = "QmTFauExutTsy4XP6JbMFcw2Wa9645HJt2bTqL6qYDCKfe" func TestAddMultipleGCLive(t *testing.T) { + ctx := t.Context() r := &repo.Mock{ C: config.Config{ Identity: config.Identity{ @@ -38,13 +39,13 @@ func TestAddMultipleGCLive(t *testing.T) { }, D: syncds.MutexWrap(datastore.NewMapDatastore()), } - node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r}) + node, err := core.NewNode(ctx, &core.BuildCfg{Repo: r}) if err != nil { t.Fatal(err) } out := make(chan interface{}, 10) - adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG) + adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG) if err != nil { t.Fatal(err) } @@ -67,7 +68,7 @@ func TestAddMultipleGCLive(t *testing.T) { go func() { defer close(out) - _, _ = adder.AddAllAndPin(context.Background(), slf) + _, _ = adder.AddAllAndPin(ctx, slf) // Ignore errors for clarity - the real bug would be gc'ing files while adding them, not this resultant error }() @@ -80,9 +81,12 @@ func 
TestAddMultipleGCLive(t *testing.T) { gc1started := make(chan struct{}) go func() { defer close(gc1started) - gc1out = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil) + gc1out = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil) }() + // Give GC goroutine time to reach GCLock (will block there waiting for adder) + time.Sleep(time.Millisecond * 100) + // GC shouldn't get the lock until after the file is completely added select { case <-gc1started: @@ -93,8 +97,15 @@ func TestAddMultipleGCLive(t *testing.T) { // finish write and unblock gc pipew1.Close() - // Should have gotten the lock at this point - <-gc1started + // Wait for GC to acquire the lock + // The adder needs to finish processing file 'a' and call maybePauseForGC + // when starting file 'b' before GC can proceed + select { + case <-gc1started: + // GC got the lock as expected + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for GC to start - possible deadlock") + } removedHashes := make(map[string]struct{}) for r := range gc1out { @@ -112,9 +123,12 @@ func TestAddMultipleGCLive(t *testing.T) { gc2started := make(chan struct{}) go func() { defer close(gc2started) - gc2out = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil) + gc2out = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil) }() + // Give GC goroutine time to reach GCLock + time.Sleep(time.Millisecond * 100) + select { case <-gc2started: t.Fatal("gc shouldn't have started yet") @@ -123,7 +137,15 @@ func TestAddMultipleGCLive(t *testing.T) { pipew2.Close() - <-gc2started + // Wait for second GC to acquire the lock + // The adder needs to finish processing file 'b' and call maybePauseForGC + // when starting file 'c' before GC can proceed + select { + case <-gc2started: + // GC got the lock as expected + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for second GC to start - possible deadlock") + } for r 
:= range gc2out { if r.Error != nil { @@ -140,6 +162,7 @@ func TestAddMultipleGCLive(t *testing.T) { } func TestAddGCLive(t *testing.T) { + ctx := t.Context() r := &repo.Mock{ C: config.Config{ Identity: config.Identity{ @@ -148,13 +171,13 @@ func TestAddGCLive(t *testing.T) { }, D: syncds.MutexWrap(datastore.NewMapDatastore()), } - node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r}) + node, err := core.NewNode(ctx, &core.BuildCfg{Repo: r}) if err != nil { t.Fatal(err) } out := make(chan interface{}) - adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG) + adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG) if err != nil { t.Fatal(err) } @@ -178,7 +201,7 @@ func TestAddGCLive(t *testing.T) { go func() { defer close(addDone) defer close(out) - _, err := adder.AddAllAndPin(context.Background(), slf) + _, err := adder.AddAllAndPin(ctx, slf) if err != nil { t.Error(err) } @@ -196,7 +219,7 @@ func TestAddGCLive(t *testing.T) { gcstarted := make(chan struct{}) go func() { defer close(gcstarted) - gcout = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil) + gcout = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil) }() // gc shouldn't start until we let the add finish its current file. 
@@ -240,9 +263,6 @@ func TestAddGCLive(t *testing.T) { last = c } - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - set := cid.NewSet() err = dag.Walk(ctx, dag.GetLinksWithDAG(node.DAG), last, set.Visit) if err != nil { diff --git a/core/node/bitswap.go b/core/node/bitswap.go index 4132d5a01..e73145292 100644 --- a/core/node/bitswap.go +++ b/core/node/bitswap.go @@ -2,17 +2,29 @@ package node import ( "context" + "errors" + "io" "time" + "github.com/dustin/go-humanize" "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/client" "github.com/ipfs/boxo/bitswap/network" + bsnet "github.com/ipfs/boxo/bitswap/network/bsnet" + "github.com/ipfs/boxo/bitswap/network/httpnet" blockstore "github.com/ipfs/boxo/blockstore" exchange "github.com/ipfs/boxo/exchange" + rpqm "github.com/ipfs/boxo/routing/providerquerymanager" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + version "github.com/ipfs/kubo" "github.com/ipfs/kubo/config" - irouting "github.com/ipfs/kubo/routing" "github.com/libp2p/go-libp2p/core/host" + peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" "go.uber.org/fx" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/kubo/core/node/helpers" ) @@ -23,6 +35,7 @@ const ( DefaultEngineTaskWorkerCount = 8 DefaultMaxOutstandingBytesPerPeer = 1 << 20 DefaultProviderSearchDelay = 1000 * time.Millisecond + DefaultMaxProviders = 10 // matching BitswapClientDefaultMaxProviders from https://github.com/ipfs/boxo/blob/v0.29.1/bitswap/internal/defaults/defaults.go#L15 DefaultWantHaveReplaceSize = 1024 ) @@ -34,7 +47,7 @@ type bitswapOptionsOut struct { // BitswapOptions creates configuration options for Bitswap from the config file // and whether to provide data. 
-func BitswapOptions(cfg *config.Config, provide bool) interface{} { +func BitswapOptions(cfg *config.Config) interface{} { return func() bitswapOptionsOut { var internalBsCfg config.InternalBitswap if cfg.Internal.Bitswap != nil { @@ -42,7 +55,6 @@ func BitswapOptions(cfg *config.Config, provide bool) interface{} { } opts := []bitswap.Option{ - bitswap.ProvideEnabled(provide), bitswap.ProviderSearchDelay(internalBsCfg.ProviderSearchDelay.WithDefault(DefaultProviderSearchDelay)), // See https://github.com/ipfs/go-ipfs/issues/8807 for rationale bitswap.EngineBlockstoreWorkerCount(int(internalBsCfg.EngineBlockstoreWorkerCount.WithDefault(DefaultEngineBlockstoreWorkerCount))), bitswap.TaskWorkerCount(int(internalBsCfg.TaskWorkerCount.WithDefault(DefaultTaskWorkerCount))), @@ -55,29 +67,177 @@ func BitswapOptions(cfg *config.Config, provide bool) interface{} { } } -type onlineExchangeIn struct { +type bitswapIn struct { fx.In Mctx helpers.MetricsCtx + Cfg *config.Config Host host.Host - Rt irouting.ProvideManyRouter + Discovery routing.ContentDiscovery Bs blockstore.GCBlockstore BitswapOpts []bitswap.Option `group:"bitswap-options"` } -// OnlineExchange creates new LibP2P backed block exchange (BitSwap). -// Additional options to bitswap.New can be provided via the "bitswap-options" -// group. -func OnlineExchange() interface{} { - return func(in onlineExchangeIn, lc fx.Lifecycle) exchange.Interface { - bitswapNetwork := network.NewFromIpfsHost(in.Host, in.Rt) +// Bitswap creates the BitSwap server/client instance. +// If Bitswap.ServerEnabled is false, the node will act only as a client +// using an empty blockstore to prevent serving blocks to other peers. 
+func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} { + return func(in bitswapIn, lc fx.Lifecycle) (*bitswap.Bitswap, error) { + var bitswapNetworks, bitswapLibp2p network.BitSwapNetwork + var bitswapBlockstore blockstore.Blockstore = in.Bs + + connEvtMgr := network.NewConnectEventManager() + + libp2pEnabled := in.Cfg.Bitswap.Libp2pEnabled.WithDefault(config.DefaultBitswapLibp2pEnabled) + if libp2pEnabled { + bitswapLibp2p = bsnet.NewFromIpfsHost( + in.Host, + bsnet.WithConnectEventManager(connEvtMgr), + ) + } + + if httpEnabled { + httpCfg := in.Cfg.HTTPRetrieval + maxBlockSize, err := humanize.ParseBytes(httpCfg.MaxBlockSize.WithDefault(config.DefaultHTTPRetrievalMaxBlockSize)) + if err != nil { + return nil, err + } + logger.Infof("HTTP Retrieval enabled: Allowlist: %t. Denylist: %t", + httpCfg.Allowlist != nil, + httpCfg.Denylist != nil, + ) + + bitswapHTTP := httpnet.New(in.Host, + httpnet.WithHTTPWorkers(int(httpCfg.NumWorkers.WithDefault(config.DefaultHTTPRetrievalNumWorkers))), + httpnet.WithAllowlist(httpCfg.Allowlist), + httpnet.WithDenylist(httpCfg.Denylist), + httpnet.WithInsecureSkipVerify(httpCfg.TLSInsecureSkipVerify.WithDefault(config.DefaultHTTPRetrievalTLSInsecureSkipVerify)), + httpnet.WithMaxBlockSize(int64(maxBlockSize)), + httpnet.WithUserAgent(version.GetUserAgentVersion()), + httpnet.WithMetricsLabelsForEndpoints(httpCfg.Allowlist), + httpnet.WithConnectEventManager(connEvtMgr), + ) + bitswapNetworks = network.New(in.Host.Peerstore(), bitswapLibp2p, bitswapHTTP) + } else if libp2pEnabled { + bitswapNetworks = bitswapLibp2p + } else { + return nil, errors.New("invalid configuration: Bitswap.Libp2pEnabled and HTTPRetrieval.Enabled are both disabled, unable to initialize Bitswap") + } + + // Kubo uses own, customized ProviderQueryManager + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.WithDefaultProviderQueryManager(false))) + var maxProviders int = DefaultMaxProviders + + var bcDisposition string 
+ if in.Cfg.Internal.Bitswap != nil { + maxProviders = int(in.Cfg.Internal.Bitswap.ProviderSearchMaxResults.WithDefault(DefaultMaxProviders)) + if in.Cfg.Internal.Bitswap.BroadcastControl != nil { + bcCfg := in.Cfg.Internal.Bitswap.BroadcastControl + bcEnable := bcCfg.Enable.WithDefault(config.DefaultBroadcastControlEnable) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlEnable(bcEnable))) + if bcEnable { + bcDisposition = "enabled" + bcMaxPeers := int(bcCfg.MaxPeers.WithDefault(config.DefaultBroadcastControlMaxPeers)) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlMaxPeers(bcMaxPeers))) + + bcLocalPeers := bcCfg.LocalPeers.WithDefault(config.DefaultBroadcastControlLocalPeers) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlLocalPeers(bcLocalPeers))) + + bcPeeredPeers := bcCfg.PeeredPeers.WithDefault(config.DefaultBroadcastControlPeeredPeers) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlPeeredPeers(bcPeeredPeers))) + + bcMaxRandomPeers := int(bcCfg.MaxRandomPeers.WithDefault(config.DefaultBroadcastControlMaxRandomPeers)) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlMaxRandomPeers(bcMaxRandomPeers))) + + bcSendToPendingPeers := bcCfg.SendToPendingPeers.WithDefault(config.DefaultBroadcastControlSendToPendingPeers) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlSendToPendingPeers(bcSendToPendingPeers))) + } else { + bcDisposition = "disabled" + } + } + } + + // If broadcast control is not configured, then configure with defaults. 
+ if bcDisposition == "" { + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlEnable(config.DefaultBroadcastControlEnable))) + if config.DefaultBroadcastControlEnable { + bcDisposition = "enabled" + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlMaxPeers(config.DefaultBroadcastControlMaxPeers))) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlLocalPeers(config.DefaultBroadcastControlLocalPeers))) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlPeeredPeers(config.DefaultBroadcastControlPeeredPeers))) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlMaxRandomPeers(config.DefaultBroadcastControlMaxRandomPeers))) + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithClientOption(client.BroadcastControlSendToPendingPeers(config.DefaultBroadcastControlSendToPendingPeers))) + } else { + bcDisposition = "enabled" + } + } + logger.Infof("bitswap client broadcast control %s", bcDisposition) + + ignoredPeerIDs := make([]peer.ID, 0, len(in.Cfg.Routing.IgnoreProviders)) + for _, str := range in.Cfg.Routing.IgnoreProviders { + pid, err := peer.Decode(str) + if err != nil { + return nil, err + } + ignoredPeerIDs = append(ignoredPeerIDs, pid) + } + providerQueryMgr, err := rpqm.New(bitswapNetworks, + in.Discovery, + rpqm.WithMaxProviders(maxProviders), + rpqm.WithIgnoreProviders(ignoredPeerIDs...), + ) + if err != nil { + return nil, err + } + + // Explicitly enable/disable server + in.BitswapOpts = append(in.BitswapOpts, bitswap.WithServerEnabled(serverEnabled)) + + bs := bitswap.New(helpers.LifecycleCtx(in.Mctx, lc), bitswapNetworks, providerQueryMgr, bitswapBlockstore, in.BitswapOpts...) - exch := bitswap.New(helpers.LifecycleCtx(in.Mctx, lc), bitswapNetwork, in.Bs, in.BitswapOpts...) 
lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { - return exch.Close() + return bs.Close() }, }) - return exch + return bs, nil } } + +// OnlineExchange creates new LibP2P backed block exchange. +// Returns a no-op exchange if Bitswap is disabled. +func OnlineExchange(isBitswapActive bool) interface{} { + return func(in *bitswap.Bitswap, lc fx.Lifecycle) exchange.Interface { + if !isBitswapActive { + return &noopExchange{closer: in} + } + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return in.Close() + }, + }) + return in + } +} + +type noopExchange struct { + closer io.Closer +} + +func (e *noopExchange) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { + return nil, ipld.ErrNotFound{Cid: c} +} + +func (e *noopExchange) GetBlocks(ctx context.Context, cids []cid.Cid) (<-chan blocks.Block, error) { + ch := make(chan blocks.Block) + close(ch) + return ch, nil +} + +func (e *noopExchange) NotifyNewBlocks(ctx context.Context, blocks ...blocks.Block) error { + return nil +} + +func (e *noopExchange) Close() error { + return e.closer.Close() +} diff --git a/core/node/builder.go b/core/node/builder.go index 411e3228c..4014308f5 100644 --- a/core/node/builder.go +++ b/core/node/builder.go @@ -7,6 +7,7 @@ import ( "go.uber.org/fx" + "github.com/ipfs/boxo/autoconf" "github.com/ipfs/kubo/core/node/helpers" "github.com/ipfs/kubo/core/node/libp2p" "github.com/ipfs/kubo/repo" @@ -125,7 +126,7 @@ func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { return nil, err } - c.Bootstrap = cfg.DefaultBootstrapAddresses + c.Bootstrap = autoconf.FallbackBootstrapPeers c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"} c.Identity.PeerID = pid.String() c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb) diff --git a/core/node/core.go b/core/node/core.go index fba314311..a636a0c54 100644 --- a/core/node/core.go +++ b/core/node/core.go @@ -2,6 +2,7 @@ package node import ( "context" + 
"errors" "fmt" "github.com/ipfs/boxo/blockservice" @@ -24,49 +25,74 @@ import ( dagpb "github.com/ipld/go-codec-dagpb" "go.uber.org/fx" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core/node/helpers" "github.com/ipfs/kubo/repo" ) -var FilesRootDatastoreKey datastore.Key - -func init() { - FilesRootDatastoreKey = datastore.NewKey("/local/filesroot") -} - // BlockService creates new blockservice which provides an interface to fetch content-addressable blocks -func BlockService(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService { - bsvc := blockservice.New(bs, rem) +func BlockService(cfg *config.Config) func(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService { + return func(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService { + bsvc := blockservice.New(bs, rem, + blockservice.WriteThrough(cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough)), + ) - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return bsvc.Close() - }, - }) + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return bsvc.Close() + }, + }) - return bsvc + return bsvc + } } // Pinning creates new pinner which tells GC which blocks should be kept -func Pinning(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo) (pin.Pinner, error) { - rootDS := repo.Datastore() +func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov DHTProvider) (pin.Pinner, error) { + // Parse strategy at function creation time (not inside the returned function) + // This happens before the provider is created, which is why we pass the strategy + // string and parse it here, rather than using fx-provided ProvidingStrategy. 
+ strategyFlag := config.ParseProvideStrategy(strategy) - syncFn := func(ctx context.Context) error { - if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { - return err + return func(bstore blockstore.Blockstore, + ds format.DAGService, + repo repo.Repo, + prov DHTProvider, + ) (pin.Pinner, error) { + rootDS := repo.Datastore() + + syncFn := func(ctx context.Context) error { + if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { + return err + } + return rootDS.Sync(ctx, filestore.FilestorePrefix) } - return rootDS.Sync(ctx, filestore.FilestorePrefix) + syncDs := &syncDagService{ds, syncFn} + + ctx := context.TODO() + + var opts []dspinner.Option + roots := (strategyFlag & config.ProvideStrategyRoots) != 0 + pinned := (strategyFlag & config.ProvideStrategyPinned) != 0 + + // Important: Only one of WithPinnedProvider or WithRootsProvider should be active. + // Having both would cause duplicate root advertisements since "pinned" includes all + // pinned content (roots + children), while "roots" is just the root CIDs. + // We prioritize "pinned" if both are somehow set (though this shouldn't happen + // with proper strategy parsing). + if pinned { + opts = append(opts, dspinner.WithPinnedProvider(prov)) + } else if roots { + opts = append(opts, dspinner.WithRootsProvider(prov)) + } + + pinning, err := dspinner.New(ctx, rootDS, syncDs, opts...) + if err != nil { + return nil, err + } + + return pinning, nil } - syncDs := &syncDagService{ds, syncFn} - - ctx := context.TODO() - - pinning, err := dspinner.New(ctx, rootDS, syncDs) - if err != nil { - return nil, err - } - - return pinning, nil } var ( @@ -116,6 +142,7 @@ func FetcherConfig(bs blockservice.BlockService) FetchersOut { // path resolution should not fetch new blocks via exchange. 
offlineBs := blockservice.New(bs.Blockstore(), offline.Exchange(bs.Blockstore())) offlineIpldFetcher := bsfetcher.NewFetcherConfig(offlineBs) + offlineIpldFetcher.SkipNotFound = true // carries onto the UnixFSFetcher below offlineIpldFetcher.PrototypeChooser = dagpb.AddSupportToChooser(bsfetcher.DefaultPrototypeChooser) offlineUnixFSFetcher := offlineIpldFetcher.WithReifier(unixfsnode.Reify) @@ -152,62 +179,79 @@ func Dag(bs blockservice.BlockService) format.DAGService { } // Files loads persisted MFS root -func Files(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore) (*mfs.Root, error) { - pf := func(ctx context.Context, c cid.Cid) error { - rootDS := repo.Datastore() - if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { - return err - } - if err := rootDS.Sync(ctx, filestore.FilestorePrefix); err != nil { - return err +func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) { + dsk := datastore.NewKey("/local/filesroot") + pf := func(ctx context.Context, c cid.Cid) error { + rootDS := repo.Datastore() + if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { + return err + } + if err := rootDS.Sync(ctx, filestore.FilestorePrefix); err != nil { + return err + } + + if err := rootDS.Put(ctx, dsk, c.Bytes()); err != nil { + return err + } + return rootDS.Sync(ctx, dsk) } - if err := rootDS.Put(ctx, FilesRootDatastoreKey, c.Bytes()); err != nil { - return err - } - return rootDS.Sync(ctx, FilesRootDatastoreKey) - } + var nd *merkledag.ProtoNode + ctx := helpers.LifecycleCtx(mctx, lc) + val, err := repo.Datastore().Get(ctx, dsk) - var nd *merkledag.ProtoNode - ctx := helpers.LifecycleCtx(mctx, lc) - val, err := 
repo.Datastore().Get(ctx, FilesRootDatastoreKey) + switch { + case errors.Is(err, datastore.ErrNotFound): + nd = unixfs.EmptyDirNode() + err := dag.Add(ctx, nd) + if err != nil { + return nil, fmt.Errorf("failure writing filesroot to dagstore: %s", err) + } + case err == nil: + c, err := cid.Cast(val) + if err != nil { + return nil, err + } - switch { - case err == datastore.ErrNotFound || val == nil: - nd = unixfs.EmptyDirNode() - err := dag.Add(ctx, nd) - if err != nil { - return nil, fmt.Errorf("failure writing filesroot to dagstore: %s", err) + offlineDag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + rnd, err := offlineDag.Get(ctx, c) + if err != nil { + return nil, fmt.Errorf("error loading filesroot from dagservice: %s", err) + } + + pbnd, ok := rnd.(*merkledag.ProtoNode) + if !ok { + return nil, merkledag.ErrNotProtobuf + } + + nd = pbnd + default: + return nil, err } - case err == nil: - c, err := cid.Cast(val) + + // MFS (Mutable File System) provider integration: Only pass the provider + // to MFS when the strategy includes "mfs". MFS will call StartProviding() + // on every DAGService.Add() operation, which is sufficient for the "mfs" + // strategy - it ensures all MFS content gets announced as it's added or + // modified. For non-mfs strategies, we set provider to nil to avoid + // unnecessary providing. 
+ strategyFlag := config.ParseProvideStrategy(strategy) + if strategyFlag&config.ProvideStrategyMFS == 0 { + prov = nil + } + + root, err := mfs.NewRoot(ctx, dag, nd, pf, prov) if err != nil { return nil, err } - offineDag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - rnd, err := offineDag.Get(ctx, c) - if err != nil { - return nil, fmt.Errorf("error loading filesroot from dagservice: %s", err) - } + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return root.Close() + }, + }) - pbnd, ok := rnd.(*merkledag.ProtoNode) - if !ok { - return nil, merkledag.ErrNotProtobuf - } - - nd = pbnd - default: - return nil, err + return root, err } - - root, err := mfs.NewRoot(ctx, dag, nd, pf) - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return root.Close() - }, - }) - - return root, err } diff --git a/core/node/dns.go b/core/node/dns.go index d338e0e8b..3f0875afb 100644 --- a/core/node/dns.go +++ b/core/node/dns.go @@ -16,5 +16,8 @@ func DNSResolver(cfg *config.Config) (*madns.Resolver, error) { dohOpts = append(dohOpts, doh.WithMaxCacheTTL(cfg.DNS.MaxCacheTTL.WithDefault(time.Duration(math.MaxUint32)*time.Second))) } - return gateway.NewDNSResolver(cfg.DNS.Resolvers, dohOpts...) + // Replace "auto" DNS resolver placeholders with autoconf values + resolvers := cfg.DNSResolversWithAutoConf() + + return gateway.NewDNSResolver(resolvers, dohOpts...) 
} diff --git a/core/node/groups.go b/core/node/groups.go index d806e2ef6..bacc12160 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -4,14 +4,15 @@ import ( "context" "errors" "fmt" + "regexp" + "strings" "time" - "github.com/dustin/go-humanize" blockstore "github.com/ipfs/boxo/blockstore" offline "github.com/ipfs/boxo/exchange/offline" uio "github.com/ipfs/boxo/ipld/unixfs/io" util "github.com/ipfs/boxo/util" - "github.com/ipfs/go-log" + "github.com/ipfs/go-log/v2" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core/node/libp2p" "github.com/ipfs/kubo/p2p" @@ -47,7 +48,9 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part grace := cfg.Swarm.ConnMgr.GracePeriod.WithDefault(config.DefaultConnMgrGracePeriod) low := int(cfg.Swarm.ConnMgr.LowWater.WithDefault(config.DefaultConnMgrLowWater)) high := int(cfg.Swarm.ConnMgr.HighWater.WithDefault(config.DefaultConnMgrHighWater)) - connmgr = fx.Provide(libp2p.ConnectionManager(low, high, grace)) + silence := cfg.Swarm.ConnMgr.SilencePeriod.WithDefault(config.DefaultConnMgrSilencePeriod) + connmgr = fx.Provide(libp2p.ConnectionManager(low, high, grace, silence)) + default: return fx.Error(fmt.Errorf("unrecognized Swarm.ConnMgr.Type: %q", connMgrType)) } @@ -110,9 +113,14 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part autonat = fx.Provide(libp2p.AutoNATService(cfg.AutoNAT.Throttle, true)) } + enableTCPTransport := cfg.Swarm.Transports.Network.TCP.WithDefault(true) + enableWebsocketTransport := cfg.Swarm.Transports.Network.Websocket.WithDefault(true) enableRelayTransport := cfg.Swarm.Transports.Network.Relay.WithDefault(true) // nolint enableRelayService := cfg.Swarm.RelayService.Enabled.WithDefault(enableRelayTransport) enableRelayClient := cfg.Swarm.RelayClient.Enabled.WithDefault(enableRelayTransport) + enableAutoTLS := cfg.AutoTLS.Enabled.WithDefault(config.DefaultAutoTLSEnabled) + enableAutoWSS := 
cfg.AutoTLS.AutoWSS.WithDefault(config.DefaultAutoWSS) + atlsLog := log.Logger("autotls") // Log error when relay subsystem could not be initialized due to missing dependency if !enableRelayTransport { @@ -124,6 +132,63 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part } } + switch { + case enableAutoTLS && enableTCPTransport && enableWebsocketTransport: + // AutoTLS for Secure WebSockets: ensure WSS listeners are in place (manual or automatic) + wssWildcard := fmt.Sprintf("/tls/sni/*.%s/ws", cfg.AutoTLS.DomainSuffix.WithDefault(config.DefaultDomainSuffix)) + wssWildcardPresent := false + customWsPresent := false + customWsRegex := regexp.MustCompile(`/wss?$`) + tcpRegex := regexp.MustCompile(`/tcp/\d+$`) + + // inspect listeners defined in config at Addresses.Swarm + var tcpListeners []string + for _, listener := range cfg.Addresses.Swarm { + // detect if user manually added /tls/sni/.../ws listener matching AutoTLS.DomainSuffix + if strings.Contains(listener, wssWildcard) { + atlsLog.Infof("found compatible wildcard listener in Addresses.Swarm. AutoTLS will be used on %s", listener) + wssWildcardPresent = true + break + } + // detect if user manually added own /ws or /wss listener that is + // not related to AutoTLS feature + if customWsRegex.MatchString(listener) { + atlsLog.Infof("found custom /ws listener set by user in Addresses.Swarm. 
AutoTLS will not be used on %s.", listener) + customWsPresent = true + break + } + // else, remember /tcp listeners that can be reused for /tls/sni/../ws + if tcpRegex.MatchString(listener) { + tcpListeners = append(tcpListeners, listener) + } + } + + // Append AutoTLS's wildcard listener + // if no manual /ws listener was set by the user + if enableAutoWSS && !wssWildcardPresent && !customWsPresent { + if len(tcpListeners) == 0 { + logger.Error("Invalid configuration, AutoTLS will be disabled: AutoTLS.AutoWSS=true requires at least one /tcp listener present in Addresses.Swarm, see https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls") + enableAutoTLS = false + } + for _, tcpListener := range tcpListeners { + wssListener := tcpListener + wssWildcard + cfg.Addresses.Swarm = append(cfg.Addresses.Swarm, wssListener) + atlsLog.Infof("appended AutoWSS listener: %s", wssListener) + } + } + + if !wssWildcardPresent && !enableAutoWSS { + logger.Error(fmt.Sprintf("Invalid configuration, AutoTLS will be disabled: AutoTLS.Enabled=true requires a /tcp listener ending with %q to be present in Addresses.Swarm or AutoTLS.AutoWSS=true, see https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls", wssWildcard)) + enableAutoTLS = false + } + case enableAutoTLS && !enableTCPTransport: + logger.Error("Invalid configuration: AutoTLS.Enabled=true requires Swarm.Transports.Network.TCP to be true as well. AutoTLS will be disabled.") + enableAutoTLS = false + case enableAutoTLS && !enableWebsocketTransport: + logger.Error("Invalid configuration: AutoTLS.Enabled=true requires Swarm.Transports.Network.Websocket to be true as well. 
AutoTLS will be disabled.") + enableAutoTLS = false + } + // Gather all the options opts := fx.Options( BaseLibP2P, @@ -133,6 +198,8 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part // Services (resource management) fx.Provide(libp2p.ResourceManager(bcfg.Repo.Path(), cfg.Swarm, userResourceOverrides)), + maybeProvide(libp2p.P2PForgeCertMgr(bcfg.Repo.Path(), cfg.AutoTLS, atlsLog), enableAutoTLS), + maybeInvoke(libp2p.StartP2PAutoTLS, enableAutoTLS), fx.Provide(libp2p.AddrFilters(cfg.Swarm.AddrFilters)), fx.Provide(libp2p.AddrsFactory(cfg.Addresses.Announce, cfg.Addresses.AppendAnnounce, cfg.Addresses.NoAnnounce)), fx.Provide(libp2p.SmuxTransport(cfg.Swarm.Transports)), @@ -148,6 +215,7 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part fx.Provide(libp2p.Routing), fx.Provide(libp2p.ContentRouting), + fx.Provide(libp2p.ContentDiscovery), fx.Provide(libp2p.BaseRouting(cfg)), maybeProvide(libp2p.PubsubRouter, bcfg.getOpt("ipnsps")), @@ -168,6 +236,7 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option { cacheOpts := blockstore.DefaultCacheOpts() cacheOpts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize + cacheOpts.HasTwoQueueCacheSize = int(cfg.Datastore.BlockKeyCacheSize.WithDefault(config.DefaultBlockKeyCacheSize)) if !bcfg.Permanent { cacheOpts.HasBloomFilterSize = 0 } @@ -180,7 +249,12 @@ func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option { return fx.Options( fx.Provide(RepoConfig), fx.Provide(Datastore), - fx.Provide(BaseBlockstoreCtor(cacheOpts, cfg.Datastore.HashOnRead)), + fx.Provide(BaseBlockstoreCtor( + cacheOpts, + cfg.Datastore.HashOnRead, + cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough), + cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy), + )), finalBstore, ) } @@ -239,7 +313,7 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides 
rcmgr.Part ipnsCacheSize = DefaultIpnsCacheSize } if ipnsCacheSize < 0 { - return fx.Error(fmt.Errorf("cannot specify negative resolve cache size")) + return fx.Error(errors.New("cannot specify negative resolve cache size")) } // Republisher params @@ -268,12 +342,18 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part recordLifetime = d } - /* don't provide from bitswap when the strategic provider service is active */ - shouldBitswapProvide := !cfg.Experimental.StrategicProviding + isBitswapLibp2pEnabled := cfg.Bitswap.Libp2pEnabled.WithDefault(config.DefaultBitswapLibp2pEnabled) + isBitswapServerEnabled := cfg.Bitswap.ServerEnabled.WithDefault(config.DefaultBitswapServerEnabled) + isHTTPRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled) + + // The Provide system handles both new CID announcements and periodic re-announcements. + // Disabling is controlled by Provide.Enabled=false or setting Interval to 0. + isProviderEnabled := cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) && cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) != 0 return fx.Options( - fx.Provide(BitswapOptions(cfg, shouldBitswapProvide)), - fx.Provide(OnlineExchange()), + fx.Provide(BitswapOptions(cfg)), + fx.Provide(Bitswap(isBitswapServerEnabled, isBitswapLibp2pEnabled, isHTTPRetrievalEnabled)), + fx.Provide(OnlineExchange(isBitswapLibp2pEnabled)), fx.Provide(DNSResolver), fx.Provide(Namesys(ipnsCacheSize, cfg.Ipns.MaxCacheTTL.WithDefault(config.DefaultIpnsMaxCacheTTL))), fx.Provide(Peering), @@ -284,12 +364,7 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part fx.Provide(p2p.New), LibP2P(bcfg, cfg, userResourceOverrides), - OnlineProviders( - cfg.Experimental.StrategicProviding, - cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy), - cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval), - 
cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient), - ), + OnlineProviders(isProviderEnabled, cfg), ) } @@ -302,18 +377,16 @@ func Offline(cfg *config.Config) fx.Option { fx.Provide(libp2p.Routing), fx.Provide(libp2p.ContentRouting), fx.Provide(libp2p.OfflineRouting), + fx.Provide(libp2p.ContentDiscovery), OfflineProviders(), ) } // Core groups basic IPFS services var Core = fx.Options( - fx.Provide(BlockService), fx.Provide(Dag), fx.Provide(FetcherConfig), fx.Provide(PathResolverConfig), - fx.Provide(Pinning), - fx.Provide(Files), ) func Networked(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.PartialLimitConfig) fx.Option { @@ -339,31 +412,51 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option { return fx.Error(err) } - // Auto-sharding settings - shardSizeString := cfg.Internal.UnixFSShardingSizeThreshold.WithDefault("256kiB") - shardSizeInt, err := humanize.ParseBytes(shardSizeString) - if err != nil { - return fx.Error(err) - } - uio.HAMTShardingSize = int(shardSizeInt) - // Migrate users of deprecated Experimental.ShardingEnabled flag if cfg.Experimental.ShardingEnabled { - logger.Fatal("The `Experimental.ShardingEnabled` field is no longer used, please remove it from the config.\n" + - "go-ipfs now automatically shards when directory block is bigger than `" + shardSizeString + "`.\n" + - "If you need to restore the old behavior (sharding everything) set `Internal.UnixFSShardingSizeThreshold` to `1B`.\n") + logger.Fatal("The `Experimental.ShardingEnabled` field is no longer used, please remove it from the config. Use Import.UnixFSHAMTDirectorySizeThreshold instead.") } + if !cfg.Internal.UnixFSShardingSizeThreshold.IsDefault() { + msg := "The `Internal.UnixFSShardingSizeThreshold` field was renamed to `Import.UnixFSHAMTDirectorySizeThreshold`. 
Please update your config.\n" + if !cfg.Import.UnixFSHAMTDirectorySizeThreshold.IsDefault() { + logger.Fatal(msg) // conflicting values, hard fail + } + logger.Error(msg) + // Migrate the old OptionalString value to the new OptionalBytes field. + // Since OptionalBytes embeds OptionalString, we can construct it directly + // with the old value, preserving the user's original string (e.g., "256KiB"). + cfg.Import.UnixFSHAMTDirectorySizeThreshold = config.OptionalBytes{OptionalString: *cfg.Internal.UnixFSShardingSizeThreshold} + } + + // Validate Import configuration + if err := config.ValidateImportConfig(&cfg.Import); err != nil { + return fx.Error(err) + } + + // Validate Provide configuration + if err := config.ValidateProvideConfig(&cfg.Provide); err != nil { + return fx.Error(err) + } + + // Auto-sharding settings + shardSingThresholdInt := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold) + shardMaxFanout := cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout) + // TODO: avoid overriding this globally, see if we can extend Directory interface like Get/SetMaxLinks from https://github.com/ipfs/boxo/pull/906 + uio.HAMTShardingSize = int(shardSingThresholdInt) + uio.DefaultShardWidth = int(shardMaxFanout) + + providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy) return fx.Options( bcfgOpts, - fx.Provide(baseProcess), - Storage(bcfg, cfg), Identity(cfg), IPNS, Networked(bcfg, cfg, userResourceOverrides), - + fx.Provide(BlockService(cfg)), + fx.Provide(Pinning(providerStrategy)), + fx.Provide(Files(providerStrategy)), Core, ) } diff --git a/core/node/helpers.go b/core/node/helpers.go index 63e76ead7..05cccfd01 100644 --- a/core/node/helpers.go +++ b/core/node/helpers.go @@ -4,34 +4,38 @@ import ( "context" "errors" - "github.com/jbenet/goprocess" "go.uber.org/fx" ) -type lcProcess struct { +type lcStartStop struct { fx.In - LC fx.Lifecycle - 
Proc goprocess.Process + LC fx.Lifecycle } -// Append wraps ProcessFunc into a goprocess, and appends it to the lifecycle -func (lp *lcProcess) Append(f goprocess.ProcessFunc) { +// Append wraps a function into a fx.Hook and appends it to the fx.Lifecycle. +func (lcss *lcStartStop) Append(f func() func()) { // Hooks are guaranteed to run in sequence. If a hook fails to start, its // OnStop won't be executed. - var proc goprocess.Process + var stopFunc func() - lp.LC.Append(fx.Hook{ + lcss.LC.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - proc = lp.Proc.Go(f) + if ctx.Err() != nil { + return nil + } + stopFunc = f() return nil }, OnStop: func(ctx context.Context) error { - if proc == nil { // Theoretically this shouldn't ever happen - return errors.New("lcProcess: proc was nil") + if ctx.Err() != nil { + return nil } - - return proc.Close() // todo: respect ctx, somehow + if stopFunc == nil { // Theoretically this shouldn't ever happen + return errors.New("lcStatStop: stopFunc was nil") + } + stopFunc() + return nil }, }) } @@ -50,14 +54,3 @@ func maybeInvoke(opt interface{}, enable bool) fx.Option { } return fx.Options() } - -// baseProcess creates a goprocess which is closed when the lifecycle signals it to stop -func baseProcess(lc fx.Lifecycle) goprocess.Process { - p := goprocess.WithParent(goprocess.Background()) - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - return p.Close() - }, - }) - return p -} diff --git a/core/node/helpers/helpers.go b/core/node/helpers/helpers.go index 4c3a04881..36ac435e8 100644 --- a/core/node/helpers/helpers.go +++ b/core/node/helpers/helpers.go @@ -8,7 +8,7 @@ import ( type MetricsCtx context.Context -// LifecycleCtx creates a context which will be cancelled when lifecycle stops +// LifecycleCtx creates a context which will be canceled when lifecycle stops // // This is a hack which we need because most of our services use contexts in a // wrong way diff --git a/core/node/ipns.go b/core/node/ipns.go 
index 5f516d035..df9b087c5 100644 --- a/core/node/ipns.go +++ b/core/node/ipns.go @@ -45,8 +45,8 @@ func Namesys(cacheSize int, cacheMaxTTL time.Duration) func(rt irouting.ProvideM } // IpnsRepublisher runs new IPNS republisher service -func IpnsRepublisher(repubPeriod time.Duration, recordLifetime time.Duration) func(lcProcess, namesys.NameSystem, repo.Repo, crypto.PrivKey) error { - return func(lc lcProcess, namesys namesys.NameSystem, repo repo.Repo, privKey crypto.PrivKey) error { +func IpnsRepublisher(repubPeriod time.Duration, recordLifetime time.Duration) func(lcStartStop, namesys.NameSystem, repo.Repo, crypto.PrivKey) error { + return func(lc lcStartStop, namesys namesys.NameSystem, repo repo.Repo, privKey crypto.PrivKey) error { repub := republisher.NewRepublisher(namesys, repo.Datastore(), privKey, repo.Keystore()) if repubPeriod != 0 { diff --git a/core/node/libp2p/addrs.go b/core/node/libp2p/addrs.go index b287c20ff..135b71d91 100644 --- a/core/node/libp2p/addrs.go +++ b/core/node/libp2p/addrs.go @@ -1,12 +1,24 @@ package libp2p import ( + "context" "fmt" + "os" + "path/filepath" + "time" + logging "github.com/ipfs/go-log/v2" + version "github.com/ipfs/kubo" + "github.com/ipfs/kubo/config" + p2pforge "github.com/ipshipyard/p2p-forge/client" "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/host" p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic" ma "github.com/multiformats/go-multiaddr" mamask "github.com/whyrusleeping/multiaddr-filter" + + "github.com/caddyserver/certmagic" + "go.uber.org/fx" ) func AddrFilters(filters []string) func() (*ma.Filters, Libp2pOpts, error) { @@ -24,7 +36,7 @@ func AddrFilters(filters []string) func() (*ma.Filters, Libp2pOpts, error) { } } -func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string) (p2pbhost.AddrsFactory, error) { +func makeAddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) (p2pbhost.AddrsFactory, error) { var err error // To assign to 
the slice in the for loop existing := make(map[string]bool) // To avoid duplicates @@ -38,7 +50,7 @@ func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []st } var appendAnnAddrs []ma.Multiaddr - for _, addr := range appendAnnouce { + for _, addr := range appendAnnounce { if existing[addr] { // skip AppendAnnounce that is on the Announce list already continue @@ -87,12 +99,26 @@ func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []st }, nil } -func AddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string) func() (opts Libp2pOpts, err error) { - return func() (opts Libp2pOpts, err error) { - addrsFactory, err := makeAddrsFactory(announce, appendAnnouce, noAnnounce) +func AddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) interface{} { + return func(params struct { + fx.In + ForgeMgr *p2pforge.P2PForgeCertMgr `optional:"true"` + }, + ) (opts Libp2pOpts, err error) { + var addrsFactory p2pbhost.AddrsFactory + announceAddrsFactory, err := makeAddrsFactory(announce, appendAnnounce, noAnnounce) if err != nil { return opts, err } + if params.ForgeMgr == nil { + addrsFactory = announceAddrsFactory + } else { + addrsFactory = func(multiaddrs []ma.Multiaddr) []ma.Multiaddr { + forgeProcessing := params.ForgeMgr.AddressFactory()(multiaddrs) + announceProcessing := announceAddrsFactory(forgeProcessing) + return announceProcessing + } + } opts.Opts = append(opts.Opts, libp2p.AddrsFactory(addrsFactory)) return } @@ -107,3 +133,55 @@ func ListenOn(addresses []string) interface{} { } } } + +func P2PForgeCertMgr(repoPath string, cfg config.AutoTLS, atlsLog *logging.ZapEventLogger) interface{} { + return func() (*p2pforge.P2PForgeCertMgr, error) { + storagePath := filepath.Join(repoPath, "p2p-forge-certs") + rawLogger := atlsLog.Desugar() + + // TODO: this should not be necessary after + // https://github.com/ipshipyard/p2p-forge/pull/42 but keep it here for + // now to help tracking down 
any remaining conditions causing + // https://github.com/ipshipyard/p2p-forge/issues/8 + certmagic.Default.Logger = rawLogger.Named("default_fixme") + certmagic.DefaultACME.Logger = rawLogger.Named("default_acme_client_fixme") + + registrationDelay := cfg.RegistrationDelay.WithDefault(config.DefaultAutoTLSRegistrationDelay) + if cfg.Enabled == config.True && cfg.RegistrationDelay.IsDefault() { + // Skip delay if user explicitly enabled AutoTLS.Enabled in config + // and did not set custom AutoTLS.RegistrationDelay + registrationDelay = 0 * time.Second + } + + certStorage := &certmagic.FileStorage{Path: storagePath} + certMgr, err := p2pforge.NewP2PForgeCertMgr( + p2pforge.WithLogger(rawLogger.Sugar()), + p2pforge.WithForgeDomain(cfg.DomainSuffix.WithDefault(config.DefaultDomainSuffix)), + p2pforge.WithForgeRegistrationEndpoint(cfg.RegistrationEndpoint.WithDefault(config.DefaultRegistrationEndpoint)), + p2pforge.WithRegistrationDelay(registrationDelay), + p2pforge.WithCAEndpoint(cfg.CAEndpoint.WithDefault(config.DefaultCAEndpoint)), + p2pforge.WithForgeAuth(cfg.RegistrationToken.WithDefault(os.Getenv(p2pforge.ForgeAuthEnv))), + p2pforge.WithUserAgent(version.GetUserAgentVersion()), + p2pforge.WithCertificateStorage(certStorage), + p2pforge.WithShortForgeAddrs(cfg.ShortAddrs.WithDefault(config.DefaultAutoTLSShortAddrs)), + ) + if err != nil { + return nil, err + } + + return certMgr, nil + } +} + +func StartP2PAutoTLS(lc fx.Lifecycle, certMgr *p2pforge.P2PForgeCertMgr, h host.Host) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + certMgr.ProvideHost(h) + return certMgr.Start() + }, + OnStop: func(ctx context.Context) error { + certMgr.Stop() + return nil + }, + }) +} diff --git a/core/node/libp2p/dns.go b/core/node/libp2p/dns.go index 1c56a2c0a..2ee73b4c9 100644 --- a/core/node/libp2p/dns.go +++ b/core/node/libp2p/dns.go @@ -2,10 +2,11 @@ package libp2p import ( "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/p2p/net/swarm" madns 
"github.com/multiformats/go-multiaddr-dns" ) func MultiaddrResolver(rslv *madns.Resolver) (opts Libp2pOpts, err error) { - opts.Opts = append(opts.Opts, libp2p.MultiaddrResolver(rslv)) + opts.Opts = append(opts.Opts, libp2p.MultiaddrResolver(swarm.ResolverFromMaDNS{Resolver: rslv})) return opts, nil } diff --git a/core/node/libp2p/fd/sys_unix.go b/core/node/libp2p/fd/sys_unix.go index 5e417c0fa..dcb82a881 100644 --- a/core/node/libp2p/fd/sys_unix.go +++ b/core/node/libp2p/fd/sys_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin -// +build linux darwin package fd diff --git a/core/node/libp2p/host.go b/core/node/libp2p/host.go index 7950f3dc6..0cb85f454 100644 --- a/core/node/libp2p/host.go +++ b/core/node/libp2p/host.go @@ -49,17 +49,30 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHo if err != nil { return out, err } - bootstrappers, err := cfg.BootstrapPeers() + // Use auto-config resolution for actual connectivity + bootstrappers, err := cfg.BootstrapPeersWithAutoConf() if err != nil { return out, err } + // Optimistic provide is enabled either via dedicated expierimental flag, or when DHT Provide Sweep is enabled. + // When DHT Provide Sweep is enabled, all provide operations go through the + // `SweepingProvider`, hence the provides don't use the optimistic provide + // logic. Provides use `SweepingProvider.StartProviding()` and not + // `IpfsDHT.Provide()`, which is where the optimistic provide logic is + // implemented. However, `IpfsDHT.Provide()` is used to quickly provide roots + // when user manually adds content with the `--fast-provide` flag enabled. In + // this case we want to use optimistic provide logic to quickly announce the + // content to the network. This should be the only use case of + // `IpfsDHT.Provide()` when DHT Provide Sweep is enabled. 
+ optimisticProvide := cfg.Experimental.OptimisticProvide || cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) + routingOptArgs := RoutingOptionArgs{ Ctx: ctx, Datastore: params.Repo.Datastore(), Validator: params.Validator, BootstrapPeers: bootstrappers, - OptimisticProvide: cfg.Experimental.OptimisticProvide, + OptimisticProvide: optimisticProvide, OptimisticProvideJobsPoolSize: cfg.Experimental.OptimisticProvideJobsPoolSize, LoopbackAddressesOnLanDHT: cfg.Routing.LoopbackAddressesOnLanDHT.WithDefault(config.DefaultLoopbackAddressesOnLanDHT), } diff --git a/core/node/libp2p/libp2p.go b/core/node/libp2p/libp2p.go index e6977b061..da6991b1f 100644 --- a/core/node/libp2p/libp2p.go +++ b/core/node/libp2p/libp2p.go @@ -8,7 +8,7 @@ import ( version "github.com/ipfs/kubo" config "github.com/ipfs/kubo/config" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" @@ -25,9 +25,12 @@ type Libp2pOpts struct { Opts []libp2p.Option `group:"libp2p"` } -func ConnectionManager(low, high int, grace time.Duration) func() (opts Libp2pOpts, err error) { +func ConnectionManager(low, high int, grace, silence time.Duration) func() (opts Libp2pOpts, err error) { return func() (opts Libp2pOpts, err error) { - cm, err := connmgr.NewConnManager(low, high, connmgr.WithGracePeriod(grace)) + cm, err := connmgr.NewConnManager(low, high, + connmgr.WithGracePeriod(grace), + connmgr.WithSilencePeriod(silence), + ) if err != nil { return opts, err } diff --git a/core/node/libp2p/rcmgr.go b/core/node/libp2p/rcmgr.go index 80bfec34a..6844757f9 100644 --- a/core/node/libp2p/rcmgr.go +++ b/core/node/libp2p/rcmgr.go @@ -3,11 +3,15 @@ package libp2p import ( "context" "encoding/json" + "errors" "fmt" "os" "path/filepath" - "github.com/benbjohnson/clock" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core/node/helpers" + 
"github.com/ipfs/kubo/repo" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/network" @@ -16,17 +20,13 @@ import ( rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" "github.com/multiformats/go-multiaddr" "go.uber.org/fx" - - "github.com/ipfs/kubo/config" - "github.com/ipfs/kubo/core/node/helpers" - "github.com/ipfs/kubo/repo" ) var rcmgrLogger = logging.Logger("rcmgr") const NetLimitTraceFilename = "rcmgr.json.gz" -var ErrNoResourceMgr = fmt.Errorf("missing ResourceMgr: make sure the daemon is running with Swarm.ResourceMgr.Enabled") +var ErrNoResourceMgr = errors.New("missing ResourceMgr: make sure the daemon is running with Swarm.ResourceMgr.Enabled") func ResourceManager(repoPath string, cfg config.SwarmConfig, userResourceOverrides rcmgr.PartialLimitConfig) interface{} { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo) (network.ResourceManager, Libp2pOpts, error) { @@ -70,7 +70,6 @@ filled in with autocomputed defaults.`) } ropts := []rcmgr.Option{ - rcmgr.WithMetrics(createRcmgrMetrics()), rcmgr.WithTraceReporter(str), rcmgr.WithLimitPerSubnet( nil, @@ -112,7 +111,6 @@ filled in with autocomputed defaults.`) return nil, opts, fmt.Errorf("creating libp2p resource manager: %w", err) } lrm := &loggingResourceManager{ - clock: clock.New(), logger: &logging.Logger("resourcemanager").SugaredLogger, delegate: manager, } diff --git a/core/node/libp2p/rcmgr_defaults.go b/core/node/libp2p/rcmgr_defaults.go index 98fdccb99..94851a1a6 100644 --- a/core/node/libp2p/rcmgr_defaults.go +++ b/core/node/libp2p/rcmgr_defaults.go @@ -19,12 +19,8 @@ var infiniteResourceLimits = rcmgr.InfiniteLimits.ToPartialLimitConfig().System // The defaults follow the documentation in docs/libp2p-resource-management.md. // Any changes in the logic here should be reflected there. 
func createDefaultLimitConfig(cfg config.SwarmConfig) (limitConfig rcmgr.ConcreteLimitConfig, logMessageForStartup string, err error) { - maxMemoryDefaultString := humanize.Bytes(uint64(memory.TotalMemory()) / 2) - maxMemoryString := cfg.ResourceMgr.MaxMemory.WithDefault(maxMemoryDefaultString) - maxMemory, err := humanize.ParseBytes(maxMemoryString) - if err != nil { - return rcmgr.ConcreteLimitConfig{}, "", err - } + maxMemoryDefault := uint64(memory.TotalMemory()) / 2 + maxMemory := cfg.ResourceMgr.MaxMemory.WithDefault(maxMemoryDefault) maxMemoryMB := maxMemory / (1024 * 1024) maxFD := int(cfg.ResourceMgr.MaxFileDescriptors.WithDefault(int64(fd.GetNumFDs()) / 2)) @@ -142,7 +138,7 @@ Computed default go-libp2p Resource Manager limits based on: These can be inspected with 'ipfs swarm resources'. -`, maxMemoryString, maxFD) +`, humanize.Bytes(maxMemory), maxFD) // We already have a complete value thus pass in an empty ConcreteLimitConfig. return partialLimits.Build(rcmgr.ConcreteLimitConfig{}), msg, nil diff --git a/core/node/libp2p/rcmgr_logging.go b/core/node/libp2p/rcmgr_logging.go index 56e017b82..72ee07668 100644 --- a/core/node/libp2p/rcmgr_logging.go +++ b/core/node/libp2p/rcmgr_logging.go @@ -3,10 +3,10 @@ package libp2p import ( "context" "errors" + "net" "sync" "time" - "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" @@ -16,7 +16,6 @@ import ( ) type loggingResourceManager struct { - clock clock.Clock logger *zap.SugaredLogger delegate network.ResourceManager logInterval time.Duration @@ -41,7 +40,7 @@ func (n *loggingResourceManager) start(ctx context.Context) { if logInterval == 0 { logInterval = 10 * time.Second } - ticker := n.clock.Ticker(logInterval) + ticker := time.NewTicker(logInterval) go func() { defer ticker.Stop() for { @@ -164,6 +163,10 @@ func (n *loggingResourceManager) Stat() rcmgr.ResourceManagerStat { return rapi.Stat() } +func 
(n *loggingResourceManager) VerifySourceAddress(addr net.Addr) bool { + return n.delegate.VerifySourceAddress(addr) +} + func (s *loggingScope) ReserveMemory(size int, prio uint8) error { err := s.delegate.ReserveMemory(size, prio) s.countErrs(err) diff --git a/core/node/libp2p/rcmgr_logging_test.go b/core/node/libp2p/rcmgr_logging_test.go index 559a3fec3..1cc83eb34 100644 --- a/core/node/libp2p/rcmgr_logging_test.go +++ b/core/node/libp2p/rcmgr_logging_test.go @@ -1,11 +1,10 @@ package libp2p import ( - "context" "testing" + "testing/synctest" "time" - "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/network" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" ma "github.com/multiformats/go-multiaddr" @@ -15,49 +14,49 @@ import ( ) func TestLoggingResourceManager(t *testing.T) { - clock := clock.NewMock() - orig := rcmgr.DefaultLimits.AutoScale() - limits := orig.ToPartialLimitConfig() - limits.System.Conns = 1 - limits.System.ConnsInbound = 1 - limits.System.ConnsOutbound = 1 - limiter := rcmgr.NewFixedLimiter(limits.Build(orig)) - rm, err := rcmgr.NewResourceManager(limiter) - if err != nil { - t.Fatal(err) - } - - oCore, oLogs := observer.New(zap.WarnLevel) - oLogger := zap.New(oCore) - lrm := &loggingResourceManager{ - clock: clock, - logger: oLogger.Sugar(), - delegate: rm, - logInterval: 1 * time.Second, - } - - // 2 of these should result in resource limit exceeded errors and subsequent log messages - for i := 0; i < 3; i++ { - _, _ = lrm.OpenConnection(network.DirInbound, false, ma.StringCast("/ip4/127.0.0.1/tcp/1234")) - } - - // run the logger which will write an entry for those errors - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - lrm.start(ctx) - clock.Add(3 * time.Second) - - timer := time.NewTimer(1 * time.Second) - for { - select { - case <-timer.C: - t.Fatalf("expected logs never arrived") - default: - if oLogs.Len() == 0 { - continue - } - require.Equal(t, "Protected from exceeding resource 
limits 2 times. libp2p message: \"system: cannot reserve inbound connection: resource limit exceeded\".", oLogs.All()[0].Message) - return + synctest.Test(t, func(t *testing.T) { + orig := rcmgr.DefaultLimits.AutoScale() + limits := orig.ToPartialLimitConfig() + limits.System.Conns = 1 + limits.System.ConnsInbound = 1 + limits.System.ConnsOutbound = 1 + limiter := rcmgr.NewFixedLimiter(limits.Build(orig)) + rm, err := rcmgr.NewResourceManager(limiter) + if err != nil { + t.Fatal(err) } - } + defer rm.Close() + + oCore, oLogs := observer.New(zap.WarnLevel) + oLogger := zap.New(oCore) + lrm := &loggingResourceManager{ + logger: oLogger.Sugar(), + delegate: rm, + logInterval: 1 * time.Second, + } + + // 2 of these should result in resource limit exceeded errors and subsequent log messages + for i := 0; i < 3; i++ { + _, _ = lrm.OpenConnection(network.DirInbound, false, ma.StringCast("/ip4/127.0.0.1/tcp/1234")) + } + + // run the logger which will write an entry for those errors + ctx := t.Context() + lrm.start(ctx) + time.Sleep(3 * time.Second) + + timer := time.NewTimer(1 * time.Second) + for { + select { + case <-timer.C: + t.Fatalf("expected logs never arrived") + default: + if oLogs.Len() == 0 { + continue + } + require.Equal(t, "Protected from exceeding resource limits 2 times. 
libp2p message: \"system: cannot reserve inbound connection: resource limit exceeded\".", oLogs.All()[0].Message) + return + } + } + }) } diff --git a/core/node/libp2p/rcmgr_metrics.go b/core/node/libp2p/rcmgr_metrics.go deleted file mode 100644 index f8b1a7daa..000000000 --- a/core/node/libp2p/rcmgr_metrics.go +++ /dev/null @@ -1,251 +0,0 @@ -package libp2p - -import ( - "errors" - "strconv" - - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" - - "github.com/prometheus/client_golang/prometheus" -) - -func mustRegister(c prometheus.Collector) { - err := prometheus.Register(c) - are := prometheus.AlreadyRegisteredError{} - if errors.As(err, &are) { - return - } - if err != nil { - panic(err) - } -} - -func createRcmgrMetrics() rcmgr.MetricsReporter { - const ( - direction = "direction" - usesFD = "usesFD" - protocol = "protocol" - service = "service" - ) - - connAllowed := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_conns_allowed_total", - Help: "allowed connections", - }, - []string{direction, usesFD}, - ) - mustRegister(connAllowed) - - connBlocked := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_conns_blocked_total", - Help: "blocked connections", - }, - []string{direction, usesFD}, - ) - mustRegister(connBlocked) - - streamAllowed := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_streams_allowed_total", - Help: "allowed streams", - }, - []string{direction}, - ) - mustRegister(streamAllowed) - - streamBlocked := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_streams_blocked_total", - Help: "blocked streams", - }, - []string{direction}, - ) - mustRegister(streamBlocked) - - peerAllowed := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "libp2p_rcmgr_peers_allowed_total", - Help: "allowed peers", - }) - 
mustRegister(peerAllowed) - - peerBlocked := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "libp2p_rcmgr_peer_blocked_total", - Help: "blocked peers", - }) - mustRegister(peerBlocked) - - protocolAllowed := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_protocols_allowed_total", - Help: "allowed streams attached to a protocol", - }, - []string{protocol}, - ) - mustRegister(protocolAllowed) - - protocolBlocked := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_protocols_blocked_total", - Help: "blocked streams attached to a protocol", - }, - []string{protocol}, - ) - mustRegister(protocolBlocked) - - protocolPeerBlocked := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_protocols_for_peer_blocked_total", - Help: "blocked streams attached to a protocol for a specific peer", - }, - []string{protocol}, - ) - mustRegister(protocolPeerBlocked) - - serviceAllowed := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_services_allowed_total", - Help: "allowed streams attached to a service", - }, - []string{service}, - ) - mustRegister(serviceAllowed) - - serviceBlocked := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_services_blocked_total", - Help: "blocked streams attached to a service", - }, - []string{service}, - ) - mustRegister(serviceBlocked) - - servicePeerBlocked := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "libp2p_rcmgr_service_for_peer_blocked_total", - Help: "blocked streams attached to a service for a specific peer", - }, - []string{service}, - ) - mustRegister(servicePeerBlocked) - - memoryAllowed := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "libp2p_rcmgr_memory_allocations_allowed_total", - Help: "allowed memory allocations", - }) - mustRegister(memoryAllowed) - - memoryBlocked := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "libp2p_rcmgr_memory_allocations_blocked_total", - Help: 
"blocked memory allocations", - }) - mustRegister(memoryBlocked) - - return rcmgrMetrics{ - connAllowed, - connBlocked, - streamAllowed, - streamBlocked, - peerAllowed, - peerBlocked, - protocolAllowed, - protocolBlocked, - protocolPeerBlocked, - serviceAllowed, - serviceBlocked, - servicePeerBlocked, - memoryAllowed, - memoryBlocked, - } -} - -// Failsafe to ensure interface from go-libp2p-resource-manager is implemented -var _ rcmgr.MetricsReporter = rcmgrMetrics{} - -type rcmgrMetrics struct { - connAllowed *prometheus.CounterVec - connBlocked *prometheus.CounterVec - streamAllowed *prometheus.CounterVec - streamBlocked *prometheus.CounterVec - peerAllowed prometheus.Counter - peerBlocked prometheus.Counter - protocolAllowed *prometheus.CounterVec - protocolBlocked *prometheus.CounterVec - protocolPeerBlocked *prometheus.CounterVec - serviceAllowed *prometheus.CounterVec - serviceBlocked *prometheus.CounterVec - servicePeerBlocked *prometheus.CounterVec - memoryAllowed prometheus.Counter - memoryBlocked prometheus.Counter -} - -func getDirection(d network.Direction) string { - switch d { - default: - return "" - case network.DirInbound: - return "inbound" - case network.DirOutbound: - return "outbound" - } -} - -func (r rcmgrMetrics) AllowConn(dir network.Direction, usefd bool) { - r.connAllowed.WithLabelValues(getDirection(dir), strconv.FormatBool(usefd)).Inc() -} - -func (r rcmgrMetrics) BlockConn(dir network.Direction, usefd bool) { - r.connBlocked.WithLabelValues(getDirection(dir), strconv.FormatBool(usefd)).Inc() -} - -func (r rcmgrMetrics) AllowStream(_ peer.ID, dir network.Direction) { - r.streamAllowed.WithLabelValues(getDirection(dir)).Inc() -} - -func (r rcmgrMetrics) BlockStream(_ peer.ID, dir network.Direction) { - r.streamBlocked.WithLabelValues(getDirection(dir)).Inc() -} - -func (r rcmgrMetrics) AllowPeer(_ peer.ID) { - r.peerAllowed.Inc() -} - -func (r rcmgrMetrics) BlockPeer(_ peer.ID) { - r.peerBlocked.Inc() -} - -func (r rcmgrMetrics) 
AllowProtocol(proto protocol.ID) { - r.protocolAllowed.WithLabelValues(string(proto)).Inc() -} - -func (r rcmgrMetrics) BlockProtocol(proto protocol.ID) { - r.protocolBlocked.WithLabelValues(string(proto)).Inc() -} - -func (r rcmgrMetrics) BlockProtocolPeer(proto protocol.ID, _ peer.ID) { - r.protocolPeerBlocked.WithLabelValues(string(proto)).Inc() -} - -func (r rcmgrMetrics) AllowService(svc string) { - r.serviceAllowed.WithLabelValues(svc).Inc() -} - -func (r rcmgrMetrics) BlockService(svc string) { - r.serviceBlocked.WithLabelValues(svc).Inc() -} - -func (r rcmgrMetrics) BlockServicePeer(svc string, _ peer.ID) { - r.servicePeerBlocked.WithLabelValues(svc).Inc() -} - -func (r rcmgrMetrics) AllowMemory(_ int) { - r.memoryAllowed.Inc() -} - -func (r rcmgrMetrics) BlockMemory(_ int) { - r.memoryBlocked.Inc() -} diff --git a/core/node/libp2p/relay.go b/core/node/libp2p/relay.go index 89567e30d..dd56835fb 100644 --- a/core/node/libp2p/relay.go +++ b/core/node/libp2p/relay.go @@ -33,13 +33,12 @@ func RelayService(enable bool, relayOpts config.RelayService) func() (opts Libp2 Data: relayOpts.ConnectionDataLimit.WithDefault(def.Limit.Data), Duration: relayOpts.ConnectionDurationLimit.WithDefault(def.Limit.Duration), }, - MaxCircuits: int(relayOpts.MaxCircuits.WithDefault(int64(def.MaxCircuits))), - BufferSize: int(relayOpts.BufferSize.WithDefault(int64(def.BufferSize))), - ReservationTTL: relayOpts.ReservationTTL.WithDefault(def.ReservationTTL), - MaxReservations: int(relayOpts.MaxReservations.WithDefault(int64(def.MaxReservations))), - MaxReservationsPerIP: int(relayOpts.MaxReservationsPerIP.WithDefault(int64(def.MaxReservationsPerIP))), - MaxReservationsPerPeer: int(relayOpts.MaxReservationsPerPeer.WithDefault(int64(def.MaxReservationsPerPeer))), - MaxReservationsPerASN: int(relayOpts.MaxReservationsPerASN.WithDefault(int64(def.MaxReservationsPerASN))), + MaxCircuits: int(relayOpts.MaxCircuits.WithDefault(int64(def.MaxCircuits))), + BufferSize: 
int(relayOpts.BufferSize.WithDefault(int64(def.BufferSize))), + ReservationTTL: relayOpts.ReservationTTL.WithDefault(def.ReservationTTL), + MaxReservations: int(relayOpts.MaxReservations.WithDefault(int64(def.MaxReservations))), + MaxReservationsPerIP: int(relayOpts.MaxReservationsPerIP.WithDefault(int64(def.MaxReservationsPerIP))), + MaxReservationsPerASN: int(relayOpts.MaxReservationsPerASN.WithDefault(int64(def.MaxReservationsPerASN))), }))) } return diff --git a/core/node/libp2p/routing.go b/core/node/libp2p/routing.go index 697bf0f2e..6fafe37a4 100644 --- a/core/node/libp2p/routing.go +++ b/core/node/libp2p/routing.go @@ -95,7 +95,8 @@ func BaseRouting(cfg *config.Config) interface{} { if err != nil { return out, err } - bspeers, err := cfg.BootstrapPeers() + // Use auto-config resolution for actual connectivity + bspeers, err := cfg.BootstrapPeersWithAutoConf() if err != nil { return out, err } @@ -177,6 +178,12 @@ func ContentRouting(in p2pOnlineContentRoutingIn) routing.ContentRouting { } } +// ContentDiscovery narrows down the given content routing facility so that it +// only does discovery. +func ContentDiscovery(in irouting.ProvideManyRouter) routing.ContentDiscovery { + return in +} + type p2pOnlineRoutingIn struct { fx.In @@ -184,9 +191,8 @@ type p2pOnlineRoutingIn struct { Validator record.Validator } -// Routing will get all routers obtained from different methods -// (delegated routers, pub-sub, and so on) and add them all together -// using a TieredRouter. +// Routing will get all routers obtained from different methods (delegated +// routers, pub-sub, and so on) and add them all together using a ParallelRouter. func Routing(in p2pOnlineRoutingIn) irouting.ProvideManyRouter { routers := in.Routers @@ -206,7 +212,8 @@ func Routing(in p2pOnlineRoutingIn) irouting.ProvideManyRouter { return routinghelpers.NewComposableParallel(cRouters) } -// OfflineRouting provides a special Router to the routers list when we are creating a offline node. 
+// OfflineRouting provides a special Router to the routers list when we are +// creating an offline node. func OfflineRouting(dstore ds.Datastore, validator record.Validator) p2pRouterOut { return p2pRouterOut{ Router: Router{ @@ -291,24 +298,36 @@ func autoRelayFeeder(cfgPeering config.Peering, peerChan chan<- peer.AddrInfo) f } // Additionally, feed closest peers discovered via DHT - if dht == nil { - /* noop due to missing dht.WAN. happens in some unit tests, - not worth fixing as we will refactor this after go-libp2p 0.20 */ - continue + if dht != nil { + closestPeers, err := dht.WAN.GetClosestPeers(ctx, h.ID().String()) + if err == nil { + for _, p := range closestPeers { + addrs := h.Peerstore().Addrs(p) + if len(addrs) == 0 { + continue + } + dhtPeer := peer.AddrInfo{ID: p, Addrs: addrs} + select { + case peerChan <- dhtPeer: + case <-ctx.Done(): + return + } + } + } } - closestPeers, err := dht.WAN.GetClosestPeers(ctx, h.ID().String()) - if err != nil { - // no-op: usually 'failed to find any peer in table' during startup - continue - } - for _, p := range closestPeers { + + // Additionally, feed all connected swarm peers as potential relay candidates. + // This includes peers from HTTP routing, manual swarm connect, mDNS discovery, etc. 
+ // (fixes https://github.com/ipfs/kubo/issues/10899) + connectedPeers := h.Network().Peers() + for _, p := range connectedPeers { addrs := h.Peerstore().Addrs(p) if len(addrs) == 0 { continue } - dhtPeer := peer.AddrInfo{ID: p, Addrs: addrs} + swarmPeer := peer.AddrInfo{ID: p, Addrs: addrs} select { - case peerChan <- dhtPeer: + case peerChan <- swarmPeer: case <-ctx.Done(): return } diff --git a/core/node/libp2p/routingopt.go b/core/node/libp2p/routingopt.go index 869b7ef06..c8f22af2f 100644 --- a/core/node/libp2p/routingopt.go +++ b/core/node/libp2p/routingopt.go @@ -2,10 +2,12 @@ package libp2p import ( "context" + "fmt" "os" "strings" "time" + "github.com/ipfs/boxo/autoconf" "github.com/ipfs/go-datastore" "github.com/ipfs/kubo/config" irouting "github.com/ipfs/kubo/routing" @@ -31,38 +33,146 @@ type RoutingOptionArgs struct { type RoutingOption func(args RoutingOptionArgs) (routing.Routing, error) -// Default HTTP routers used in parallel to DHT when Routing.Type = "auto" -var defaultHTTPRouters = []string{ - "https://cid.contact", // https://github.com/ipfs/kubo/issues/9422#issuecomment-1338142084 - // TODO: add an independent router from Cloudflare +var noopRouter = routinghelpers.Null{} + +// EndpointSource tracks where a URL came from to determine appropriate capabilities +type EndpointSource struct { + URL string + SupportsRead bool // came from DelegatedRoutersWithAutoConf (Read operations) + SupportsWrite bool // came from DelegatedPublishersWithAutoConf (Write operations) } -func init() { - // Override HTTP routers if custom ones were passed via env - if routers := os.Getenv("IPFS_HTTP_ROUTERS"); routers != "" { - defaultHTTPRouters = strings.Split(routers, " ") +// determineCapabilities determines endpoint capabilities based on URL path and source +func determineCapabilities(endpoint EndpointSource) (string, autoconf.EndpointCapabilities, error) { + parsed, err := autoconf.DetermineKnownCapabilities(endpoint.URL, endpoint.SupportsRead, 
endpoint.SupportsWrite) + if err != nil { + log.Debugf("Skipping endpoint %q: %v", endpoint.URL, err) + return "", autoconf.EndpointCapabilities{}, nil // Return empty caps, not error } + + return parsed.BaseURL, parsed.Capabilities, nil +} + +// collectAllEndpoints gathers URLs from both router and publisher sources +func collectAllEndpoints(cfg *config.Config) []EndpointSource { + var endpoints []EndpointSource + + // Get router URLs (Read operations) + var routerURLs []string + if envRouters := os.Getenv(config.EnvHTTPRouters); envRouters != "" { + // Use environment variable override if set (space or comma separated) + splitFunc := func(r rune) bool { return r == ',' || r == ' ' } + routerURLs = strings.FieldsFunc(envRouters, splitFunc) + log.Warnf("Using HTTP routers from %s environment variable instead of config/autoconf: %v", config.EnvHTTPRouters, routerURLs) + } else { + // Use delegated routers from autoconf + routerURLs = cfg.DelegatedRoutersWithAutoConf() + // No fallback - if autoconf doesn't provide endpoints, use empty list + // This exposes any autoconf issues rather than masking them with hardcoded defaults + } + + // Add router URLs to collection + for _, url := range routerURLs { + endpoints = append(endpoints, EndpointSource{ + URL: url, + SupportsRead: true, + SupportsWrite: false, + }) + } + + // Get publisher URLs (Write operations) + publisherURLs := cfg.DelegatedPublishersWithAutoConf() + + // Add publisher URLs, merging with existing router URLs if they match + for _, url := range publisherURLs { + found := false + for i, existing := range endpoints { + if existing.URL == url { + endpoints[i].SupportsWrite = true + found = true + break + } + } + if !found { + endpoints = append(endpoints, EndpointSource{ + URL: url, + SupportsRead: false, + SupportsWrite: true, + }) + } + } + + return endpoints } func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.ParallelRouter, error) { var routers []*routinghelpers.ParallelRouter - 
// Append HTTP routers for additional speed - for _, endpoint := range defaultHTTPRouters { - httpRouter, err := irouting.ConstructHTTPRouter(endpoint, cfg.Identity.PeerID, httpAddrsFromConfig(cfg.Addresses), cfg.Identity.PrivKey) + httpRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled) + + // Collect URLs from both router and publisher sources + endpoints := collectAllEndpoints(cfg) + + // Group endpoints by origin (base URL) and aggregate capabilities + originCapabilities := make(map[string]autoconf.EndpointCapabilities) + for _, endpoint := range endpoints { + // Parse endpoint and determine capabilities based on source + baseURL, capabilities, err := determineCapabilities(endpoint) + if err != nil { + return nil, fmt.Errorf("failed to parse endpoint %q: %w", endpoint.URL, err) + } + + // Aggregate capabilities for this origin + existing := originCapabilities[baseURL] + existing.Merge(capabilities) + originCapabilities[baseURL] = existing + } + + // Create single HTTP router and composer per origin + for baseURL, capabilities := range originCapabilities { + // Construct HTTP router using base URL (without path) + httpRouter, err := irouting.ConstructHTTPRouter(baseURL, cfg.Identity.PeerID, httpAddrsFromConfig(cfg.Addresses), cfg.Identity.PrivKey, httpRetrievalEnabled) if err != nil { return nil, err } - r := &irouting.Composer{ - GetValueRouter: routinghelpers.Null{}, - PutValueRouter: routinghelpers.Null{}, - ProvideRouter: routinghelpers.Null{}, // modify this when indexers supports provide - FindPeersRouter: routinghelpers.Null{}, - FindProvidersRouter: httpRouter, + // Configure router operations based on aggregated capabilities + // https://specs.ipfs.tech/routing/http-routing-v1/ + composer := &irouting.Composer{ + GetValueRouter: noopRouter, // Default disabled, enabled below based on capabilities + PutValueRouter: noopRouter, // Default disabled, enabled below based on capabilities + ProvideRouter: noopRouter, 
// we don't have spec for sending provides to /routing/v1 (revisit once https://github.com/ipfs/specs/pull/378 or similar is ratified) + FindPeersRouter: noopRouter, // Default disabled, enabled below based on capabilities + FindProvidersRouter: noopRouter, // Default disabled, enabled below based on capabilities + } + + // Enable specific capabilities + if capabilities.IPNSGet { + composer.GetValueRouter = httpRouter // GET /routing/v1/ipns for IPNS resolution + } + if capabilities.IPNSPut { + composer.PutValueRouter = httpRouter // PUT /routing/v1/ipns for IPNS publishing + } + if capabilities.Peers { + composer.FindPeersRouter = httpRouter // GET /routing/v1/peers + } + if capabilities.Providers { + composer.FindProvidersRouter = httpRouter // GET /routing/v1/providers + } + + // Handle special cases and backward compatibility + if baseURL == config.CidContactRoutingURL { + // Special-case: cid.contact only supports /routing/v1/providers/cid endpoint + // Override any capabilities detected from URL path to ensure only providers is enabled + // TODO: Consider moving this to configuration or removing once cid.contact adds more capabilities + composer.GetValueRouter = noopRouter + composer.PutValueRouter = noopRouter + composer.ProvideRouter = noopRouter + composer.FindPeersRouter = noopRouter + composer.FindProvidersRouter = httpRouter // Only providers supported } routers = append(routers, &routinghelpers.ParallelRouter{ - Router: r, + Router: composer, IgnoreError: true, // https://github.com/ipfs/kubo/pull/9475#discussion_r1042507387 Timeout: 15 * time.Second, // 5x server value from https://github.com/ipfs/kubo/pull/9475#discussion_r1042428529 DoNotWaitForSearchValue: true, @@ -72,6 +182,31 @@ func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.Parallel return routers, nil } +// ConstructDelegatedOnlyRouting returns routers used when Routing.Type is set to "delegated" +// This provides HTTP-only routing without DHT, using only delegated 
routers and IPNS publishers. +// Useful for environments where DHT connectivity is not available or desired +func ConstructDelegatedOnlyRouting(cfg *config.Config) RoutingOption { + return func(args RoutingOptionArgs) (routing.Routing, error) { + // Use only HTTP routers (includes both read and write capabilities) - no DHT + var routers []*routinghelpers.ParallelRouter + + // Add HTTP delegated routers (includes both router and publisher capabilities) + httpRouters, err := constructDefaultHTTPRouters(cfg) + if err != nil { + return nil, err + } + routers = append(routers, httpRouters...) + + // Validate that we have at least one router configured + if len(routers) == 0 { + return nil, fmt.Errorf("no delegated routers or publishers configured for 'delegated' routing mode") + } + + routing := routinghelpers.NewComposableParallel(routers) + return routing, nil + } +} + // ConstructDefaultRouting returns routers used when Routing.Type is unset or set to "auto" func ConstructDefaultRouting(cfg *config.Config, routingOpt RoutingOption) RoutingOption { return func(args RoutingOptionArgs) (routing.Routing, error) { @@ -134,7 +269,7 @@ func constructDHTRouting(mode dht.ModeOpt) RoutingOption { } // ConstructDelegatedRouting is used when Routing.Type = "custom" -func ConstructDelegatedRouting(routers config.Routers, methods config.Methods, peerID string, addrs config.Addresses, privKey string) RoutingOption { +func ConstructDelegatedRouting(routers config.Routers, methods config.Methods, peerID string, addrs config.Addresses, privKey string, httpRetrieval bool) RoutingOption { return func(args RoutingOptionArgs) (routing.Routing, error) { return irouting.Parse(routers, methods, &irouting.ExtraDHTParams{ @@ -145,9 +280,10 @@ func ConstructDelegatedRouting(routers config.Routers, methods config.Methods, p Context: args.Ctx, }, &irouting.ExtraHTTPParams{ - PeerID: peerID, - Addrs: httpAddrsFromConfig(addrs), - PrivKeyB64: privKey, + PeerID: peerID, + Addrs: 
httpAddrsFromConfig(addrs), + PrivKeyB64: privKey, + HTTPRetrieval: httpRetrieval, }, ) } diff --git a/core/node/libp2p/routingopt_test.go b/core/node/libp2p/routingopt_test.go index 801fc0344..1a06045d9 100644 --- a/core/node/libp2p/routingopt_test.go +++ b/core/node/libp2p/routingopt_test.go @@ -3,7 +3,9 @@ package libp2p import ( "testing" + "github.com/ipfs/boxo/autoconf" config "github.com/ipfs/kubo/config" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -32,3 +34,191 @@ func TestHttpAddrsFromConfig(t *testing.T) { AppendAnnounce: []string{"/ip4/192.168.0.2/tcp/4001"}, }), "AppendAnnounce addrs should be included if specified") } + +func TestDetermineCapabilities(t *testing.T) { + tests := []struct { + name string + endpoint EndpointSource + expectedBaseURL string + expectedCapabilities autoconf.EndpointCapabilities + expectError bool + }{ + { + name: "URL with no path should have all Read capabilities", + endpoint: EndpointSource{ + URL: "https://example.com", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: true, + Peers: true, + IPNSGet: true, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with trailing slash should have all Read capabilities", + endpoint: EndpointSource{ + URL: "https://example.com/", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: true, + Peers: true, + IPNSGet: true, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with IPNS path should have only IPNS capabilities", + endpoint: EndpointSource{ + URL: "https://example.com/routing/v1/ipns", + SupportsRead: true, + SupportsWrite: true, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: false, + Peers: false, + IPNSGet: true, + IPNSPut: 
true, + }, + expectError: false, + }, + { + name: "URL with providers path should have only Providers capability", + endpoint: EndpointSource{ + URL: "https://example.com/routing/v1/providers", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: true, + Peers: false, + IPNSGet: false, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with peers path should have only Peers capability", + endpoint: EndpointSource{ + URL: "https://example.com/routing/v1/peers", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: false, + Peers: true, + IPNSGet: false, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with Write support only should enable IPNSPut for no-path endpoint", + endpoint: EndpointSource{ + URL: "https://example.com", + SupportsRead: false, + SupportsWrite: true, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: false, + Peers: false, + IPNSGet: false, + IPNSPut: true, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + baseURL, capabilities, err := determineCapabilities(tt.endpoint) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expectedBaseURL, baseURL) + assert.Equal(t, tt.expectedCapabilities, capabilities) + }) + } +} + +func TestEndpointCapabilitiesReadWriteLogic(t *testing.T) { + t.Run("Read endpoint with no path should enable read capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com", + SupportsRead: true, + SupportsWrite: false, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Read endpoint with no path should enable all read capabilities + 
assert.True(t, capabilities.Providers) + assert.True(t, capabilities.Peers) + assert.True(t, capabilities.IPNSGet) + assert.False(t, capabilities.IPNSPut) // Write capability should be false + }) + + t.Run("Write endpoint with no path should enable write capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com", + SupportsRead: false, + SupportsWrite: true, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Write endpoint with no path should only enable IPNS write capability + assert.False(t, capabilities.Providers) + assert.False(t, capabilities.Peers) + assert.False(t, capabilities.IPNSGet) + assert.True(t, capabilities.IPNSPut) // Only write capability should be true + }) + + t.Run("Specific path should only enable matching capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com/routing/v1/ipns", + SupportsRead: true, + SupportsWrite: true, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Specific IPNS path should only enable IPNS capabilities based on source + assert.False(t, capabilities.Providers) + assert.False(t, capabilities.Peers) + assert.True(t, capabilities.IPNSGet) // Read capability enabled + assert.True(t, capabilities.IPNSPut) // Write capability enabled + }) + + t.Run("Unsupported paths should result in empty capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com/routing/v1/unsupported", + SupportsRead: true, + SupportsWrite: false, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Unsupported paths should result in no capabilities + assert.False(t, capabilities.Providers) + assert.False(t, capabilities.Peers) + assert.False(t, capabilities.IPNSGet) + assert.False(t, capabilities.IPNSPut) + }) +} diff --git a/core/node/libp2p/smux.go b/core/node/libp2p/smux.go index d52b306d8..5b87f7d08 100644 --- 
a/core/node/libp2p/smux.go +++ b/core/node/libp2p/smux.go @@ -1,7 +1,7 @@ package libp2p import ( - "fmt" + "errors" "os" "github.com/ipfs/kubo/config" @@ -12,10 +12,10 @@ import ( func makeSmuxTransportOption(tptConfig config.Transports) (libp2p.Option, error) { if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" { - return nil, fmt.Errorf("configuring muxers with LIBP2P_MUX_PREFS is no longer supported, use Swarm.Transports.Multiplexers") + return nil, errors.New("configuring muxers with LIBP2P_MUX_PREFS is no longer supported, use Swarm.Transports.Multiplexers") } if tptConfig.Multiplexers.Yamux < 0 { - return nil, fmt.Errorf("running libp2p with Swarm.Transports.Multiplexers.Yamux disabled is not supported") + return nil, errors.New("running libp2p with Swarm.Transports.Multiplexers.Yamux disabled is not supported") } return libp2p.Muxer(yamux.ID, yamux.DefaultTransport), nil diff --git a/core/node/libp2p/transport.go b/core/node/libp2p/transport.go index 6628adc32..3e5ab9568 100644 --- a/core/node/libp2p/transport.go +++ b/core/node/libp2p/transport.go @@ -2,8 +2,10 @@ package libp2p import ( "fmt" + "os" "github.com/ipfs/kubo/config" + "github.com/ipshipyard/p2p-forge/client" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/metrics" quic "github.com/libp2p/go-libp2p/p2p/transport/quic" @@ -16,20 +18,35 @@ import ( ) func Transports(tptConfig config.Transports) interface{} { - return func(pnet struct { + return func(params struct { fx.In - Fprint PNetFingerprint `optional:"true"` + Fprint PNetFingerprint `optional:"true"` + ForgeMgr *client.P2PForgeCertMgr `optional:"true"` }, ) (opts Libp2pOpts, err error) { - privateNetworkEnabled := pnet.Fprint != nil + privateNetworkEnabled := params.Fprint != nil - if tptConfig.Network.TCP.WithDefault(true) { + tcpEnabled := tptConfig.Network.TCP.WithDefault(true) + wsEnabled := tptConfig.Network.Websocket.WithDefault(true) + if tcpEnabled { // TODO(9290): Make WithMetrics configurable opts.Opts = 
append(opts.Opts, libp2p.Transport(tcp.NewTCPTransport, tcp.WithMetrics())) } - if tptConfig.Network.Websocket.WithDefault(true) { - opts.Opts = append(opts.Opts, libp2p.Transport(websocket.New)) + if wsEnabled { + if params.ForgeMgr == nil { + opts.Opts = append(opts.Opts, libp2p.Transport(websocket.New)) + } else { + opts.Opts = append(opts.Opts, libp2p.Transport(websocket.New, websocket.WithTLSConfig(params.ForgeMgr.TLSConfig()))) + } + } + + if tcpEnabled && wsEnabled && os.Getenv("LIBP2P_TCP_MUX") != "false" { + if privateNetworkEnabled { + log.Error("libp2p.ShareTCPListener() is not supported in private networks, please disable Swarm.Transports.Network.Websocket or run with LIBP2P_TCP_MUX=false to make this message go away") + } else { + opts.Opts = append(opts.Opts, libp2p.ShareTCPListener()) + } } if tptConfig.Network.QUIC.WithDefault(!privateNetworkEnabled) { diff --git a/core/node/provider.go b/core/node/provider.go index 7f37cd8b1..57d70feb6 100644 --- a/core/node/provider.go +++ b/core/node/provider.go @@ -2,178 +2,923 @@ package node import ( "context" + "errors" "fmt" "time" "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/fetcher" + "github.com/ipfs/boxo/mfs" pin "github.com/ipfs/boxo/pinning/pinner" - provider "github.com/ipfs/boxo/provider" + "github.com/ipfs/boxo/pinning/pinner/dspinner" + "github.com/ipfs/boxo/provider" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-datastore/query" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/repo" irouting "github.com/ipfs/kubo/routing" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/amino" + "github.com/libp2p/go-libp2p-kad-dht/dual" + "github.com/libp2p/go-libp2p-kad-dht/fullrt" + dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb" + dhtprovider "github.com/libp2p/go-libp2p-kad-dht/provider" + "github.com/libp2p/go-libp2p-kad-dht/provider/buffered" + ddhtprovider 
"github.com/libp2p/go-libp2p-kad-dht/provider/dual" + "github.com/libp2p/go-libp2p-kad-dht/provider/keystore" + routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" + "github.com/libp2p/go-libp2p/core/host" + peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + ma "github.com/multiformats/go-multiaddr" + mh "github.com/multiformats/go-multihash" "go.uber.org/fx" ) -func ProviderSys(reprovideInterval time.Duration, acceleratedDHTClient bool) fx.Option { - const magicThroughputReportCount = 128 - return fx.Provide(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, keyProvider provider.KeyChanFunc, repo repo.Repo, bs blockstore.Blockstore) (provider.System, error) { - opts := []provider.Option{ - provider.Online(cr), - provider.ReproviderInterval(reprovideInterval), - provider.KeyProvider(keyProvider), +const ( + // The size of a batch that will be used for calculating average announcement + // time per CID, inside of boxo/provider.ThroughputReport + // and in 'ipfs stats provide' report. + // Used when Provide.DHT.SweepEnabled=false + sampledBatchSize = 1000 + + // Datastore key used to store previous reprovide strategy. + reprovideStrategyKey = "/reprovideStrategy" + + // Datastore namespace prefix for provider data. + providerDatastorePrefix = "provider" + // Datastore path for the provider keystore. + keystoreDatastorePath = "keystore" +) + +// Interval between reprovide queue monitoring checks for slow reprovide alerts. +// Used when Provide.DHT.SweepEnabled=true +const reprovideAlertPollInterval = 15 * time.Minute + +// Number of consecutive polling intervals with sustained queue growth before +// triggering a slow reprovide alert (3 intervals = 45 minutes). +// Used when Provide.DHT.SweepEnabled=true +const consecutiveAlertsThreshold = 3 + +// DHTProvider is an interface for providing keys to a DHT swarm. 
It holds a +// state of keys to be advertised, and is responsible for periodically +// publishing provider records for these keys to the DHT swarm before the +// records expire. +type DHTProvider interface { + // StartProviding ensures keys are periodically advertised to the DHT swarm. + // + // If the `keys` aren't currently being reprovided, they are added to the + // queue to be provided to the DHT swarm as soon as possible, and scheduled + // to be reprovided periodically. If `force` is set to true, all keys are + // provided to the DHT swarm, regardless of whether they were already being + // reprovided in the past. `keys` keep being reprovided until `StopProviding` + // is called. + // + // This operation is asynchronous, it returns as soon as the `keys` are added + // to the provide queue, and provides happens asynchronously. + // + // Returns an error if the keys couldn't be added to the provide queue. This + // can happen if the provider is closed or if the node is currently Offline + // (either never bootstrapped, or disconnected since more than `OfflineDelay`). + // The schedule and provide queue depend on the network size, hence recent + // network connectivity is essential. + StartProviding(force bool, keys ...mh.Multihash) error + // ProvideOnce sends provider records for the specified keys to the DHT swarm + // only once. It does not automatically reprovide those keys afterward. + // + // Add the supplied multihashes to the provide queue, and return immediately. + // The provide operation happens asynchronously. + // + // Returns an error if the keys couldn't be added to the provide queue. This + // can happen if the provider is closed or if the node is currently Offline + // (either never bootstrapped, or disconnected since more than `OfflineDelay`). + // The schedule and provide queue depend on the network size, hence recent + // network connectivity is essential. 
+ ProvideOnce(keys ...mh.Multihash) error + // Clear clears the all the keys from the provide queue and returns the number + // of keys that were cleared. + // + // The keys are not deleted from the keystore, so they will continue to be + // reprovided as scheduled. + Clear() int + // RefreshSchedule scans the Keystore for any keys that are not currently + // scheduled for reproviding. If such keys are found, it schedules their + // associated keyspace region to be reprovided. + // + // This function doesn't remove prefixes that have no keys from the schedule. + // This is done automatically during the reprovide operation if a region has no + // keys. + // + // Returns an error if the provider is closed or if the node is currently + // Offline (either never bootstrapped, or disconnected since more than + // `OfflineDelay`). The schedule depends on the network size, hence recent + // network connectivity is essential. + RefreshSchedule() error + Close() error +} + +var ( + _ DHTProvider = &ddhtprovider.SweepingProvider{} + _ DHTProvider = &dhtprovider.SweepingProvider{} + _ DHTProvider = &NoopProvider{} + _ DHTProvider = &LegacyProvider{} +) + +// NoopProvider is a no-operation provider implementation that does nothing. +// It is used when providing is disabled or when no DHT is available. +// All methods return successfully without performing any actual operations. +type NoopProvider struct{} + +func (r *NoopProvider) StartProviding(bool, ...mh.Multihash) error { return nil } +func (r *NoopProvider) ProvideOnce(...mh.Multihash) error { return nil } +func (r *NoopProvider) Clear() int { return 0 } +func (r *NoopProvider) RefreshSchedule() error { return nil } +func (r *NoopProvider) Close() error { return nil } + +// LegacyProvider is a wrapper around the boxo/provider.System that implements +// the DHTProvider interface. 
This provider manages reprovides using a burst +// strategy where it sequentially reprovides all keys at once during each +// reprovide interval, rather than spreading the load over time. +// +// This is the legacy provider implementation that can cause resource spikes +// during reprovide operations. For more efficient providing, consider using +// the SweepingProvider which spreads the load over the reprovide interval. +type LegacyProvider struct { + provider.System +} + +func (r *LegacyProvider) StartProviding(force bool, keys ...mh.Multihash) error { + return r.ProvideOnce(keys...) +} + +func (r *LegacyProvider) ProvideOnce(keys ...mh.Multihash) error { + if many, ok := r.System.(routinghelpers.ProvideManyRouter); ok { + return many.ProvideMany(context.Background(), keys) + } + + for _, k := range keys { + if err := r.Provide(context.Background(), cid.NewCidV1(cid.Raw, k), true); err != nil { + return err } - if !acceleratedDHTClient { - // The estimation kinda suck if you are running with accelerated DHT client, - // given this message is just trying to push people to use the acceleratedDHTClient - // let's not report on through if it's in use - opts = append(opts, - provider.ThroughputReport(func(reprovide bool, complete bool, keysProvided uint, duration time.Duration) bool { - avgProvideSpeed := duration / time.Duration(keysProvided) - count := uint64(keysProvided) + } + return nil +} - if !reprovide || !complete { - // We don't know how many CIDs we have to provide, try to fetch it from the blockstore. - // But don't try for too long as this might be very expensive if you have a huge datastore. - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) - defer cancel() +func (r *LegacyProvider) Clear() int { + return r.System.Clear() +} - // FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup. 
- ch, err := bs.AllKeysChan(ctx) - if err != nil { - logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err) - return false - } - count = 0 - countLoop: - for { - select { - case _, ok := <-ch: - if !ok { - break countLoop - } - count++ - case <-ctx.Done(): - // really big blockstore mode +func (r *LegacyProvider) RefreshSchedule() error { return nil } - // how many blocks would be in a 10TiB blockstore with 128KiB blocks. - const probableBigBlockstore = (10 * 1024 * 1024 * 1024 * 1024) / (128 * 1024) - // How long per block that lasts us. - expectedProvideSpeed := reprovideInterval / probableBigBlockstore - if avgProvideSpeed > expectedProvideSpeed { - logger.Errorf(` -🔔🔔🔔 YOU MAY BE FALLING BEHIND DHT REPROVIDES! 🔔🔔🔔 +// LegacyProviderOpt creates a LegacyProvider to be used as provider in the +// IpfsNode +func LegacyProviderOpt(reprovideInterval time.Duration, strategy string, acceleratedDHTClient bool, provideWorkerCount int) fx.Option { + system := fx.Provide( + fx.Annotate(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, repo repo.Repo) (*LegacyProvider, error) { + // Initialize provider.System first, before pinner/blockstore/etc. + // The KeyChanFunc will be set later via SetKeyProvider() once we have + // created the pinner, blockstore and other dependencies. 
+ opts := []provider.Option{ + provider.Online(cr), + provider.ReproviderInterval(reprovideInterval), + provider.ProvideWorkerCount(provideWorkerCount), + } + if !acceleratedDHTClient && reprovideInterval > 0 { + // The estimation kinda suck if you are running with accelerated DHT client, + // given this message is just trying to push people to use the acceleratedDHTClient + // let's not report on through if it's in use + opts = append(opts, + provider.ThroughputReport(func(reprovide bool, complete bool, keysProvided uint, duration time.Duration) bool { + avgProvideSpeed := duration / time.Duration(keysProvided) + count := uint64(keysProvided) -⚠️ Your system might be struggling to keep up with DHT reprovides! -This means your content could partially or completely inaccessible on the network. -We observed that you recently provided %d keys at an average rate of %v per key. + if !reprovide || !complete { + // We don't know how many CIDs we have to provide, try to fetch it from the blockstore. + // But don't try for too long as this might be very expensive if you have a huge datastore. + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() -🕑 An attempt to estimate your blockstore size timed out after 5 minutes, -implying your blockstore might be exceedingly large. Assuming a considerable -size of 10TiB, it would take %v to provide the complete set. + // FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup. + // Note: talk to datastore directly, as to not depend on Blockstore here. 
+ qr, err := repo.Datastore().Query(ctx, query.Query{ + Prefix: blockstore.BlockPrefix.String(), + KeysOnly: true, + }) + if err != nil { + logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err) + return false + } + defer qr.Close() + count = 0 + countLoop: + for { + select { + case _, ok := <-qr.Next(): + if !ok { + break countLoop + } + count++ + case <-ctx.Done(): + // really big blockstore mode -⏰ The total provide time needs to stay under your reprovide interval (%v) to prevent falling behind! + // how many blocks would be in a 10TiB blockstore with 128KiB blocks. + const probableBigBlockstore = (10 * 1024 * 1024 * 1024 * 1024) / (128 * 1024) + // How long per block that lasts us. + expectedProvideSpeed := reprovideInterval / probableBigBlockstore + if avgProvideSpeed > expectedProvideSpeed { + logger.Errorf(` +🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔 -💡 Consider enabling the Accelerated DHT to enhance your system performance. See: -https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient`, - keysProvided, avgProvideSpeed, avgProvideSpeed*probableBigBlockstore, reprovideInterval) - return false +Your node may be falling behind on DHT reprovides, which could affect content availability. + +Observed: %d keys at %v per key +Estimated: Assuming 10TiB blockstore, would take %v to complete +⏰ Must finish within %v (Provide.DHT.Interval) + +Solutions (try in order): +1. Enable Provide.DHT.SweepEnabled=true (recommended) +2. Increase Provide.DHT.MaxWorkers if needed +3. Enable Routing.AcceleratedDHTClient=true (last resort, resource intensive) + +Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide`, + keysProvided, avgProvideSpeed, avgProvideSpeed*probableBigBlockstore, reprovideInterval) + return false + } } } } - } - // How long per block that lasts us. 
- expectedProvideSpeed := reprovideInterval - if count > 0 { - expectedProvideSpeed = reprovideInterval / time.Duration(count) - } + // How long per block that lasts us. + expectedProvideSpeed := reprovideInterval + if count > 0 { + expectedProvideSpeed = reprovideInterval / time.Duration(count) + } - if avgProvideSpeed > expectedProvideSpeed { - logger.Errorf(` -🔔🔔🔔 YOU ARE FALLING BEHIND DHT REPROVIDES! 🔔🔔🔔 + if avgProvideSpeed > expectedProvideSpeed { + logger.Errorf(` +🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔 -⚠️ Your system is struggling to keep up with DHT reprovides! -This means your content could partially or completely inaccessible on the network. -We observed that you recently provided %d keys at an average rate of %v per key. +Your node is falling behind on DHT reprovides, which will affect content availability. -💾 Your total CID count is ~%d which would total at %v reprovide process. +Observed: %d keys at %v per key +Confirmed: ~%d total CIDs requiring %v to complete +⏰ Must finish within %v (Provide.DHT.Interval) -⏰ The total provide time needs to stay under your reprovide interval (%v) to prevent falling behind! +Solutions (try in order): +1. Enable Provide.DHT.SweepEnabled=true (recommended) +2. Increase Provide.DHT.MaxWorkers if needed +3. Enable Routing.AcceleratedDHTClient=true (last resort, resource intensive) -💡 Consider enabling the Accelerated DHT to enhance your reprovide throughput. See: -https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient`, - keysProvided, avgProvideSpeed, count, avgProvideSpeed*time.Duration(count), reprovideInterval) - } - return false - }, magicThroughputReportCount)) - } - sys, err := provider.New(repo.Datastore(), opts...) 
+Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide`, + keysProvided, avgProvideSpeed, count, avgProvideSpeed*time.Duration(count), reprovideInterval) + } + return false + }, sampledBatchSize)) + } + + sys, err := provider.New(repo.Datastore(), opts...) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return sys.Close() + }, + }) + + prov := &LegacyProvider{sys} + handleStrategyChange(strategy, prov, repo.Datastore()) + + return prov, nil + }, + fx.As(new(provider.System)), + fx.As(new(DHTProvider)), + ), + ) + setKeyProvider := fx.Invoke(func(lc fx.Lifecycle, system provider.System, keyProvider provider.KeyChanFunc) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + // SetKeyProvider breaks the circular dependency between provider, blockstore, and pinner. + // We cannot create the blockstore without the provider (it needs to provide blocks), + // and we cannot determine the reproviding strategy without the pinner/blockstore. + // This deferred initialization allows us to create provider.System first, + // then set the actual key provider function after all dependencies are ready. + system.SetKeyProvider(keyProvider) + return nil + }, + }) + }) + return fx.Options( + system, + setKeyProvider, + ) +} + +type dhtImpl interface { + routing.Routing + GetClosestPeers(context.Context, string) ([]peer.ID, error) + Host() host.Host + MessageSender() dht_pb.MessageSender +} + +type fullrtRouter struct { + *fullrt.FullRT +} + +// GetClosestPeers overrides fullrt.FullRT's GetClosestPeers and returns an +// error if the fullrt's initial network crawl isn't complete yet. 
+func (fr *fullrtRouter) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) { + if !fr.Ready() { + return nil, errors.New("fullrt: initial network crawl still running") + } + return fr.FullRT.GetClosestPeers(ctx, key) +} + +var ( + _ dhtImpl = &dht.IpfsDHT{} + _ dhtImpl = &fullrtRouter{} +) + +type addrsFilter interface { + FilteredAddrs() []ma.Multiaddr +} + +func SweepingProviderOpt(cfg *config.Config) fx.Option { + reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) + type providerInput struct { + fx.In + DHT routing.Routing `name:"dhtc"` + Repo repo.Repo + } + sweepingReprovider := fx.Provide(func(in providerInput) (DHTProvider, *keystore.ResettableKeystore, error) { + ds := namespace.Wrap(in.Repo.Datastore(), datastore.NewKey(providerDatastorePrefix)) + ks, err := keystore.NewResettableKeystore(ds, + keystore.WithPrefixBits(16), + keystore.WithDatastorePath(keystoreDatastorePath), + keystore.WithBatchSize(int(cfg.Provide.DHT.KeystoreBatchSize.WithDefault(config.DefaultProvideDHTKeystoreBatchSize))), + ) if err != nil { - return nil, err + return nil, nil, err + } + // Constants for buffered provider configuration + // These values match the upstream defaults from go-libp2p-kad-dht and have been battle-tested + const ( + // bufferedDsName is the datastore namespace used by the buffered provider. + // The dsqueue persists operations here to handle large data additions without + // being memory-bound, allowing operations on hardware with limited RAM and + // enabling core operations to return instantly while processing happens async. + bufferedDsName = "bprov" + + // bufferedBatchSize controls how many operations are dequeued and processed + // together from the datastore queue. The worker processes up to this many + // operations at once, grouping them by type for efficiency. 
+ bufferedBatchSize = 1 << 10 // 1024 items + + // bufferedIdleWriteTime is an implementation detail of go-dsqueue that controls + // how long the datastore buffer waits for new multihashes to arrive before + // flushing in-memory items to the datastore. This does NOT affect providing speed - + // provides happen as fast as possible via a dedicated worker that continuously + // processes the queue regardless of this timing. + bufferedIdleWriteTime = time.Minute + ) + + bufferedProviderOpts := []buffered.Option{ + buffered.WithBatchSize(bufferedBatchSize), + buffered.WithDsName(bufferedDsName), + buffered.WithIdleWriteTime(bufferedIdleWriteTime), + } + var impl dhtImpl + switch inDht := in.DHT.(type) { + case *dht.IpfsDHT: + if inDht != nil { + impl = inDht + } + case *dual.DHT: + if inDht != nil { + prov, err := ddhtprovider.New(inDht, + ddhtprovider.WithKeystore(ks), + ddhtprovider.WithDatastore(ds), + ddhtprovider.WithResumeCycle(cfg.Provide.DHT.ResumeEnabled.WithDefault(config.DefaultProvideDHTResumeEnabled)), + + ddhtprovider.WithReprovideInterval(reprovideInterval), + ddhtprovider.WithMaxReprovideDelay(time.Hour), + ddhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)), + ddhtprovider.WithConnectivityCheckOnlineInterval(1*time.Minute), + + ddhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))), + ddhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))), + ddhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))), + ddhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))), + ) + if err != nil { + return nil, nil, err + } + return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil 
+ } + case *fullrt.FullRT: + if inDht != nil { + impl = &fullrtRouter{inDht} + } + } + if impl == nil { + return &NoopProvider{}, nil, nil + } + + var selfAddrsFunc func() []ma.Multiaddr + if imlpFilter, ok := impl.(addrsFilter); ok { + selfAddrsFunc = imlpFilter.FilteredAddrs + } else { + selfAddrsFunc = func() []ma.Multiaddr { return impl.Host().Addrs() } + } + opts := []dhtprovider.Option{ + dhtprovider.WithKeystore(ks), + dhtprovider.WithDatastore(ds), + dhtprovider.WithResumeCycle(cfg.Provide.DHT.ResumeEnabled.WithDefault(config.DefaultProvideDHTResumeEnabled)), + dhtprovider.WithHost(impl.Host()), + dhtprovider.WithRouter(impl), + dhtprovider.WithMessageSender(impl.MessageSender()), + dhtprovider.WithSelfAddrs(selfAddrsFunc), + dhtprovider.WithAddLocalRecord(func(h mh.Multihash) error { + return impl.Provide(context.Background(), cid.NewCidV1(cid.Raw, h), false) + }), + + dhtprovider.WithReplicationFactor(amino.DefaultBucketSize), + dhtprovider.WithReprovideInterval(reprovideInterval), + dhtprovider.WithMaxReprovideDelay(time.Hour), + dhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)), + dhtprovider.WithConnectivityCheckOnlineInterval(1 * time.Minute), + + dhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))), + dhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))), + dhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))), + dhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))), + } + + prov, err := dhtprovider.New(opts...) 
+ if err != nil { + return nil, nil, err + } + return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil + }) + + type keystoreInput struct { + fx.In + Provider DHTProvider + Keystore *keystore.ResettableKeystore + KeyProvider provider.KeyChanFunc + } + initKeystore := fx.Invoke(func(lc fx.Lifecycle, in keystoreInput) { + // Skip keystore initialization for NoopProvider + if _, ok := in.Provider.(*NoopProvider); ok { + return + } + + var ( + cancel context.CancelFunc + done = make(chan struct{}) + ) + + syncKeystore := func(ctx context.Context) error { + kcf, err := in.KeyProvider(ctx) + if err != nil { + return err + } + if err := in.Keystore.ResetCids(ctx, kcf); err != nil { + return err + } + if err := in.Provider.RefreshSchedule(); err != nil { + logger.Infow("refreshing provider schedule", "err", err) + } + return nil + } + + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + // Set the KeyProvider as a garbage collection function for the + // keystore. Periodically purge the Keystore from all its keys and + // replace them with the keys that needs to be reprovided, coming from + // the KeyChanFunc. So far, this is the less worse way to remove CIDs + // that shouldn't be reprovided from the provider's state. + go func() { + // Sync the keystore once at startup. This operation is async since + // we need to walk the DAG of objects matching the provide strategy, + // which can take a while. 
+ strategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy) + logger.Infow("provider keystore sync started", "strategy", strategy) + if err := syncKeystore(ctx); err != nil { + if ctx.Err() == nil { + logger.Errorw("provider keystore sync failed", "err", err, "strategy", strategy) + } else { + logger.Debugw("provider keystore sync interrupted by shutdown", "err", err, "strategy", strategy) + } + return + } + logger.Infow("provider keystore sync completed", "strategy", strategy) + }() + + gcCtx, c := context.WithCancel(context.Background()) + cancel = c + + go func() { // garbage collection loop for cids to reprovide + defer close(done) + ticker := time.NewTicker(reprovideInterval) + defer ticker.Stop() + + for { + select { + case <-gcCtx.Done(): + return + case <-ticker.C: + if err := syncKeystore(gcCtx); err != nil { + logger.Errorw("provider keystore sync", "err", err) + } + } + } + }() + return nil + }, + OnStop: func(ctx context.Context) error { + if cancel != nil { + cancel() + } + select { + case <-done: + case <-ctx.Done(): + return ctx.Err() + } + // Keystore will be closed by ensureProviderClosesBeforeKeystore hook + // to guarantee provider closes before keystore. + return nil + }, + }) + }) + + // ensureProviderClosesBeforeKeystore manages the shutdown order between + // provider and keystore to prevent race conditions. + // + // The provider's worker goroutines may call keystore methods during their + // operation. If keystore closes while these operations are in-flight, we get + // "keystore is closed" errors. By closing the provider first, we ensure all + // worker goroutines exit and complete any pending keystore operations before + // the keystore itself closes. 
+ type providerKeystoreShutdownInput struct { + fx.In + Provider DHTProvider + Keystore *keystore.ResettableKeystore + } + ensureProviderClosesBeforeKeystore := fx.Invoke(func(lc fx.Lifecycle, in providerKeystoreShutdownInput) { + // Skip for NoopProvider + if _, ok := in.Provider.(*NoopProvider); ok { + return } lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { - return sys.Close() + // Close provider first - waits for all worker goroutines to exit. + // This ensures no code can access keystore after this returns. + if err := in.Provider.Close(); err != nil { + logger.Errorw("error closing provider during shutdown", "error", err) + } + + // Close keystore - safe now, provider is fully shut down + return in.Keystore.Close() }, }) - - return sys, nil }) + + // extractSweepingProvider extracts a SweepingProvider from the given provider interface. + // It handles unwrapping buffered and dual providers, always selecting WAN for dual DHT. + // Returns nil if the provider is not a sweeping provider type. 
+ var extractSweepingProvider func(prov any) *dhtprovider.SweepingProvider + extractSweepingProvider = func(prov any) *dhtprovider.SweepingProvider { + switch p := prov.(type) { + case *dhtprovider.SweepingProvider: + return p + case *ddhtprovider.SweepingProvider: + return p.WAN + case *buffered.SweepingProvider: + // Recursively extract from the inner provider + return extractSweepingProvider(p.Provider) + default: + return nil + } + } + + type alertInput struct { + fx.In + Provider DHTProvider + } + reprovideAlert := fx.Invoke(func(lc fx.Lifecycle, in alertInput) { + prov := extractSweepingProvider(in.Provider) + if prov == nil { + return + } + + var ( + cancel context.CancelFunc + done = make(chan struct{}) + ) + + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + gcCtx, c := context.WithCancel(context.Background()) + cancel = c + go func() { + defer close(done) + + ticker := time.NewTicker(reprovideAlertPollInterval) + defer ticker.Stop() + + var ( + queueSize, prevQueueSize int64 + queuedWorkers, prevQueuedWorkers bool + count int + ) + + for { + select { + case <-gcCtx.Done(): + return + case <-ticker.C: + } + + stats := prov.Stats() + queuedWorkers = stats.Workers.QueuedPeriodic > 0 + queueSize = int64(stats.Queues.PendingRegionReprovides) + + // Alert if reprovide queue keeps growing and all periodic workers are busy. + // Requires consecutiveAlertsThreshold intervals of sustained growth. + if prevQueuedWorkers && queuedWorkers && queueSize > prevQueueSize { + count++ + if count >= consecutiveAlertsThreshold { + logger.Errorf(` +🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔 + +Your node is falling behind on DHT reprovides, which will affect content availability. + +Keyspace regions enqueued for reprovide: + %s ago:\t%d + Now:\t%d + +All periodic workers are busy! + Active workers:\t%d / %d (max) + Active workers types:\t%d periodic, %d burst + Dedicated workers:\t%d periodic, %d burst + +Solutions (try in order): +1. 
Increase Provide.DHT.MaxWorkers (current %d) +2. Increase Provide.DHT.DedicatedPeriodicWorkers (current %d) +3. Set Provide.DHT.SweepEnabled=false and Routing.AcceleratedDHTClient=true (last resort, not recommended) + +See how the reprovide queue is processed in real-time with 'watch ipfs provide stat --all --compact' + +See docs: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxworkers`, + reprovideAlertPollInterval.Truncate(time.Minute).String(), prevQueueSize, queueSize, + stats.Workers.Active, stats.Workers.Max, + stats.Workers.ActivePeriodic, stats.Workers.ActiveBurst, + stats.Workers.DedicatedPeriodic, stats.Workers.DedicatedBurst, + stats.Workers.Max, stats.Workers.DedicatedPeriodic) + } + } else if !queuedWorkers { + count = 0 + } + + prevQueueSize, prevQueuedWorkers = queueSize, queuedWorkers + } + }() + return nil + }, + OnStop: func(ctx context.Context) error { + // Cancel the alert loop + if cancel != nil { + cancel() + } + select { + case <-done: + case <-ctx.Done(): + return ctx.Err() + } + return nil + }, + }) + }) + + return fx.Options( + sweepingReprovider, + initKeystore, + ensureProviderClosesBeforeKeystore, + reprovideAlert, + ) } // ONLINE/OFFLINE -// OnlineProviders groups units managing provider routing records online -func OnlineProviders(useStrategicProviding bool, reprovideStrategy string, reprovideInterval time.Duration, acceleratedDHTClient bool) fx.Option { - if useStrategicProviding { +// hasDHTRouting checks if the routing configuration includes a DHT component. +// Returns false for HTTP-only custom routing configurations (e.g., Routing.Type="custom" +// with only HTTP routers). This is used to determine whether SweepingProviderOpt +// can be used, since it requires a DHT client. 
+func hasDHTRouting(cfg *config.Config) bool { + routingType := cfg.Routing.Type.WithDefault(config.DefaultRoutingType) + switch routingType { + case "auto", "autoclient", "dht", "dhtclient", "dhtserver": + return true + case "custom": + // Check if any router in custom config is DHT-based + for _, router := range cfg.Routing.Routers { + if routerIncludesDHT(router, cfg) { + return true + } + } + return false + default: // "none", "delegated" + return false + } +} + +// routerIncludesDHT recursively checks if a router configuration includes DHT. +// Handles parallel and sequential composite routers by checking their children. +func routerIncludesDHT(rp config.RouterParser, cfg *config.Config) bool { + switch rp.Type { + case config.RouterTypeDHT: + return true + case config.RouterTypeParallel, config.RouterTypeSequential: + if children, ok := rp.Parameters.(*config.ComposableRouterParams); ok { + for _, child := range children.Routers { + if childRouter, exists := cfg.Routing.Routers[child.RouterName]; exists { + if routerIncludesDHT(childRouter, cfg) { + return true + } + } + } + } + } + return false +} + +// OnlineProviders groups units managing provide routing records online +func OnlineProviders(provide bool, cfg *config.Config) fx.Option { + if !provide { return OfflineProviders() } - var keyProvider fx.Option - switch reprovideStrategy { - case "all", "": - keyProvider = fx.Provide(newProvidingStrategy(false, false)) - case "roots": - keyProvider = fx.Provide(newProvidingStrategy(true, true)) - case "pinned": - keyProvider = fx.Provide(newProvidingStrategy(true, false)) - case "flat": - keyProvider = fx.Provide(provider.NewBlockstoreProvider) - default: - return fx.Error(fmt.Errorf("unknown reprovider strategy %q", reprovideStrategy)) + providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy) + + strategyFlag := config.ParseProvideStrategy(providerStrategy) + if strategyFlag == 0 { + return fx.Error(fmt.Errorf("provider: unknown 
strategy %q", providerStrategy)) } - return fx.Options( - keyProvider, - ProviderSys(reprovideInterval, acceleratedDHTClient), - ) + opts := []fx.Option{ + fx.Provide(setReproviderKeyProvider(providerStrategy)), + } + + sweepEnabled := cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) + dhtAvailable := hasDHTRouting(cfg) + + // Use SweepingProvider only when both sweep is enabled AND DHT is available. + // For HTTP-only routing (e.g., Routing.Type="custom" with only HTTP routers), + // fall back to LegacyProvider which works with ProvideManyRouter. + // See https://github.com/ipfs/kubo/issues/11089 + if sweepEnabled && dhtAvailable { + opts = append(opts, SweepingProviderOpt(cfg)) + } else { + reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) + acceleratedDHTClient := cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) + provideWorkerCount := int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers)) + + opts = append(opts, LegacyProviderOpt(reprovideInterval, providerStrategy, acceleratedDHTClient, provideWorkerCount)) + } + + return fx.Options(opts...) 
} -// OfflineProviders groups units managing provider routing records offline +// OfflineProviders groups units managing provide routing records offline func OfflineProviders() fx.Option { - return fx.Provide(provider.NewNoopProvider) + return fx.Provide(func() DHTProvider { + return &NoopProvider{} + }) } -func newProvidingStrategy(onlyPinned, onlyRoots bool) interface{} { - type input struct { - fx.In - Pinner pin.Pinner - Blockstore blockstore.Blockstore - IPLDFetcher fetcher.Factory `name:"ipldFetcher"` +func mfsProvider(mfsRoot *mfs.Root, fetcher fetcher.Factory) provider.KeyChanFunc { + return func(ctx context.Context) (<-chan cid.Cid, error) { + err := mfsRoot.FlushMemFree(ctx) + if err != nil { + return nil, fmt.Errorf("provider: error flushing MFS, cannot provide MFS: %w", err) + } + rootNode, err := mfsRoot.GetDirectory().GetNode() + if err != nil { + return nil, fmt.Errorf("provider: error loading MFS root, cannot provide MFS: %w", err) + } + + kcf := provider.NewDAGProvider(rootNode.Cid(), fetcher) + return kcf(ctx) } - return func(in input) provider.KeyChanFunc { - if onlyRoots { - return provider.NewPinnedProvider(true, in.Pinner, in.IPLDFetcher) - } +} - if onlyPinned { - return provider.NewPinnedProvider(false, in.Pinner, in.IPLDFetcher) - } +type provStrategyIn struct { + fx.In + Pinner pin.Pinner + Blockstore blockstore.Blockstore + OfflineIPLDFetcher fetcher.Factory `name:"offlineIpldFetcher"` + OfflineUnixFSFetcher fetcher.Factory `name:"offlineUnixfsFetcher"` + MFSRoot *mfs.Root + Repo repo.Repo +} +type provStrategyOut struct { + fx.Out + ProvidingStrategy config.ProvideStrategy + ProvidingKeyChanFunc provider.KeyChanFunc +} + +// createKeyProvider creates the appropriate KeyChanFunc based on strategy. 
+// Each strategy has different behavior: +// - "roots": Only root CIDs of pinned content +// - "pinned": All pinned content (roots + children) +// - "mfs": Only MFS content +// - "all": all blocks +func createKeyProvider(strategyFlag config.ProvideStrategy, in provStrategyIn) provider.KeyChanFunc { + switch strategyFlag { + case config.ProvideStrategyRoots: + return provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher)) + case config.ProvideStrategyPinned: + return provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)) + case config.ProvideStrategyPinned | config.ProvideStrategyMFS: return provider.NewPrioritizedProvider( - provider.NewPinnedProvider(true, in.Pinner, in.IPLDFetcher), - provider.NewBlockstoreProvider(in.Blockstore), + provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)), + mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher), ) + case config.ProvideStrategyMFS: + return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher) + default: // "all", "", "flat" (compat) + return in.Blockstore.AllKeysChan + } +} + +// detectStrategyChange checks if the reproviding strategy has changed from what's persisted. +// Returns: (previousStrategy, hasChanged, error) +func detectStrategyChange(ctx context.Context, strategy string, ds datastore.Datastore) (string, bool, error) { + strategyKey := datastore.NewKey(reprovideStrategyKey) + + prev, err := ds.Get(ctx, strategyKey) + if err != nil { + if errors.Is(err, datastore.ErrNotFound) { + return "", strategy != "", nil + } + return "", false, err + } + + previousStrategy := string(prev) + return previousStrategy, previousStrategy != strategy, nil +} + +// persistStrategy saves the current reproviding strategy to the datastore. +// Empty string strategies are deleted rather than stored. 
+func persistStrategy(ctx context.Context, strategy string, ds datastore.Datastore) error { + strategyKey := datastore.NewKey(reprovideStrategyKey) + + if strategy == "" { + return ds.Delete(ctx, strategyKey) + } + return ds.Put(ctx, strategyKey, []byte(strategy)) +} + +// handleStrategyChange manages strategy change detection and queue clearing. +// Strategy change detection: when the reproviding strategy changes, +// we clear the provide queue to avoid unexpected behavior from mixing +// strategies. This ensures a clean transition between different providing modes. +func handleStrategyChange(strategy string, provider DHTProvider, ds datastore.Datastore) { + ctx := context.Background() + + previous, changed, err := detectStrategyChange(ctx, strategy, ds) + if err != nil { + logger.Error("cannot read previous reprovide strategy", "err", err) + return + } + + if !changed { + return + } + + logger.Infow("Provide.Strategy changed, clearing provide queue", "previous", previous, "current", strategy) + provider.Clear() + + if err := persistStrategy(ctx, strategy, ds); err != nil { + logger.Error("cannot update reprovide strategy", "err", err) + } +} + +func setReproviderKeyProvider(strategy string) func(in provStrategyIn) provStrategyOut { + strategyFlag := config.ParseProvideStrategy(strategy) + + return func(in provStrategyIn) provStrategyOut { + // Create the appropriate key provider based on strategy + kcf := createKeyProvider(strategyFlag, in) + return provStrategyOut{ + ProvidingStrategy: strategyFlag, + ProvidingKeyChanFunc: kcf, + } } } diff --git a/core/node/storage.go b/core/node/storage.go index aedf0ee6a..e97a0db4a 100644 --- a/core/node/storage.go +++ b/core/node/storage.go @@ -27,10 +27,30 @@ func Datastore(repo repo.Repo) datastore.Datastore { type BaseBlocks blockstore.Blockstore // BaseBlockstoreCtor creates cached blockstore backed by the provided datastore -func BaseBlockstoreCtor(cacheOpts blockstore.CacheOpts, hashOnRead bool) func(mctx 
helpers.MetricsCtx, repo repo.Repo, lc fx.Lifecycle) (bs BaseBlocks, err error) { - return func(mctx helpers.MetricsCtx, repo repo.Repo, lc fx.Lifecycle) (bs BaseBlocks, err error) { +func BaseBlockstoreCtor( + cacheOpts blockstore.CacheOpts, + hashOnRead bool, + writeThrough bool, + providingStrategy string, +) func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) { + return func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) { + opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)} + + // Blockstore providing integration: + // When strategy includes "all" the blockstore directly provides blocks as they're Put. + // Important: Provide calls from blockstore are intentionally BLOCKING. + // The Provider implementation (not the blockstore) should handle concurrency/queuing. + // This avoids spawning unbounded goroutines for concurrent block additions. + strategyFlag := config.ParseProvideStrategy(providingStrategy) + if strategyFlag&config.ProvideStrategyAll != 0 { + opts = append(opts, blockstore.Provider(prov)) + } + // hash security - bs = blockstore.NewBlockstore(repo.Datastore()) + bs = blockstore.NewBlockstore( + repo.Datastore(), + opts..., + ) bs = &verifbs.VerifBS{Blockstore: bs} bs, err = blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, cacheOpts) if err != nil { @@ -39,8 +59,8 @@ func BaseBlockstoreCtor(cacheOpts blockstore.CacheOpts, hashOnRead bool) func(mc bs = blockstore.NewIdStore(bs) - if hashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly? 
- bs.HashOnRead(true) + if hashOnRead { + bs = &blockstore.ValidatingBlockstore{Blockstore: bs} } return @@ -57,11 +77,11 @@ func GcBlockstoreCtor(bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockst } // FilestoreBlockstoreCtor wraps GcBlockstore and adds Filestore support -func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) { +func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) { gclocker = blockstore.NewGCLocker() // hash security - fstore = filestore.NewFilestore(bb, repo.FileManager()) + fstore = filestore.NewFilestore(bb, repo.FileManager(), prov) gcbs = blockstore.NewGCBlockstore(fstore, gclocker) gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs} diff --git a/coverage/Rules.mk b/coverage/Rules.mk index 48fce2856..84a4a1887 100644 --- a/coverage/Rules.mk +++ b/coverage/Rules.mk @@ -3,33 +3,14 @@ include mk/header.mk GOCC ?= go $(d)/coverage_deps: $$(DEPS_GO) cmd/ipfs/ipfs - rm -rf $(@D)/unitcover && mkdir $(@D)/unitcover rm -rf $(@D)/sharnesscover && mkdir $(@D)/sharnesscover -ifneq ($(IPFS_SKIP_COVER_BINS),1) -$(d)/coverage_deps: test/bin/gocovmerge -endif - .PHONY: $(d)/coverage_deps -# unit tests coverage -UTESTS_$(d) := $(shell $(GOCC) list -f '{{if (or (len .TestGoFiles) (len .XTestGoFiles))}}{{.ImportPath}}{{end}}' $(go-flags-with-tags) ./... 
| grep -v go-ipfs/vendor | grep -v go-ipfs/Godeps) +# unit tests coverage is now produced by test_unit target in mk/golang.mk +# (outputs coverage/unit_tests.coverprofile and test/unit/gotest.json) -UCOVER_$(d) := $(addsuffix .coverprofile,$(addprefix $(d)/unitcover/, $(subst /,_,$(UTESTS_$(d))))) - -$(UCOVER_$(d)): $(d)/coverage_deps ALWAYS - $(eval TMP_PKG := $(subst _,/,$(basename $(@F)))) - $(eval TMP_DEPS := $(shell $(GOCC) list -f '{{range .Deps}}{{.}} {{end}}' $(go-flags-with-tags) $(TMP_PKG) | sed 's/ /\n/g' | grep ipfs/go-ipfs) $(TMP_PKG)) - $(eval TMP_DEPS_LIST := $(call join-with,$(comma),$(TMP_DEPS))) - $(GOCC) test $(go-flags-with-tags) $(GOTFLAGS) -v -covermode=atomic -json -coverpkg=$(TMP_DEPS_LIST) -coverprofile=$@ $(TMP_PKG) | tee -a test/unit/gotest.json - - -$(d)/unit_tests.coverprofile: $(UCOVER_$(d)) - gocovmerge $^ > $@ - -TGTS_$(d) := $(d)/unit_tests.coverprofile - -.PHONY: $(d)/unit_tests.coverprofile +TGTS_$(d) := # sharness tests coverage $(d)/ipfs: GOTAGS += testrunmain @@ -46,7 +27,7 @@ endif export IPFS_COVER_DIR:= $(realpath $(d))/sharnesscover/ $(d)/sharness_tests.coverprofile: export TEST_PLUGIN=0 -$(d)/sharness_tests.coverprofile: $(d)/ipfs cmd/ipfs/ipfs-test-cover $(d)/coverage_deps test_sharness +$(d)/sharness_tests.coverprofile: $(d)/ipfs cmd/ipfs/ipfs-test-cover $(d)/coverage_deps test/bin/gocovmerge test_sharness (cd $(@D)/sharnesscover && find . 
-type f | gocovmerge -list -) > $@ diff --git a/coverage/main/main.go b/coverage/main/main.go index e680a7037..0d279d967 100644 --- a/coverage/main/main.go +++ b/coverage/main/main.go @@ -1,5 +1,4 @@ //go:build testrunmain -// +build testrunmain package main diff --git a/docs/README.md b/docs/README.md index ab7ac9cc3..244aa4846 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,9 +14,9 @@ Otherwise, check out the following guides to using and developing IPFS: ## Developing `kubo` -- First, please read the Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and then the Contributing Guidelines for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) -- Building on… - - [Windows](windows.md) +- **[Developer Guide](developer-guide.md)** - prerequisites, build, test, and contribute +- Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) +- Building on [Windows](windows.md) - [Performance Debugging Guidelines](debug-guide.md) - [Release Checklist](releases.md) diff --git a/docs/RELEASE_CHECKLIST.md b/docs/RELEASE_CHECKLIST.md index 476c77b15..da96a20d4 100644 --- a/docs/RELEASE_CHECKLIST.md +++ b/docs/RELEASE_CHECKLIST.md @@ -1,177 +1,113 @@ - + # ✅ Release Checklist (vX.Y.Z[-rcN]) -## Labels +**Release types:** RC (Release Candidate) | FINAL | PATCH -If an item should be executed for a specific release type, it should be labeled with one of the following labels: +## Prerequisites -- ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) execute **ONLY** when releasing a Release Candidate -- ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) execute **ONLY** when releasing a Final Release +- [ ] [GPG signature](https://docs.github.com/en/authentication/managing-commit-signature-verification) 
configured in local git and GitHub +- [ ] [Docker](https://docs.docker.com/get-docker/) installed on your system +- [ ] [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed on your system +- [ ] kubo repository cloned locally +- [ ] **non-PATCH:** Upgrade Go in CI to latest patch from -Otherwise, it means it should be executed for **ALL** release types. +## 1. Prepare Release Branch -Patch releases should follow the same process as `.0` releases. If some item should **NOT** be executed for a Patch Release, it should be labeled with: +- [ ] Fetch latest changes: `git fetch origin master release` +- [ ] Create branch `release-vX.Y.Z` (base from: `master` if Z=0 for new minor/major, `release` if Z>0 for patch) +- [ ] **RC1 only:** Switch to `master` branch and prepare for next release cycle: + - [ ] Update [version.go](https://github.com/ipfs/kubo/blob/master/version.go) to `vX.Y+1.0-dev` (⚠️ double-check Y+1 is correct) ([example PR](https://github.com/ipfs/kubo/pull/9305)) + - [ ] Create `./docs/changelogs/vX.Y+1.md` and add link in [CHANGELOG.md](https://github.com/ipfs/kubo/blob/master/CHANGELOG.md) +- [ ] Switch to `release-vX.Y.Z` branch and update [version.go](https://github.com/ipfs/kubo/blob/master/version.go) to `vX.Y.Z(-rcN)` (⚠️ double-check Y matches release) ([example](https://github.com/ipfs/kubo/pull/9394)) +- [ ] Create draft PR: `release-vX.Y.Z` → `release` ([example](https://github.com/ipfs/kubo/pull/9306)) +- [ ] In `release-vX.Y.Z` branch, cherry-pick commits from `master`: `git cherry-pick -x ` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf)) + - ⚠️ **NOTE:** `-x` flag records original commit SHA for traceability and ensures cleaner merges with deduplicated commits in history +- [ ] Verify all CI checks on the PR are passing +- [ ] **FINAL only:** In `release-vX.Y.Z` branch, replace `Changelog` and `Contributors` sections with `./bin/mkreleaselog` stdout (do 
**NOT** copy stderr) +- [ ] **FINAL only:** Merge PR (`release-vX.Y.Z` → `release`) using `Create a merge commit` + - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit + - ⚠️ do **NOT** delete the `release-vX.Y.Z` branch (needed for future patch releases and git history) -- ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) do **NOT** execute when releasing a Patch Release +## 2. Tag & Publish -## Before the release +### Create Tag +⚠️ **POINT OF NO RETURN:** Once pushed, tags trigger automatic Docker/NPM publishing that cannot be reversed! +If you're making a release for the first time, do pair programming and have the release reviewer verify all commands. -This section covers tasks to be done ahead of the release. +- [ ] **RC:** From `release-vX.Y.Z` branch: `git tag -s vX.Y.Z-rcN -m 'Prerelease X.Y.Z-rcN'` +- [ ] **FINAL:** After PR merge, from `release` branch: `git tag -s vX.Y.Z -m 'Release X.Y.Z'` +- [ ] ⚠️ Verify tag is signed and correct: `git show vX.Y.Z(-rcN)` +- [ ] Push tag: `git push origin vX.Y.Z(-rcN)` + - ⚠️ do **NOT** use `git push --tags` because it pushes all your local tags +- [ ] **STOP:** Wait for [Docker build](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) to complete before proceeding -- [ ] Verify you have access to all the services and tools required for the release - - [ ] [GPG signature](https://docs.github.com/en/authentication/managing-commit-signature-verification) configured in local git and in GitHub - - [ ] [admin access to IPFS Discourse](https://discuss.ipfs.tech/g/admins) - - ask the previous release owner (or @2color) for an invite - - [ ] ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) [access to #shared-pl-marketing-requests](https://filecoinproject.slack.com/archives/C018EJ8LWH1) channel in FIL Slack - - ask the previous release owner for an invite - - [ ] [access to IPFS network 
metrics](https://github.com/protocol/pldw/blob/624f47cf4ec14ad2cec6adf601a9f7b203ef770d/docs/sources/ipfs.md#ipfs-network-metrics) dashboards in Grafana - - open an access request in the [pldw](https://github.com/protocol/pldw/issues/new/choose) - - [example](https://github.com/protocol/pldw/issues/158) - - [ ] [kuboreleaser](https://github.com/ipfs/kuboreleaser) checked out on your system (_only if you're using [kuboreleaser](https://github.com/ipfs/kuboreleaser)_) - - [ ] [Thunderdome](https://github.com/ipfs-shipyard/thunderdome) checked out on your system and configured (see the [Thunderdome release docs](./releases_thunderdome.md) for setup) - - [ ] [docker](https://docs.docker.com/get-docker/) installed on your system (_only if you're using [kuboreleaser](https://github.com/ipfs/kuboreleaser)_) - - [ ] [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed on your system (_only if you're **NOT** using [kuboreleaser](https://github.com/ipfs/kuboreleaser)_) - - [ ] [zsh](https://github.com/ohmyzsh/ohmyzsh/wiki/Installing-ZSH#install-and-set-up-zsh-as-default) installed on your system - - [ ] [kubo](https://github.com/ipfs/kubo) checked out under `$(go env GOPATH)/src/github.com/ipfs/kubo` - - you can also symlink your clone to the expected location by running `mkdir -p $(go env GOPATH)/src/github.com/ipfs && ln -s $(pwd) $(go env GOPATH)/src/github.com/ipfs/kubo` - - [ ] ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) [Reddit](https://www.reddit.com) account -- ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) Upgrade Go used in CI to the latest patch release available in [CircleCI](https://hub.docker.com/r/cimg/go/tags) in: - - [ ] ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) [ipfs/distributions](https://github.com/ipfs/distributions) - - [example](https://github.com/ipfs/distributions/pull/756) - - [ ] 
![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) [ipfs/ipfs-docs](https://github.com/ipfs/ipfs-docs) - - [example](https://github.com/ipfs/ipfs-docs/pull/1298) -- [ ] Verify there is nothing [left for release](-what-s-left-for-release) -- [ ] Create a release process improvement PR - - [ ] update the [release issue template](docs/RELEASE_ISSUE_TEMPLATE.md) as you go - - [ ] link it in the [Meta](#meta) section +### Publish Artifacts -## The release +- [ ] **Docker:** Publish to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags) + - [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow triggered by tag push + - [ ] Verify image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags) +- [ ] **dist.ipfs.tech:** Publish to [dist.ipfs.tech](https://dist.ipfs.tech) + - [ ] Check out [ipfs/distributions](https://github.com/ipfs/distributions) + - [ ] Create branch: `git checkout -b release-kubo-X.Y.Z(-rcN)` + - [ ] Verify `.tool-versions` golang matches [Kubo's CI](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) `go-version:` (update if needed) + - [ ] Run: `./dist.sh add-version kubo vX.Y.Z(-rcN)` ([usage](https://github.com/ipfs/distributions#usage)) + - [ ] Create and merge PR (updates `dists/kubo/versions`, **FINAL** also updates `dists/kubo/current` - [example](https://github.com/ipfs/distributions/pull/1125)) + - [ ] Wait for [CI workflow](https://github.com/ipfs/distributions/actions/workflows/main.yml) triggered by merge + - [ ] Verify release on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo) +- [ ] **NPM:** Publish to [NPM](https://www.npmjs.com/package/kubo?activeTab=versions) + - [ ] Manually dispatch [Release to npm](https://github.com/ipfs/npm-kubo/actions/workflows/main.yml) workflow if not auto-triggered + - [ ] Verify release on [NPM](https://www.npmjs.com/package/kubo?activeTab=versions) +- [ ] **GitHub Release:** Publish to 
[GitHub](https://github.com/ipfs/kubo/releases) + - [ ] [Create release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) ([RC example](https://github.com/ipfs/kubo/releases/tag/v0.36.0-rc1), [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.35.0)) + - [ ] Use tag `vX.Y.Z(-rcN)` + - [ ] Link to release issue + - [ ] **RC:** Link to changelog, check `This is a pre-release` + - [ ] **FINAL:** Copy changelog content (without header), do **NOT** check pre-release + - [ ] Run [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow + - [ ] Verify assets are attached to the GitHub release -This section covers tasks to be done during each release. +## 3. Post-Release -- [ ] Prepare the release branch and update version numbers accordingly
using `./kuboreleaser --skip-check-before release --version vX.Y.Z(-rcN) prepare-branch` or ... - - [ ] create a new branch `release-vX.Y.Z` - - use `master` as base if `Z == 0` - - use `release` as base if `Z > 0` - - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) update the `CurrentVersionNumber` in [version.go](version.go) in the `master` branch to `vX.Y+1.0-dev` - - [example](https://github.com/ipfs/kubo/pull/9305) - - [ ] update the `CurrentVersionNumber` in [version.go](version.go) in the `release-vX.Y` branch to `vX.Y.Z(-RCN)` - - [example](https://github.com/ipfs/kubo/pull/9394) - - [ ] create a draft PR from `release-vX.Y` to `release` - - [example](https://github.com/ipfs/kubo/pull/9306) - - [ ] Cherry-pick commits from `master` to the `release-vX.Y.Z` using `git cherry-pick -x ` - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Add full changelog and contributors to the [changelog](docs/changelogs/vX.Y.md) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Replace the `Changelog` and `Contributors` sections of the [changelog](docs/changelogs/vX.Y.md) with the stdout of `./bin/mkreleaselog`. Note that the command expects your `$GOPATH/src/github.com/ipfs/kubo` to include latest commits from `release-vX.Y` - - do **NOT** copy the stderr - - [ ] verify all CI checks on the PR from `release-vX.Y` to `release` are passing - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Merge the PR from `release-vX.Y` to `release` using the `Create a merge commit` - - do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit - - do **NOT** delete the `release-vX.Y` branch -
-- [ ] Create the release tag
using `./kuboreleaser release --version vX.Y.Z(-rcN) tag` or ... - - This is a dangerous operation! Go and Docker publishing are difficult to reverse! Have the release reviewer verify all the commands marked with ⚠️! - - [ ] ⚠️ ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) tag the HEAD commit using `git tag -s vX.Y.Z(-RCN) -m 'Prerelease X.Y.Z(-RCN)'` - - [ ] ⚠️ ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) tag the HEAD commit of the `release` branch using `git tag -s vX.Y.Z -m 'Release X.Y.Z'` - - [ ] ⚠️ verify the tag is signed and tied to the correct commit using `git show vX.Y.Z(-RCN)` - - [ ] ⚠️ push the tag to GitHub using `git push origin vX.Y.Z(-RCN)` - - do **NOT** use `git push --tags` because it pushes all your local tags -
-- [ ] Verify [ipfs/distributions](https://github.com/ipfs/distributions)'s `.tool-versions`'s `golang` entry is set to the [latest go release](https://go.dev/doc/devel/release) on the major go branch [Kubo is being tested on](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) (see `go-version:`). -- [ ] Publish to Dockerhub, NPM, and dist.ipfs.tech and GitHub using `./kuboreleaser --skip-check-before --skip-run release --version vX.Y.Z(-rcN) publish-to-all` or follow each step below: - - [ ] Publish the release to [DockerHub](https://hub.docker.com/r/ipfs/kubo/)
using `./kuboreleaser --skip-check-before --skip-run release --version vX.Y.Z(-rcN) publish-to-dockerhub` or ... - - [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow run initiated by the tag push to finish - - [ ] verify the image is available on [Docker Hub](https://hub.docker.com/r/ipfs/kubo/tags) - - [ ] Publish the release to [dist.ipfs.tech](https://dist.ipfs.tech)
using `./kuboreleaser release --version vX.Y.Z(-rcN) publish-to-distributions` or ... - - [ ] check out [ipfs/distributions](https://github.com/ipfs/distributions) - - [ ] run `./dist.sh add-version kubo vX.Y.Z(-RCN)` to add the new version to the `versions` file - - [usage](https://github.com/ipfs/distributions#usage) - - [ ] create and merge the PR which updates `dists/kubo/versions` and `dists/go-ipfs/versions` (![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) and `dists/kubo/current_version` and `dists/go-ipfs/current_version`) - - [example](https://github.com/ipfs/distributions/pull/760) - - [ ] wait for the [CI](https://github.com/ipfs/distributions/actions/workflows/main.yml) workflow run initiated by the merge to master to finish - - [ ] verify the release is available on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo) -
- - [ ] Publish the release to [NPM](https://www.npmjs.com/package/go-ipfs?activeTab=versions)
using `./kuboreleaser release --version vX.Y.Z(-rcN) publish-to-npm` (⚠️ you might need to run the command a couple of times because GHA might not be able to see the new distribution straight away due to caching) or ... - - [ ] run the [Release to npm](https://github.com/ipfs/npm-go-ipfs/actions/workflows/main.yml) workflow - - [ ] check [Release to npm](https://github.com/ipfs/npm-go-ipfs/actions/workflows/main.yml) workflow run logs to verify it discovered the new release - - [ ] verify the release is available on [NPM](https://www.npmjs.com/package/go-ipfs?activeTab=versions) -
- - [ ] Publish the release to [GitHub](https://github.com/ipfs/kubo/releases)
using `./kuboreleaser release --version vX.Y.Z(-rcN) publish-to-github` or ... - - [ ] create a new release on [GitHub](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) - - [RC example](https://github.com/ipfs/kubo/releases/tag/v0.17.0-rc1) - - [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.17.0) - - [ ] use the `vX.Y.Z(-RCN)` tag - - [ ] link to the release issue - - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) link to the changelog in the description - - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) check the `This is a pre-release` checkbox - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) copy the changelog (without the header) in the description - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) do **NOT** check the `This is a pre-release` checkbox - - [ ] run the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow - - [ ] wait for the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow run to finish - - [ ] verify the release assets are present in the [GitHub release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-RCN)) -
-- [ ] Update Kubo staging environment, see the [Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8) for details. - - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) Test last release against the current RC - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Test last release against the current one -- [ ] Promote the release
using `./kuboreleaser release --version vX.Y.Z(-rcN) promote` or ... - - [ ] create an [IPFS Discourse](https://discuss.ipfs.tech) topic - - [prerelease example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248) - - [release example](https://discuss.ipfs.tech/t/kubo-v0-16-0-release-is-out/15249) - - [ ] use `Kubo vX.Y.Z(-RCN) is out!` as the title - - [ ] use `kubo` and `go-ipfs` as topics - - [ ] repeat the title as a heading (`##`) in the description - - [ ] link to the GitHub Release, binaries on IPNS, docker pull command and release notes in the description - - [ ] pin the [IPFS Discourse](https://discuss.ipfs.tech) topic globally - - you can make the topic a banner if there is no banner already - - verify the [IPFS Discourse](https://discuss.ipfs.tech) topic was copied to: - - [ ] [#ipfs-chatter](https://discord.com/channels/669268347736686612/669268347736686615) in IPFS Discord - - [ ] [#ipfs-chatter](https://filecoinproject.slack.com/archives/C018EJ8LWH1) in FIL Slack - - [ ] [#ipfs-chatter:ipfs.io](https://matrix.to/#/#ipfs-chatter:ipfs.io) in Matrix - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Add the link to the [IPFS Discourse](https://discuss.ipfs.tech) topic to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-RCN)) description - - [example](https://github.com/ipfs/kubo/releases/tag/v0.17.0) - - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) create an issue comment mentioning early testers on the release issue - - [example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) create an issue comment linking to the release on the release issue - - [example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) 
ask the marketing team to tweet about the release in [#shared-pl-marketing-requests](https://filecoinproject.slack.com/archives/C018EJ8LWH1) in FIL Slack - - [example](https://filecoinproject.slack.com/archives/C018EJ8LWH1/p1664885305374900) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) post the link to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-RCN)) to [Reddit](https://reddit.com/r/ipfs) - - [example](https://www.reddit.com/r/ipfs/comments/9x0q0k/kubo_v0160_release_is_out/) -
-- [ ] ~~Test the new version with `ipfs-companion`~~ ([currently skipped](https://github.com/ipfs/ipfs-companion/issues/1300))
using `./kuboreleaser release --version vX.Y.Z(-rcN) test-ipfs-companion` or ... - - [ ] run the [e2e](https://github.com/ipfs/ipfs-companion/actions/workflows/e2e.yml) - - use `vX.Y.Z(-RCN)` as the Kubo image version - - [ ] wait for the [e2e](https://github.com/ipfs/ipfs-companion/actions/workflows/e2e.yml) workflow run to finish -
-- [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Update Kubo in [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
using `./kuboreleaser release --version vX.Y.Z(-rcN) update-ipfs-desktop` or ... - - [ ] check out [ipfs/ipfs-desktop](https://github.com/ipfs/ipfs-desktop) - - [ ] run `npm install` - - [ ] create a PR which updates `package.json` and `package-lock.json` - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) add @SgtPooki as reviewer -
-- [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Update Kubo docs
using `./kuboreleaser release --version vX.Y.Z(-rcN) update-ipfs-docs` or ... - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) run the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) merge the PR created by the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow run -
-- [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Create a blog entry on [blog.ipfs.tech](https://blog.ipfs.tech)
using `./kuboreleaser release --version vX.Y.Z(-rcN) update-ipfs-blog --date YYYY-MM-DD` or ... - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) create a PR which adds a release note for the new Kubo version - - [example](https://github.com/ipfs/ipfs-blog/pull/529) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) merge the PR - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) verify the blog entry was published -
-- [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Merge the [release](https://github.com/ipfs/kubo/tree/release) branch back into [master](https://github.com/ipfs/kubo/tree/master), ignoring the changes to [version.go](version.go) (keep the `-dev`) version,
using `./kuboreleaser release --version vX.Y.Z(-rcN) merge-branch` or ... - - [ ] create a new branch `merge-release-vX.Y.Z` from `release` - - [ ] create and merge a PR from `merge-release-vX.Y.Z` to `master` -
-- [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) Prepare for the next release
using `./kuboreleaser release --version vX.Y.Z(-rcN) prepare-next` or ... - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) Create the next [changelog](https://github.com/ipfs/kubo/blob/master/docs/changelogs/vX.(Y+1).md) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) Link to the new changelog in the [CHANGELOG.md](CHANGELOG.md) file - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) Create the next release issue -
-- [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) Create a dependency update PR - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) check out [ipfs/kubo](https://github.com/ipfs/kubo) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) go over direct dependencies from `go.mod` in the root directory (NOTE: do not run `go get -u` as it will upgrade indirect dependencies which may cause problems) - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) run `make mod_tidy` - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) create a PR which updates `go.mod` and `go.sum` - - [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-yellow?style=flat-square) add the PR to the next release milestone -- [ ] ![](https://img.shields.io/badge/only-FINAL-green?style=flat-square) Close the release issue +### Technical Tasks + +- [ ] **FINAL only:** Merge `release` → `master` + - [ ] Create branch `merge-release-vX.Y.Z` from `release` + - [ ] Merge `master` to `merge-release-vX.Y.Z` first, and resolve conflict in `version.go` + - ⚠️ **NOTE:** make sure to ignore the changes to [version.go](https://github.com/ipfs/kubo/blob/master/version.go) (keep the `-dev` in `master`) + - [ ] Create and merge PR from `merge-release-vX.Y.Z` to `master` using `Create a merge commit` + - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we want to preserve original commit history +- [ ] Update 
[ipshipyard/waterworks-infra](https://github.com/ipshipyard/waterworks-infra) + - [ ] Update Kubo staging environment ([Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8)) + - [ ] **RC:** Test last release against current RC + - [ ] **FINAL:** Latest release on both boxes + - [ ] **FINAL:** Update collab cluster boxes to the tagged release + - [ ] **FINAL:** Update libp2p bootstrappers to the tagged release +- [ ] Smoke test with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/) +- [ ] Update [ipfs-desktop](https://github.com/ipfs/ipfs-desktop) + - [ ] Create PR updating kubo version in `package.json` and `package-lock.json` + - [ ] **FINAL:** Merge PR and ship new ipfs-desktop release +- [ ] **FINAL only:** Update [docs.ipfs.tech](https://docs.ipfs.tech/): run [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow and merge the PR + +### Promotion + +- [ ] Create [IPFS Discourse](https://discuss.ipfs.tech) topic ([RC example](https://discuss.ipfs.tech/t/kubo-v0-38-0-rc2-is-out/19772), [FINAL example](https://discuss.ipfs.tech/t/kubo-v0-38-0-is-out/19795)) + - [ ] Title: `Kubo vX.Y.Z(-rcN) is out!`, tag: `kubo` + - [ ] Use title as heading (`##`) in description + - [ ] Include: GitHub release link, IPNS binaries, docker pull command, release notes + - [ ] Pin topic globally (make banner if no existing banner) +- [ ] Verify bot posted to [#ipfs-chatter](https://discord.com/channels/669268347736686612/669268347736686615) (Discord) or [#ipfs-chatter:ipfs.io](https://matrix.to/#/#ipfs-chatter:ipfs.io) (Matrix) +- [ ] **RC only:** Comment on release issue mentioning early testers ([example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478)) +- [ ] **FINAL only:** Comment on release issue with link ([example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975)) +- [ ] **FINAL 
only:** Create [blog.ipfs.tech](https://blog.ipfs.tech) entry ([example](https://github.com/ipfs/ipfs-blog/commit/32040d1e90279f21bad56b924fe4710bba5ba043)) +- [ ] **FINAL non-PATCH:** (optional) Post on social media ([bsky](https://bsky.app/profile/ipshipyard.com/post/3ltxcsrbn5s2k), [x.com](https://x.com/ipshipyard/status/1944867893226635603), [Reddit](https://www.reddit.com/r/ipfs/comments/1lzy6ze/release_v0360_ipfskubo/)) + +### Final Steps + +- [ ] **FINAL non-PATCH:** Create dependency update PR + - [ ] Review direct dependencies from root `go.mod` (⚠️ do **NOT** run `go get -u` as it will upgrade indirect dependencies which may cause problems) + - [ ] Run `make mod_tidy` + - [ ] Create PR with `go.mod` and `go.sum` updates + - [ ] Add PR to next release milestone +- [ ] **FINAL non-PATCH:** Create next release issue ([example](https://github.com/ipfs/kubo/issues/10816)) +- [ ] **FINAL only:** Close release issue \ No newline at end of file diff --git a/docs/add-code-flow.md b/docs/add-code-flow.md index a13c7177d..353d47166 100644 --- a/docs/add-code-flow.md +++ b/docs/add-code-flow.md @@ -1,6 +1,6 @@ # IPFS : The `Add` command demystified -The goal of this document is to capture the code flow for adding a file (see the `coreapi` package) using the IPFS CLI, in the process exploring some datastructures and packages like `ipld.Node` (aka `dagnode`), `FSNode`, `MFS`, etc. +The goal of this document is to capture the code flow for adding a file (see the `coreapi` package) using the IPFS CLI, in the process exploring some data structures and packages like `ipld.Node` (aka `dagnode`), `FSNode`, `MFS`, etc. ## Concepts - [Files](https://github.com/ipfs/docs/issues/133) @@ -55,7 +55,7 @@ Within the function, a new `Adder` is created with the configured `Blockstore` a 1. 
**[`adder.add(io.Reader)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L115)** - *Create and return the **root** __DAG__ node* - This method converts the input data (`io.Reader`) to a __DAG__ tree, by splitting the data into _chunks_ using the `Chunker` and organizing them in to a __DAG__ (with a *trickle* or *balanced* layout. See [balanced](https://github.com/ipfs/go-unixfs/blob/6b769632e7eb8fe8f302e3f96bf5569232e7a3ee/importer/balanced/builder.go) for more info). + This method converts the input data (`io.Reader`) to a __DAG__ tree, by splitting the data into _chunks_ using the `Chunker` and organizing them into a __DAG__ (with a *trickle* or *balanced* layout. See [balanced](https://github.com/ipfs/go-unixfs/blob/6b769632e7eb8fe8f302e3f96bf5569232e7a3ee/importer/balanced/builder.go) for more info). The method returns the **root** `ipld.Node` of the __DAG__. @@ -70,7 +70,7 @@ Within the function, a new `Adder` is created with the configured `Blockstore` a - **[MFS] [`PutNode(mfs.Root, path, ipld.Node)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/ops.go#L86)** - *Insert node at path into given `MFS`* - The `path` param is used to determine the `MFS Directory`, which is first looked up in the `MFS` using `lookupDir()` function. This is followed by adding the **root** __DAG__ node (`ipld.Node`) in to this `Directory` using `directory.AddChild()` method. + The `path` param is used to determine the `MFS Directory`, which is first looked up in the `MFS` using `lookupDir()` function. This is followed by adding the **root** __DAG__ node (`ipld.Node`) into this `Directory` using `directory.AddChild()` method. 
- **[MFS] Add Child To `UnixFS`** - **[`directory.AddChild(filename, ipld.Node)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/dir.go#L350)** - *Add **root** __DAG__ node under this directory* @@ -99,4 +99,4 @@ Within the function, a new `Adder` is created with the configured `Blockstore` a - **[`adder.PinRoot()`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L171)** - *Pin all files under the `MFS` **root*** - The whole process ends with `PinRoot` recursively pinning all the files under the `MFS` **root** \ No newline at end of file + The whole process ends with `PinRoot` recursively pinning all the files under the `MFS` **root** diff --git a/docs/changelogs/v0.10.md b/docs/changelogs/v0.10.md index ea92201a9..429ff7d37 100644 --- a/docs/changelogs/v0.10.md +++ b/docs/changelogs/v0.10.md @@ -80,7 +80,7 @@ Performance profiles can now be collected using `ipfs diag profile`. If you need #### 🍎 Mac OS notarized binaries -The go-ipfs and related migration binaries (for both Intel and Apple Sillicon) are now signed and notarized to make Mac OS installation easier. +The go-ipfs and related migration binaries (for both Intel and Apple Silicon) are now signed and notarized to make Mac OS installation easier. #### 👨‍👩‍👦 Improved MDNS @@ -101,7 +101,7 @@ See `ipfs swarm peering --help` for more details. - github.com/ipfs/go-ipfs: - fuse: load unixfs adls as their dagpb substrates - enable the legacy mDNS implementation - - test: add dag get --ouput-codec test + - test: add dag get --output-codec test - change ipfs dag get flag name from format to output-codec - test: check behavior of loading UnixFS sharded directories with missing shards - remove dag put option shortcuts @@ -320,7 +320,7 @@ See `ipfs swarm peering --help` for more details. - More changelog grooming. - Changelog grooming. - node/tests: put most of the schema test cases here - - Add more explicit discussion of indicies to ListIterator. 
+ - Add more explicit discussion of indices to ListIterator. - node/bindnode: start of a reflect-based Node implementation - add DeepEqual and start using it in tests - Add enumerate methods to the multicodec registries. ([ipld/go-ipld-prime#176](https://github.com/ipld/go-ipld-prime/pull/176)) @@ -390,7 +390,7 @@ See `ipfs swarm peering --help` for more details. - remove note about go modules in README ([libp2p/go-libp2p-noise#100](https://github.com/libp2p/go-libp2p-noise/pull/100)) - fix: remove deprecated call to pk.Bytes ([libp2p/go-libp2p-noise#99](https://github.com/libp2p/go-libp2p-noise/pull/99)) - github.com/libp2p/go-libp2p-peerstore (v0.2.7 -> v0.2.8): - - Fix perfomance issue in updating addr book ([libp2p/go-libp2p-peerstore#141](https://github.com/libp2p/go-libp2p-peerstore/pull/141)) + - Fix performance issue in updating addr book ([libp2p/go-libp2p-peerstore#141](https://github.com/libp2p/go-libp2p-peerstore/pull/141)) - Fix test flakes ([libp2p/go-libp2p-peerstore#164](https://github.com/libp2p/go-libp2p-peerstore/pull/164)) - Only remove records during GC ([libp2p/go-libp2p-peerstore#135](https://github.com/libp2p/go-libp2p-peerstore/pull/135)) - sync: update CI config files ([libp2p/go-libp2p-peerstore#160](https://github.com/libp2p/go-libp2p-peerstore/pull/160)) diff --git a/docs/changelogs/v0.11.md b/docs/changelogs/v0.11.md index 98133052a..a3867c003 100644 --- a/docs/changelogs/v0.11.md +++ b/docs/changelogs/v0.11.md @@ -301,7 +301,7 @@ This work was [contributed](https://github.com/ipfs/go-ipfs/pull/8569) by [Ceram - fix(graphsync): make sure linkcontext is passed (#207) ([ipfs/go-graphsync#207](https://github.com/ipfs/go-graphsync/pull/207)) - Merge final v0.6.x commit history, and 0.8.0 changelog (#205) ([ipfs/go-graphsync#205](https://github.com/ipfs/go-graphsync/pull/205)) - Fix broken link to IPLD selector documentation (#189) ([ipfs/go-graphsync#189](https://github.com/ipfs/go-graphsync/pull/189)) - - fix: check errors before defering 
a close (#200) ([ipfs/go-graphsync#200](https://github.com/ipfs/go-graphsync/pull/200)) + - fix: check errors before deferring a close (#200) ([ipfs/go-graphsync#200](https://github.com/ipfs/go-graphsync/pull/200)) - chore: fix checks (#197) ([ipfs/go-graphsync#197](https://github.com/ipfs/go-graphsync/pull/197)) - Merge the v0.6.x commit history (#190) ([ipfs/go-graphsync#190](https://github.com/ipfs/go-graphsync/pull/190)) - Ready for universal CI (#187) ([ipfs/go-graphsync#187](https://github.com/ipfs/go-graphsync/pull/187)) diff --git a/docs/changelogs/v0.12.md b/docs/changelogs/v0.12.md index def891271..d87f5fc82 100644 --- a/docs/changelogs/v0.12.md +++ b/docs/changelogs/v0.12.md @@ -58,7 +58,7 @@ As usual, this release includes important fixes, some of which may be critical f - `ipfs refs local` will now list all blocks as if they were [raw]() CIDv1 instead of with whatever CID version and IPLD codecs they were stored with. All other functionality should remain the same. -Note: This change also effects [ipfs-update](https://github.com/ipfs/ipfs-update) so if you use that tool to mange your go-ipfs installation then grab ipfs-update v1.8.0 from [dist](https://dist.ipfs.tech/#ipfs-update). +Note: This change also affects [ipfs-update](https://github.com/ipfs/ipfs-update) so if you use that tool to manage your go-ipfs installation then grab ipfs-update v1.8.0 from [dist](https://dist.ipfs.tech/#ipfs-update). Keep reading to learn more details.
diff --git a/docs/changelogs/v0.13.md b/docs/changelogs/v0.13.md index 9bf4ee88a..a985f179c 100644 --- a/docs/changelogs/v0.13.md +++ b/docs/changelogs/v0.13.md @@ -53,7 +53,7 @@ View the linked [security advisory](https://github.com/ipfs/go-ipfs/security/adv - bump to newer blockstore err not found (#301) ([ipld/go-car#301](https://github.com/ipld/go-car/pull/301)) - Car command supports for `largebytes` nodes (#296) ([ipld/go-car#296](https://github.com/ipld/go-car/pull/296)) - fix(test): rootless fixture should have no roots, not null roots - - Allow extracton of a raw unixfs file (#284) ([ipld/go-car#284](https://github.com/ipld/go-car/pull/284)) + - Allow extraction of a raw unixfs file (#284) ([ipld/go-car#284](https://github.com/ipld/go-car/pull/284)) - cmd/car: use a better install command in the README - feat: --version selector for `car create` & update deps - feat: add option to create blockstore that writes a plain CARv1 (#288) ([ipld/go-car#288](https://github.com/ipld/go-car/pull/288)) @@ -537,7 +537,7 @@ The more fully featured yamux stream multiplexer is now prioritized over mplex f - Fix unixfs fetch (#364) ([ipfs/go-graphsync#364](https://github.com/ipfs/go-graphsync/pull/364)) - [Feature] UUIDs, protocol versioning, v2 protocol w/ dag-cbor messaging (#332) ([ipfs/go-graphsync#332](https://github.com/ipfs/go-graphsync/pull/332)) - feat(CHANGELOG): update for v0.12.0 - - Use do not send blocks for pause/resume & prevent processing of blocks on cancelled requests (#333) ([ipfs/go-graphsync#333](https://github.com/ipfs/go-graphsync/pull/333)) + - Use do not send blocks for pause/resume & prevent processing of blocks on canceled requests (#333) ([ipfs/go-graphsync#333](https://github.com/ipfs/go-graphsync/pull/333)) - Support unixfs reification in default linksystem (#329) ([ipfs/go-graphsync#329](https://github.com/ipfs/go-graphsync/pull/329)) - Don't run hooks on blocks we didn't have (#331) 
([ipfs/go-graphsync#331](https://github.com/ipfs/go-graphsync/pull/331)) - feat(responsemanager): trace full messages via links to responses (#325) ([ipfs/go-graphsync#325](https://github.com/ipfs/go-graphsync/pull/325)) diff --git a/docs/changelogs/v0.14.md b/docs/changelogs/v0.14.md index d725c1374..247570e9c 100644 --- a/docs/changelogs/v0.14.md +++ b/docs/changelogs/v0.14.md @@ -173,7 +173,7 @@ $ ipfs cid format -v 1 -b base256emoji bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylq - swarm: fix flaky TestDialExistingConnection test (#1509) ([libp2p/go-libp2p#1509](https://github.com/libp2p/go-libp2p/pull/1509)) - tcp: limit the number of connections in tcp suite test on non-linux hosts (#1507) ([libp2p/go-libp2p#1507](https://github.com/libp2p/go-libp2p/pull/1507)) - increase overly short require.Eventually intervals (#1501) ([libp2p/go-libp2p#1501](https://github.com/libp2p/go-libp2p/pull/1501)) - - tls: fix flaky handshake cancelation test (#1503) ([libp2p/go-libp2p#1503](https://github.com/libp2p/go-libp2p/pull/1503)) + - tls: fix flaky handshake cancellation test (#1503) ([libp2p/go-libp2p#1503](https://github.com/libp2p/go-libp2p/pull/1503)) - merge the transport test suite from go-libp2p-testing here ([libp2p/go-libp2p#1496](https://github.com/libp2p/go-libp2p/pull/1496)) - fix racy connection comparison in TestDialWorkerLoopBasic (#1499) ([libp2p/go-libp2p#1499](https://github.com/libp2p/go-libp2p/pull/1499)) - swarm: fix race condition in TestFailFirst (#1490) ([libp2p/go-libp2p#1490](https://github.com/libp2p/go-libp2p/pull/1490)) diff --git a/docs/changelogs/v0.16.md b/docs/changelogs/v0.16.md index 135ef4252..52fcdb165 100644 --- a/docs/changelogs/v0.16.md +++ b/docs/changelogs/v0.16.md @@ -106,7 +106,7 @@ The previous alternative is websocket secure, which require installing a reverse #### How to enable WebTransport -Thoses steps are temporary and wont be needed once we make it enabled by default. 
+Those steps are temporary and won't be needed once we make it enabled by default. 1. Enable the WebTransport transport: `ipfs config Swarm.Transports.Network.WebTransport --json true` @@ -191,7 +191,7 @@ For more information, see `ipfs add --help` and `ipfs files --help`. - docs: add WebTransport docs ([ipfs/kubo#9308](https://github.com/ipfs/kubo/pull/9308)) - chore: bump version to 0.16.0-rc1 - fix: ensure hasher is registered when using a hashing function - - feat: add webtransport as an optin transport ([ipfs/kubo#9293](https://github.com/ipfs/kubo/pull/9293)) + - feat: add webtransport as an opt-in transport ([ipfs/kubo#9293](https://github.com/ipfs/kubo/pull/9293)) - feat(gateway): _redirects file support (#8890) ([ipfs/kubo#8890](https://github.com/ipfs/kubo/pull/8890)) - docs: fix typo in changelog-v0.16.0.md - Readme: Rewrite introduction and featureset (#9211) ([ipfs/kubo#9211](https://github.com/ipfs/kubo/pull/9211)) @@ -265,7 +265,7 @@ For more information, see `ipfs add --help` and `ipfs files --help`.
- sync: update CI config files ([ipfs/go-pinning-service-http-client#21](https://github.com/ipfs/go-pinning-service-http-client/pull/21)) - github.com/ipld/edelweiss (v0.1.4 -> v0.2.0): - Release v0.2.0 (#60) ([ipld/edelweiss#60](https://github.com/ipld/edelweiss/pull/60)) - - feat: add cachable modifier to methods (#48) ([ipld/edelweiss#48](https://github.com/ipld/edelweiss/pull/48)) + - feat: add cacheable modifier to methods (#48) ([ipld/edelweiss#48](https://github.com/ipld/edelweiss/pull/48)) - adding licenses (#52) ([ipld/edelweiss#52](https://github.com/ipld/edelweiss/pull/52)) - sync: update CI config files ([ipld/edelweiss#56](https://github.com/ipld/edelweiss/pull/56)) - chore: replace deprecated ioutil with io/os ([ipld/edelweiss#59](https://github.com/ipld/edelweiss/pull/59)) diff --git a/docs/changelogs/v0.18.md b/docs/changelogs/v0.18.md index f2a22d84e..70ce9ef24 100644 --- a/docs/changelogs/v0.18.md +++ b/docs/changelogs/v0.18.md @@ -56,7 +56,7 @@ As much as possible, the aim is for a user to only think about how much memory t and not need to think about translating that to hard numbers for connections, streams, etc. More updates are likely in future Kubo releases, but with this release: 1. ``System.StreamsInbound`` is no longer bounded directly -2. ``System.ConnsInbound``, ``Transient.Memory``, ``Transiet.ConnsInbound`` have higher default computed values. +2. ``System.ConnsInbound``, ``Transient.Memory``, ``Transient.ConnsInbound`` have higher default computed values. 
### 📝 Changelog @@ -312,11 +312,11 @@ and various improvements have been made to improve the UX including: - github.com/ipfs/kubo: - fix: clarity: no user supplied rcmgr limits of 0 (#9563) ([ipfs/kubo#9563](https://github.com/ipfs/kubo/pull/9563)) - fix(gateway): undesired conversions to dag-json and friends (#9566) ([ipfs/kubo#9566](https://github.com/ipfs/kubo/pull/9566)) - - fix: ensure connmgr is smaller then autoscalled ressource limits + - fix: ensure connmgr is smaller than autoscaled resource limits - fix: typo in ensureConnMgrMakeSenseVsResourcesMgr - docs: clarify browser descriptions for webtransport - fix: update saxon download path - - fix: refuse to start if connmgr is smaller than ressource limits and not using none connmgr + - fix: refuse to start if connmgr is smaller than resource limits and not using none connmgr - fix: User-Agent sent to HTTP routers - test: port gateway sharness tests to Go tests - fix: do not download saxon in parallel @@ -338,7 +338,7 @@ and various improvements have been made to improve the UX including: - fix: disable provide over HTTP with Routing.Type=auto (#9511) ([ipfs/kubo#9511](https://github.com/ipfs/kubo/pull/9511)) - Update version.go - 'chore: update version.go' - - Clened up 0.18 changelog for release ([ipfs/kubo#9497](https://github.com/ipfs/kubo/pull/9497)) + - Cleaned up 0.18 changelog for release ([ipfs/kubo#9497](https://github.com/ipfs/kubo/pull/9497)) - feat: turn on WebTransport by default ([ipfs/kubo#9492](https://github.com/ipfs/kubo/pull/9492)) - feat: fast directory listings with DAG Size column (#9481) ([ipfs/kubo#9481](https://github.com/ipfs/kubo/pull/9481)) - feat: add basic CLI tests using Go Test @@ -484,7 +484,7 @@ and various improvements have been made to improve the UX including: - run gofmt -s - bump go.mod to Go 1.18 and run go fix - test for reader / sizing behavior on large files ([ipfs/go-unixfsnode#34](https://github.com/ipfs/go-unixfsnode/pull/34)) - - add helper to approximate test 
creation patter from ipfs-files ([ipfs/go-unixfsnode#32](https://github.com/ipfs/go-unixfsnode/pull/32)) + - add helper to approximate test creation pattern from ipfs-files ([ipfs/go-unixfsnode#32](https://github.com/ipfs/go-unixfsnode/pull/32)) - chore: remove Stebalien/go-bitfield in favour of ipfs/go-bitfield - github.com/ipfs/interface-go-ipfs-core (v0.7.0 -> v0.8.2): - chore: version 0.8.2 (#100) ([ipfs/interface-go-ipfs-core#100](https://github.com/ipfs/interface-go-ipfs-core/pull/100)) @@ -629,7 +629,7 @@ and various improvements have been made to improve the UX including: - feat: WithLocalPublication option to enable local only publishing on a topic (#481) ([libp2p/go-libp2p-pubsub#481](https://github.com/libp2p/go-libp2p-pubsub/pull/481)) - update pubsub deps (#491) ([libp2p/go-libp2p-pubsub#491](https://github.com/libp2p/go-libp2p-pubsub/pull/491)) - Gossipsub: Unsubscribe backoff (#488) ([libp2p/go-libp2p-pubsub#488](https://github.com/libp2p/go-libp2p-pubsub/pull/488)) - - Adds exponential backoff to re-spawing new streams for supposedly dead peers (#483) ([libp2p/go-libp2p-pubsub#483](https://github.com/libp2p/go-libp2p-pubsub/pull/483)) + - Adds exponential backoff to re-spawning new streams for supposedly dead peers (#483) ([libp2p/go-libp2p-pubsub#483](https://github.com/libp2p/go-libp2p-pubsub/pull/483)) - Publishing option for signing a message with a custom private key (#486) ([libp2p/go-libp2p-pubsub#486](https://github.com/libp2p/go-libp2p-pubsub/pull/486)) - fix unused GossipSubHistoryGossip, make seenMessages ttl configurable, make score params SeenMsgTTL configurable - Update README.md diff --git a/docs/changelogs/v0.19.md b/docs/changelogs/v0.19.md index f7e190a7e..f22270e28 100644 --- a/docs/changelogs/v0.19.md +++ b/docs/changelogs/v0.19.md @@ -89,7 +89,7 @@ There are further followups up on libp2p resource manager improvements in Kubo [ and 
[0.18.1](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.18.md#improving-libp2p-resource-management-integration): 1. `ipfs swarm limits` and `ipfs swarm stats` have been replaced by `ipfs swarm resources` to provide a single/combined view for limits and their current usage in a more intuitive ordering. 1. Removal of `Swarm.ResourceMgr.Limits` config. Instead [the power user can specify limits in a .json file that are fed directly to go-libp2p](https://github.com/ipfs/kubo/blob/master/docs/libp2p-resource-management.md#user-supplied-override-limits). This allows the power user to take advantage of the [new resource manager types introduced in go-libp2p 0.25](https://github.com/libp2p/go-libp2p/blob/master/CHANGELOG.md#new-resource-manager-types-) including "use default", "unlimited", "block all". - - Note: we don't expect most users to need these capablities, but they are there if so. + - Note: we don't expect most users to need these capabilities, but they are there if so. 1. [Doc updates](https://github.com/ipfs/kubo/blob/master/docs/libp2p-resource-management.md). 
#### Gateways @@ -205,11 +205,11 @@ For more information and rational see [#9717](https://github.com/ipfs/kubo/issue - Merge Kubo: v0.18 ([ipfs/kubo#9581](https://github.com/ipfs/kubo/pull/9581)) - fix: clarity: no user supplied rcmgr limits of 0 (#9563) ([ipfs/kubo#9563](https://github.com/ipfs/kubo/pull/9563)) - fix(gateway): undesired conversions to dag-json and friends (#9566) ([ipfs/kubo#9566](https://github.com/ipfs/kubo/pull/9566)) - - fix: ensure connmgr is smaller then autoscalled ressource limits + - fix: ensure connmgr is smaller than autoscaled resource limits - fix: typo in ensureConnMgrMakeSenseVsResourcesMgr - docs: clarify browser descriptions for webtransport - fix: update saxon download path - - fix: refuse to start if connmgr is smaller than ressource limits and not using none connmgr + - fix: refuse to start if connmgr is smaller than resource limits and not using none connmgr - fix: User-Agent sent to HTTP routers - test: port gateway sharness tests to Go tests - fix: do not download saxon in parallel diff --git a/docs/changelogs/v0.2.md b/docs/changelogs/v0.2.md index 4e60221d5..4d42ea2f5 100644 --- a/docs/changelogs/v0.2.md +++ b/docs/changelogs/v0.2.md @@ -10,7 +10,7 @@ config file Bootstrap field changed accordingly.
users can upgrade cleanly with: - ipfs bootstrap >boostrap_peers + ipfs bootstrap >bootstrap_peers ipfs bootstrap rm --all diff --git a/docs/changelogs/v0.20.md b/docs/changelogs/v0.20.md index 3a6ce8f64..e26c0695d 100644 --- a/docs/changelogs/v0.20.md +++ b/docs/changelogs/v0.20.md @@ -471,7 +471,7 @@ You can read more about the rationale behind this decision on the [tracking issu - identify: fix stale comment (#2179) ([libp2p/go-libp2p#2179](https://github.com/libp2p/go-libp2p/pull/2179)) - relay service: add metrics (#2154) ([libp2p/go-libp2p#2154](https://github.com/libp2p/go-libp2p/pull/2154)) - identify: Fix IdentifyWait when Connected events happen out of order (#2173) ([libp2p/go-libp2p#2173](https://github.com/libp2p/go-libp2p/pull/2173)) - - chore: fix ressource manager's README (#2168) ([libp2p/go-libp2p#2168](https://github.com/libp2p/go-libp2p/pull/2168)) + - chore: fix resource manager's README (#2168) ([libp2p/go-libp2p#2168](https://github.com/libp2p/go-libp2p/pull/2168)) - relay: fix deadlock when closing (#2171) ([libp2p/go-libp2p#2171](https://github.com/libp2p/go-libp2p/pull/2171)) - core: remove LocalPrivateKey method from network.Conn interface (#2144) ([libp2p/go-libp2p#2144](https://github.com/libp2p/go-libp2p/pull/2144)) - routed host: return connection error instead of routing error (#2169) ([libp2p/go-libp2p#2169](https://github.com/libp2p/go-libp2p/pull/2169)) diff --git a/docs/changelogs/v0.21.md b/docs/changelogs/v0.21.md index 4dd29c5ed..e8511d981 100644 --- a/docs/changelogs/v0.21.md +++ b/docs/changelogs/v0.21.md @@ -75,7 +75,7 @@ The [`go-ipfs-http-client`](https://github.com/ipfs/go-ipfs-http-client) RPC has been migrated into [`kubo/client/rpc`](../../client/rpc). 
With this change the two will be kept in sync, in some previous releases we -updated the CoreAPI with new Kubo features but forgot to port thoses to the +updated the CoreAPI with new Kubo features but forgot to port those to the http-client, making it impossible to use them together with the same coreapi version. @@ -142,7 +142,7 @@ Shared Size: 2048 Ratio: 1.615755 ``` -`ipfs --enc=json dag stat`'s keys are a non breaking change, new keys have been added but old keys with previous sementics are still here. +`ipfs --enc=json dag stat`'s keys are a non breaking change, new keys have been added but old keys with previous semantics are still here. #### Accelerated DHT Client is no longer experimental @@ -263,7 +263,7 @@ should be using AcceleratedDHTClient because they are falling behind. - chore: release v0.24.0 - fix: don't add unresponsive DHT servers to the Routing Table (#820) ([libp2p/go-libp2p-kad-dht#820](https://github.com/libp2p/go-libp2p-kad-dht/pull/820)) - github.com/libp2p/go-libp2p-kbucket (v0.5.0 -> v0.6.3): - - fix: fix abba bug in UsefullNewPeer ([libp2p/go-libp2p-kbucket#122](https://github.com/libp2p/go-libp2p-kbucket/pull/122)) + - fix: fix abba bug in UsefulNewPeer ([libp2p/go-libp2p-kbucket#122](https://github.com/libp2p/go-libp2p-kbucket/pull/122)) - chore: release v0.6.2 ([libp2p/go-libp2p-kbucket#121](https://github.com/libp2p/go-libp2p-kbucket/pull/121)) - Replacing UsefulPeer() with UsefulNewPeer() ([libp2p/go-libp2p-kbucket#120](https://github.com/libp2p/go-libp2p-kbucket/pull/120)) - chore: release 0.6.1 ([libp2p/go-libp2p-kbucket#119](https://github.com/libp2p/go-libp2p-kbucket/pull/119)) diff --git a/docs/changelogs/v0.22.md b/docs/changelogs/v0.22.md index 3aa55f30e..503c618fc 100644 --- a/docs/changelogs/v0.22.md +++ b/docs/changelogs/v0.22.md @@ -236,7 +236,7 @@ This includes a breaking change to `ipfs id` and some of the `ipfs swarm` comman - chore: cleanup error handling in compparallel - fix: correctly handle errors in 
compparallel - fix: make the ProvideMany docs clearer - - perf: remove goroutine that just waits before closing with a synchrous waitgroup + - perf: remove goroutine that just waits before closing with a synchronous waitgroup - github.com/libp2p/go-nat (v0.1.0 -> v0.2.0): - release v0.2.0 (#30) ([libp2p/go-nat#30](https://github.com/libp2p/go-nat/pull/30)) - update deps, use contexts on UPnP functions (#29) ([libp2p/go-nat#29](https://github.com/libp2p/go-nat/pull/29)) diff --git a/docs/changelogs/v0.23.md b/docs/changelogs/v0.23.md index 70c1d460a..10061fdf4 100644 --- a/docs/changelogs/v0.23.md +++ b/docs/changelogs/v0.23.md @@ -27,7 +27,7 @@ Mplex is being deprecated, this is because it is unreliable and randomly drop streams when sending data *too fast*. -New pieces of code rely on backpressure, that means the stream will dynamicaly +New pieces of code rely on backpressure, that means the stream will dynamically slow down the sending rate if data is getting backed up. Backpressure is provided by **Yamux** and **QUIC**. @@ -111,7 +111,7 @@ the `/quic-v1` addresses only. For more background information, check [issue #94 Thanks to [probelab.io's RFM17.1](https://github.com/plprobelab/network-measurements/blob/master/results/rfm17.1-sharing-prs-with-multiaddresses.md) DHT servers will [now cache the addresses of content hosts for the lifetime of the provider record](https://github.com/libp2p/go-libp2p-kad-dht/commit/777160f164b8c187c534debd293157031e9f3a02). -This means clients who resolve content from theses servers get a responses which include both peer id and multiaddresses. +This means clients who resolve content from these servers get a responses which include both peer id and multiaddresses. In most cases this enables skipping a second query which resolves the peer id to multiaddresses for stable enough peers. This will improve content fetching lantency in the network overtime as servers updates. 
@@ -175,7 +175,7 @@ Thx a lot @bmwiedemann for debugging this issue. - chore: bump boxo for verifcid breaking changes - chore: remove outdated comment (#10077) ([ipfs/kubo#10077](https://github.com/ipfs/kubo/pull/10077)) - chore: remove deprecated testground plans - - feat: allow users to optin again into mplex + - feat: allow users to option again into mplex - feat: remove Mplex - docs(readme): minimal reqs (#10066) ([ipfs/kubo#10066](https://github.com/ipfs/kubo/pull/10066)) - docs: add v0.23.md diff --git a/docs/changelogs/v0.24.md b/docs/changelogs/v0.24.md index 9ca7fa84e..7e0a75591 100644 --- a/docs/changelogs/v0.24.md +++ b/docs/changelogs/v0.24.md @@ -62,7 +62,7 @@ record remains cached before checking an upstream routing system, such as Amino DHT, for updates. The TTL value in the IPNS record now serves as a hint for: - `boxo/namesys`: the internal cache, determining how long the IPNS resolution - result is cached before asking upsteam routing systems for updates. + result is cached before asking upstream routing systems for updates. - `boxo/gateway`: the `Cache-Control` HTTP header in responses to requests made for `/ipns/name` content paths. diff --git a/docs/changelogs/v0.25.md b/docs/changelogs/v0.25.md index db610044a..c1ac973c3 100644 --- a/docs/changelogs/v0.25.md +++ b/docs/changelogs/v0.25.md @@ -44,7 +44,7 @@ After deprecating and removing mplex support by default in [v0.23.0](https://git We now fully removed it. If you still need mplex support to talk with other pieces of software, please try updating them, and if they don't support yamux or QUIC [talk to us about it](https://github.com/ipfs/kubo/issues/new/choose). 
-Mplex is unreliable by design, it will drop data and generete errors when sending data *too fast*, +Mplex is unreliable by design, it will drop data and generate errors when sending data *too fast*, yamux and QUIC support backpressure, that means if we send data faster than the remote machine can process it, we slows down to match the remote's speed. #### Graphsync Experiment Removal diff --git a/docs/changelogs/v0.27.md b/docs/changelogs/v0.27.md index e5bd895ca..aba290cf3 100644 --- a/docs/changelogs/v0.27.md +++ b/docs/changelogs/v0.27.md @@ -113,7 +113,7 @@ Kubo now only uses [trustless requests](https://specs.ipfs.tech/http-gateways/tr - github.com/multiformats/go-multiaddr (v0.12.1 -> v0.12.2): - chore: release v0.12.2 - tests: add round trip equality check to fuzz (#232) ([multiformats/go-multiaddr#232](https://github.com/multiformats/go-multiaddr/pull/232)) - - fix: correctly parse ports as uint16 and explicitely fail on overflows (#228) ([multiformats/go-multiaddr#228](https://github.com/multiformats/go-multiaddr/pull/228)) + - fix: correctly parse ports as uint16 and explicitly fail on overflows (#228) ([multiformats/go-multiaddr#228](https://github.com/multiformats/go-multiaddr/pull/228)) - replace custom random tests with testing.F (#227) ([multiformats/go-multiaddr#227](https://github.com/multiformats/go-multiaddr/pull/227))
diff --git a/docs/changelogs/v0.29.md b/docs/changelogs/v0.29.md index 8c45bbfca..82ec3eab2 100644 --- a/docs/changelogs/v0.29.md +++ b/docs/changelogs/v0.29.md @@ -80,7 +80,7 @@ The hash function, CID version, or UnixFS raw leaves and chunker behaviors can b - github.com/ipfs/go-ipfs-exchange-interface (v0.2.0 -> v0.2.1): - chore: bump version - Deprecate types and readme (#29) ([ipfs/go-ipfs-exchange-interface#29](https://github.com/ipfs/go-ipfs-exchange-interface/pull/29)) - - docs: Add proper documenation to the interface. + - docs: Add proper documentation to the interface. - github.com/ipfs/go-verifcid (v0.0.2 -> v0.0.3): - chore: bump version - chore: deprecate types and readme diff --git a/docs/changelogs/v0.30.md b/docs/changelogs/v0.30.md index 36c3a5c75..742190c0a 100644 --- a/docs/changelogs/v0.30.md +++ b/docs/changelogs/v0.30.md @@ -121,7 +121,7 @@ $ # cli client, in different terminal can find socket via /api file $ cat $IPFS_PATH/api /unix/tmp/kubo.socket -$ # or have it pased via --api +$ # or have it passed via --api $ ipfs --api=/unix/tmp/kubo.socket id ``` @@ -150,7 +150,7 @@ Daemon is ready The previous lengthy listing of all listener and announced multiaddrs has been removed due to its complexity, especially with modern libp2p nodes sharing multiple transports and long lists of `/webtransport` and `/webrtc-direct` certhashes. The output now features a simplified list of swarm listeners, displayed in the format `host:port (TCP+UDP)`, which provides essential information for debugging connectivity issues, particularly related to port forwarding. -Announced libp2p addresses are no longer printed on startup, because libp2p may change or augument them based on AutoNAT, relay, and UPnP state. Instead, users are prompted to run `ipfs id` to obtain up-to-date list of listeners and announced multiaddrs in libp2p format. 
+Announced libp2p addresses are no longer printed on startup, because libp2p may change or augment them based on AutoNAT, relay, and UPnP state. Instead, users are prompted to run `ipfs id` to obtain up-to-date list of listeners and announced multiaddrs in libp2p format. #### Commands Preserve Specified Hostname @@ -183,7 +183,7 @@ When executing a [CLI command](https://docs.ipfs.tech/reference/kubo/cli/) over - fix(daemon): panic in kubo/daemon.go:595 (#10473) ([ipfs/kubo#10473](https://github.com/ipfs/kubo/pull/10473)) - feat: webui v4.3.0 (#10477) ([ipfs/kubo#10477](https://github.com/ipfs/kubo/pull/10477)) - docs(readme): add Gentoo Linux (#10474) ([ipfs/kubo#10474](https://github.com/ipfs/kubo/pull/10474)) - - libp2p: default to prefering TLS ([ipfs/kubo#10227](https://github.com/ipfs/kubo/pull/10227)) + - libp2p: default to preferring TLS ([ipfs/kubo#10227](https://github.com/ipfs/kubo/pull/10227)) - docs: document unofficial Ubuntu PPA ([ipfs/kubo#10467](https://github.com/ipfs/kubo/pull/10467)) - feat: run AutoNAT V2 service in addition to V1 (#10468) ([ipfs/kubo#10468](https://github.com/ipfs/kubo/pull/10468)) - feat: go-libp2p 0.36 and /webrtc-direct listener (#10463) ([ipfs/kubo#10463](https://github.com/ipfs/kubo/pull/10463)) diff --git a/docs/changelogs/v0.31.md b/docs/changelogs/v0.31.md index ef1d4bb1b..e055cc9f4 100644 --- a/docs/changelogs/v0.31.md +++ b/docs/changelogs/v0.31.md @@ -6,7 +6,10 @@ - [Overview](#overview) - [🔦 Highlights](#-highlights) + - [Experimental Pebble Datastore](#experimental-pebble-datastore) + - [New metrics](#new-metrics) - [`lowpower` profile no longer breaks DHT announcements](#lowpower-profile-no-longer-breaks-dht-announcements) + - [go 1.23, boxo 0.24 and go-libp2p 0.36.5](#go-123-boxo-024-and-go-libp2p-0365) - [📝 Changelog](#-changelog) - [👨‍👩‍👧‍👦 Contributors](#-contributors) @@ -14,18 +17,138 @@ ### 🔦 Highlights +#### Experimental Pebble Datastore + 
+[Pebble](https://github.com/ipfs/kubo/blob/master/docs/config.md#pebbleds-profile) provides a high-performance alternative to leveldb as the datastore, and provides a modern replacement for [legacy badgerv1](https://github.com/ipfs/kubo/blob/master/docs/config.md#badgerds-profile). + +A fresh Kubo node can be initialized with [`pebbleds` profile](https://github.com/ipfs/kubo/blob/master/docs/config.md#pebbleds-profile) via `ipfs init --profile pebbleds`. + +There are a number of parameters available for tuning pebble's performance to your specific needs. Default values are used for any parameters that are not configured or are set to their zero-value. +For a description of the available tuning parameters, see [kubo/docs/datastores.md#pebbleds](https://github.com/ipfs/kubo/blob/master/docs/datastores.md#pebbleds). + +#### New metrics + +- Added 3 new go metrics: `go_gc_gogc_percent`, `go_gc_gomemlimit_bytes` and `go_sched_gomaxprocs_threads` as those are [recommended by the Go team](https://github.com/prometheus/client_golang/pull/1559) +- Added [network usage metrics](https://github.com/prometheus/client_golang/pull/1555): `process_network_receive_bytes_total` and `process_network_transmit_bytes_total` +- Removed `go_memstat_lookups_total` metric [which was always 0](https://github.com/prometheus/client_golang/pull/1577) + #### `lowpower` profile no longer breaks DHT announcements We've notices users were applying `lowpower` profile, and then reporting content routing issues. This was because `lowpower` disabled reprovider system and locally hosted data was no longer announced on Amino DHT. -This release changes [`lowpower` profile](https://github.com/ipfs/kubo/blob/master/docs/config.md#lowpower-profile) to not change reprovider settings, ensuring the new users are not sabotaging themselves. 
It also adds [`annouce-on`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-on-profile) and [`announce-off`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-off-profile) profiles for controlling announcement settings separately. +This release changes [`lowpower` profile](https://github.com/ipfs/kubo/blob/master/docs/config.md#lowpower-profile) to not change reprovider settings, ensuring the new users are not sabotaging themselves. It also adds [`announce-on`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-on-profile) and [`announce-off`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-off-profile) profiles for controlling announcement settings separately. > [!IMPORTANT] > If you've ever applied the `lowpower` profile before, there is a high chance your node is not announcing to DHT anymore. -> If you have `Reprovider.Interval` set to `0` you may want to wet it to `22h` (or run `ipfs config profile apply announce-on`) to fix your system. +> If you have `Reprovider.Interval` set to `0` you may want to set it to `22h` (or run `ipfs config profile apply announce-on`) to fix your system. > > As a convenience, `ipfs daemon` will warn if reprovide system is disabled, creating oportinity to fix configuration if it was not intentional. +#### go 1.23, boxo 0.24 and go-libp2p 0.36.5 + +Various bugfixes. Please update. + ### 📝 Changelog +
Full Changelog + +- github.com/ipfs/kubo: + - fix: go 1.23(.2) (#10540) ([ipfs/kubo#10540](https://github.com/ipfs/kubo/pull/10540)) + - chore: bump version to 0.32.0-dev + - feat(routing/http): support IPIP-484 and streaming (#10534) ([ipfs/kubo#10534](https://github.com/ipfs/kubo/pull/10534)) + - fix(daemon): webui URL when rpc is catch-all (#10520) ([ipfs/kubo#10520](https://github.com/ipfs/kubo/pull/10520)) + - chore: update changelog and config doc with more info about pebble (#10533) ([ipfs/kubo#10533](https://github.com/ipfs/kubo/pull/10533)) + - feat: pebbleds profile and plugin (#10530) ([ipfs/kubo#10530](https://github.com/ipfs/kubo/pull/10530)) + - chore: dependency updates for 0.31 (#10511) ([ipfs/kubo#10511](https://github.com/ipfs/kubo/pull/10511)) + - feat: explicit announce-on/off profiles (#10524) ([ipfs/kubo#10524](https://github.com/ipfs/kubo/pull/10524)) + - fix(core): look for MFS root in local repo only (#8661) ([ipfs/kubo#8661](https://github.com/ipfs/kubo/pull/8661)) + - Fix issue in ResourceManager and nopfsPlugin about repo path (#10492) ([ipfs/kubo#10492](https://github.com/ipfs/kubo/pull/10492)) + - feat(bitswap): allow configuring WithWantHaveReplaceSize (#10512) ([ipfs/kubo#10512](https://github.com/ipfs/kubo/pull/10512)) + - refactor: simplify logic for MFS pinning (#10506) ([ipfs/kubo#10506](https://github.com/ipfs/kubo/pull/10506)) + - docs: clarify Gateway.PublicGateways (#10525) ([ipfs/kubo#10525](https://github.com/ipfs/kubo/pull/10525)) + - chore: clarify dep update in RELEASE_CHECKLIST.md (#10518) ([ipfs/kubo#10518](https://github.com/ipfs/kubo/pull/10518)) + - feat: ipfs-webui v4.3.2 (#10523) ([ipfs/kubo#10523](https://github.com/ipfs/kubo/pull/10523)) + - docs(config): add useful references + - docs(config): improve profile descriptions (#10517) ([ipfs/kubo#10517](https://github.com/ipfs/kubo/pull/10517)) + - docs: update RELEASE_CHECKLIST.md (#10496) ([ipfs/kubo#10496](https://github.com/ipfs/kubo/pull/10496)) + - chore: 
create next changelog (#10510) ([ipfs/kubo#10510](https://github.com/ipfs/kubo/pull/10510)) + - Merge Release: v0.30.0 [skip changelog] ([ipfs/kubo#10508](https://github.com/ipfs/kubo/pull/10508)) + - chore: boxo v0.23.0 and go-libp2p v0.36.3 (#10507) ([ipfs/kubo#10507](https://github.com/ipfs/kubo/pull/10507)) + - docs: replace outdated package paths described in rpc README (#10505) ([ipfs/kubo#10505](https://github.com/ipfs/kubo/pull/10505)) + - fix: switch back to go 1.22 (#10502) ([ipfs/kubo#10502](https://github.com/ipfs/kubo/pull/10502)) + - fix(cli): preserve hostname specified with --api in http request headers (#10497) ([ipfs/kubo#10497](https://github.com/ipfs/kubo/pull/10497)) + - chore: upgrade to go 1.23 (#10486) ([ipfs/kubo#10486](https://github.com/ipfs/kubo/pull/10486)) + - fix: error during config when running benchmarks (#10495) ([ipfs/kubo#10495](https://github.com/ipfs/kubo/pull/10495)) + - chore: update go-unixfsnode, cmds, and boxo (#10494) ([ipfs/kubo#10494](https://github.com/ipfs/kubo/pull/10494)) + - Docs fix spelling issues (#10493) ([ipfs/kubo#10493](https://github.com/ipfs/kubo/pull/10493)) + - chore: update version (#10491) ([ipfs/kubo#10491](https://github.com/ipfs/kubo/pull/10491)) +- github.com/ipfs/boxo (v0.23.0 -> v0.24.0): + - Release v0.24.0 ([ipfs/boxo#683](https://github.com/ipfs/boxo/pull/683)) +- github.com/ipfs/go-ipld-cbor (v0.1.0 -> v0.2.0): + - v0.2.0 + - deprecate DumpObject() in favor of better named Encode() + - add an EncodeWriter method, using the pooled marshallers + - fix expCid vs actualCid guard +- github.com/ipld/go-car/v2 (v2.13.1 -> v2.14.2): + - v2.14.2 bump + - fix: goreleaser v2 compat, trigger release-binaries with workflow_run + - v2.14.1 bump + - chore: update fuzz to Go 1.22 + - v2.14.0 bump + - fix(cmd): properly pick up --inverse and --cid-file args ([ipld/go-car#531](https://github.com/ipld/go-car/pull/531)) + - Re-factor cmd functions to library 
([ipld/go-car#524](https://github.com/ipld/go-car/pull/524)) + - ci: uci/copy-templates ([ipld/go-car#521](https://github.com/ipld/go-car/pull/521)) + - Add a `car ls --unixfs-blocks` to render two-column output ([ipld/go-car#514](https://github.com/ipld/go-car/pull/514)) +- github.com/libp2p/go-libp2p (v0.36.3 -> v0.36.5): + - chore: remove Roadmap file (#2954) ([libp2p/go-libp2p#2954](https://github.com/libp2p/go-libp2p/pull/2954)) + - fix: Release v0.36.5 + - autonatv2: recover from panics (#2992) ([libp2p/go-libp2p#2992](https://github.com/libp2p/go-libp2p/pull/2992)) + - basichost: ensure no duplicates in Addrs output (#2980) ([libp2p/go-libp2p#2980](https://github.com/libp2p/go-libp2p/pull/2980)) + - Release v0.36.4 + - peerstore: better GC in membacked peerstore (#2960) ([libp2p/go-libp2p#2960](https://github.com/libp2p/go-libp2p/pull/2960)) + - fix: use quic.Version instead of the deprecated quic.VersionNumber (#2955) ([libp2p/go-libp2p#2955](https://github.com/libp2p/go-libp2p/pull/2955)) + - tcp: fix metrics for multiple calls to Close (#2953) ([libp2p/go-libp2p#2953](https://github.com/libp2p/go-libp2p/pull/2953)) +- github.com/libp2p/go-libp2p-kbucket (v0.6.3 -> v0.6.4): + - release v0.6.4 ([libp2p/go-libp2p-kbucket#135](https://github.com/libp2p/go-libp2p-kbucket/pull/135)) + - feat: add log printing when peer added and removed table ([libp2p/go-libp2p-kbucket#134](https://github.com/libp2p/go-libp2p-kbucket/pull/134)) + - Upgrade to go-log v2.5.1 ([libp2p/go-libp2p-kbucket#132](https://github.com/libp2p/go-libp2p-kbucket/pull/132)) + - chore: update go-libp2p-asn-util +- github.com/multiformats/go-multiaddr-dns (v0.3.1 -> v0.4.0): + - Release v0.4.0 (#64) ([multiformats/go-multiaddr-dns#64](https://github.com/multiformats/go-multiaddr-dns/pull/64)) + - Limit total number of resolved addresses from DNS response (#63) ([multiformats/go-multiaddr-dns#63](https://github.com/multiformats/go-multiaddr-dns/pull/63)) + - fix!: Only resolve the first DNS-like 
component (#61) ([multiformats/go-multiaddr-dns#61](https://github.com/multiformats/go-multiaddr-dns/pull/61)) + - sync: update CI config files (#43) ([multiformats/go-multiaddr-dns#43](https://github.com/multiformats/go-multiaddr-dns/pull/43)) + - remove deprecated types ([multiformats/go-multiaddr-dns#37](https://github.com/multiformats/go-multiaddr-dns/pull/37)) + - remove Jenkinsfile ([multiformats/go-multiaddr-dns#40](https://github.com/multiformats/go-multiaddr-dns/pull/40)) + - sync: update CI config files (#29) ([multiformats/go-multiaddr-dns#29](https://github.com/multiformats/go-multiaddr-dns/pull/29)) + - use net.IP.Equal to compare IP addresses ([multiformats/go-multiaddr-dns#30](https://github.com/multiformats/go-multiaddr-dns/pull/30)) + +
+ ### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Will Scott | 3 | +731/-581 | 14 | +| Daniel N | 17 | +1034/-191 | 33 | +| Marco Munizaga | 5 | +721/-404 | 12 | +| Andrew Gillis | 9 | +765/-266 | 35 | +| Marcin Rataj | 17 | +568/-323 | 41 | +| Daniel Norman | 3 | +232/-111 | 10 | +| sukun | 4 | +93/-8 | 8 | +| Jorropo | 2 | +48/-45 | 5 | +| Marten Seemann | 3 | +19/-47 | 5 | +| fengzie | 1 | +29/-26 | 5 | +| Rod Vagg | 7 | +27/-11 | 9 | +| gopherfarm | 1 | +14/-14 | 6 | +| web3-bot | 3 | +13/-10 | 3 | +| Michael Muré | 2 | +16/-5 | 4 | +| i-norden | 1 | +9/-9 | 1 | +| Elias Rad | 1 | +7/-7 | 4 | +| Prithvi Shahi | 1 | +0/-11 | 2 | +| Lucas Molas | 1 | +5/-4 | 1 | +| elecbug | 1 | +6/-2 | 1 | +| gammazero | 2 | +2/-2 | 2 | +| chris erway | 1 | +2/-2 | 2 | +| Russell Dempsey | 1 | +2/-1 | 1 | +| guillaumemichel | 1 | +1/-1 | 1 | diff --git a/docs/changelogs/v0.32.md b/docs/changelogs/v0.32.md new file mode 100644 index 000000000..f00cca611 --- /dev/null +++ b/docs/changelogs/v0.32.md @@ -0,0 +1,207 @@ +# Kubo changelog v0.32 + +- [v0.32.0](#v0320) + +## v0.32.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [🎯 AutoTLS: Automatic Certificates for libp2p WebSockets via `libp2p.direct`](#-autotls-automatic-certificates-for-libp2p-websockets-via-libp2pdirect) + - [📦️ Dependency updates](#-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +#### 🎯 AutoTLS: Automatic Certificates for libp2p WebSockets via `libp2p.direct` + + + +This release introduces an experimental feature that significantly improves how browsers ([Helia](https://helia.io/), [Service Worker](https://inbrowser.link)) can connect to Kubo node. 
+ +Opt-in configuration allows a publicly dialable Kubo nodes (public IP, port forwarding, or NAT with uPnP) to obtain CA-signed TLS certificates for [libp2p Secure WebSocket (WSS)](https://github.com/libp2p/specs/blob/master/websockets/README.md) connections automatically. + +> [!TIP] +> To enable this feature, set `AutoTLS.Enabled` to `true` and add a listener for `/tls/sni/*.libp2p.direct/ws` on a separate TCP port: +> ```diff +> { +> + "AutoTLS": { "Enabled": true }, +> "Addresses": { +> "Swarm": { +> "/ip4/0.0.0.0/tcp/4001", +> + "/ip4/0.0.0.0/tcp/4002/tls/sni/*.libp2p.direct/ws", +> "/ip6/::/tcp/4001", +> + "/ip6/::/tcp/4002/tls/sni/*.libp2p.direct/ws", +> ``` +> After restarting your node for the first time you may need to wait 5-15 minutes to pass all checks and for the changes to take effect. +> We are working on sharing the same TCP port with other transports ([go-libp2p#2984](https://github.com/libp2p/go-libp2p/pull/2984)). + +See [`AutoTLS` configuration](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls) for more details how to enable it and what to expect. + +This is an early preview, we appreciate you testing and filling bug reports or feedback in the tracking issue at [kubo#10560](https://github.com/ipfs/kubo/issues/10560). + +#### 📦️ Dependency updates + +- update `ipfs-webui` to [v4.4.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.4.0) +- update `boxo` to [v0.24.1](https://github.com/ipfs/boxo/releases/tag/v0.24.1) + [v0.24.2](https://github.com/ipfs/boxo/releases/tag/v0.24.2) + [v0.24.3](https://github.com/ipfs/boxo/releases/tag/v0.24.3) + - This includes a number of fixes and bitswap improvements, and support for filtering from [IPIP-484](https://specs.ipfs.tech/ipips/ipip-0484/) in delegated HTTP routing and IPNI queries. +- update `go-libp2p` to [v0.37.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.37.0) + - This update required removal of `Swarm.RelayService.MaxReservationsPerPeer` configuration option from Kubo. 
If you had it set, remove it from your configuration file. +- update `go-libp2p-kad-dht` to [v0.27.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.27.0) + [v0.28.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.28.0) + [v0.28.1](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.28.1) +- update `go-libp2p-pubsub` to [v0.12.0](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.12.0) +- update `p2p-forge/client` to [v0.0.2](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.0.2) +- removed `go-homedir` + - The `github.com/mitchellh/go-homedir` repo is archived, no longer needed, and no longer maintained. + - `homedir.Dir` is replaced by the stdlib `os.UserHomeDir` + - `homedir.Expand` is replaced by `fsutil.ExpandHome` in the `github.com/ipfs/kubo/misc/fsutil` package. + - The new `github.com/ipfs/kubo/misc/fsutil` package contains file utility code previously located elsewhere in kubo. + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: 0.32.0 + - fix: go-libp2p-kad-dht v0.28.0 (#10578) ([ipfs/kubo#10578](https://github.com/ipfs/kubo/pull/10578)) + - chore: 0.32.0-rc2 + - feat: ipfs-webui v4.4.0 (#10574) ([ipfs/kubo#10574](https://github.com/ipfs/kubo/pull/10574)) + - chore: label implicit loggers + - chore: boxo v0.24.3 and p2p-forge v0.0.2 (#10572) ([ipfs/kubo#10572](https://github.com/ipfs/kubo/pull/10572)) + - chore: stop using go-homedir (#10568) ([ipfs/kubo#10568](https://github.com/ipfs/kubo/pull/10568)) + - fix(autotls): store certificates at the location from the repo path (#10566) ([ipfs/kubo#10566](https://github.com/ipfs/kubo/pull/10566)) + - chore: 0.32.0-rc1 + - docs(autotls): add note about separate port use (#10562) ([ipfs/kubo#10562](https://github.com/ipfs/kubo/pull/10562)) + - feat(AutoTLS): opt-in WSS certs from p2p-forge at libp2p.direct (#10521) ([ipfs/kubo#10521](https://github.com/ipfs/kubo/pull/10521)) + - chore: upgrade to boxo v0.24.2 (#10559) ([ipfs/kubo#10559](https://github.com/ipfs/kubo/pull/10559)) + - refactor: update to go-libp2p v0.37.0 (#10554) ([ipfs/kubo#10554](https://github.com/ipfs/kubo/pull/10554)) + - docs(config): explain what multiaddr is + - chore: update dependencies (#10548) ([ipfs/kubo#10548](https://github.com/ipfs/kubo/pull/10548)) + - chore: update test dependencies (#10555) ([ipfs/kubo#10555](https://github.com/ipfs/kubo/pull/10555)) + - chore(ci): adjust verbosity + - chore(ci): verbose build of test/bin deps + - chore(ci): build docker images for staging branch + - Create Changelog: v0.32 ([ipfs/kubo#10546](https://github.com/ipfs/kubo/pull/10546)) + - Merge release v0.31.0 ([ipfs/kubo#10545](https://github.com/ipfs/kubo/pull/10545)) + - chore: update RELEASE_CHECKLIST.md (#10544) ([ipfs/kubo#10544](https://github.com/ipfs/kubo/pull/10544)) + - feat: ipfs-webui v4.3.3 (#10543) ([ipfs/kubo#10543](https://github.com/ipfs/kubo/pull/10543)) + - chore: update RELEASE_CHECKLIST.md (#10542) 
([ipfs/kubo#10542](https://github.com/ipfs/kubo/pull/10542)) + - Add full changelog to release changelog + - fix: go 1.23(.2) (#10540) ([ipfs/kubo#10540](https://github.com/ipfs/kubo/pull/10540)) + - chore: bump version to 0.32.0-dev +- github.com/ipfs/boxo (v0.24.0 -> v0.24.3): + - Release v0.24.3 ([ipfs/boxo#713](https://github.com/ipfs/boxo/pull/713)) + - Merge branch 'main' into release + - Release v0.24.2 ([ipfs/boxo#707](https://github.com/ipfs/boxo/pull/707)) + - Release v0.24.1 ([ipfs/boxo#706](https://github.com/ipfs/boxo/pull/706)) +- github.com/ipfs/go-ipfs-cmds (v0.13.0 -> v0.14.0): + - chore: release v0.14.0 (#269) ([ipfs/go-ipfs-cmds#269](https://github.com/ipfs/go-ipfs-cmds/pull/269)) +- github.com/ipfs/go-ipfs-redirects-file (v0.1.1 -> v0.1.2): + - chore: v0.1.2 (#29) ([ipfs/go-ipfs-redirects-file#29](https://github.com/ipfs/go-ipfs-redirects-file/pull/29)) + - docs(readme): refer specs and ipip + - chore: update dependencies (#28) ([ipfs/go-ipfs-redirects-file#28](https://github.com/ipfs/go-ipfs-redirects-file/pull/28)) +- github.com/ipfs/go-metrics-prometheus (v0.0.2 -> v0.0.3): + - chore: release v0.0.3 (#24) ([ipfs/go-metrics-prometheus#24](https://github.com/ipfs/go-metrics-prometheus/pull/24)) + - chore: update deps and update go-log to v2 (#23) ([ipfs/go-metrics-prometheus#23](https://github.com/ipfs/go-metrics-prometheus/pull/23)) + - sync: update CI config files (#9) ([ipfs/go-metrics-prometheus#9](https://github.com/ipfs/go-metrics-prometheus/pull/9)) +- github.com/ipfs/go-unixfsnode (v1.9.1 -> v1.9.2): + - New release version ([ipfs/go-unixfsnode#78](https://github.com/ipfs/go-unixfsnode/pull/78)) + - chore: update dependencies +- github.com/libp2p/go-flow-metrics (v0.1.0 -> v0.2.0): + - chore: release v0.2.0 (#33) ([libp2p/go-flow-metrics#33](https://github.com/libp2p/go-flow-metrics/pull/33)) + - chore: cleanup readme (#31) ([libp2p/go-flow-metrics#31](https://github.com/libp2p/go-flow-metrics/pull/31)) + - ci: uci/update-go 
([libp2p/go-flow-metrics#27](https://github.com/libp2p/go-flow-metrics/pull/27)) + - fix(ewma): reduce the chances of fake bandwidth spikes (#8) ([libp2p/go-flow-metrics#8](https://github.com/libp2p/go-flow-metrics/pull/8)) + - chore: switch to typed atomics (#24) ([libp2p/go-flow-metrics#24](https://github.com/libp2p/go-flow-metrics/pull/24)) + - test: use mock clocks for all tests (#25) ([libp2p/go-flow-metrics#25](https://github.com/libp2p/go-flow-metrics/pull/25)) + - ci: uci/copy-templates ([libp2p/go-flow-metrics#21](https://github.com/libp2p/go-flow-metrics/pull/21)) +- github.com/libp2p/go-libp2p (v0.36.5 -> v0.37.0): + - Release v0.37.0 (#3013) ([libp2p/go-libp2p#3013](https://github.com/libp2p/go-libp2p/pull/3013)) + - feat: Add WithFxOption (#2956) ([libp2p/go-libp2p#2956](https://github.com/libp2p/go-libp2p/pull/2956)) + - chore: update imports to use slices package (#3007) ([libp2p/go-libp2p#3007](https://github.com/libp2p/go-libp2p/pull/3007)) + - Change latency metrics buckets (#3012) ([libp2p/go-libp2p#3012](https://github.com/libp2p/go-libp2p/pull/3012)) + - chore: bump deps in preparation for v0.37.0 (#3011) ([libp2p/go-libp2p#3011](https://github.com/libp2p/go-libp2p/pull/3011)) + - autonat: fix interaction with autorelay (#2967) ([libp2p/go-libp2p#2967](https://github.com/libp2p/go-libp2p/pull/2967)) + - swarm: add a peer dial latency metric (#2959) ([libp2p/go-libp2p#2959](https://github.com/libp2p/go-libp2p/pull/2959)) + - peerstore: limit number of non connected peers in addrbook (#2971) ([libp2p/go-libp2p#2971](https://github.com/libp2p/go-libp2p/pull/2971)) + - fix: swarm: refactor address resolution (#2990) ([libp2p/go-libp2p#2990](https://github.com/libp2p/go-libp2p/pull/2990)) + - Add backoff for updating local IP addresses on error (#2999) ([libp2p/go-libp2p#2999](https://github.com/libp2p/go-libp2p/pull/2999)) + - libp2phttp: HTTP Peer ID Authentication (#2854) ([libp2p/go-libp2p#2854](https://github.com/libp2p/go-libp2p/pull/2854)) + 
- relay: make only 1 reservation per peer (#2974) ([libp2p/go-libp2p#2974](https://github.com/libp2p/go-libp2p/pull/2974)) + - autonatv2: recover from panics (#2992) ([libp2p/go-libp2p#2992](https://github.com/libp2p/go-libp2p/pull/2992)) + - basichost: ensure no duplicates in Addrs output (#2980) ([libp2p/go-libp2p#2980](https://github.com/libp2p/go-libp2p/pull/2980)) + - fix(websocket): re-enable websocket transport test (#2987) ([libp2p/go-libp2p#2987](https://github.com/libp2p/go-libp2p/pull/2987)) + - feat(websocket): switch the underlying http server logger to use ipfs/go-log (#2985) ([libp2p/go-libp2p#2985](https://github.com/libp2p/go-libp2p/pull/2985)) + - peerstore: better GC in membacked peerstore (#2960) ([libp2p/go-libp2p#2960](https://github.com/libp2p/go-libp2p/pull/2960)) + - connmgr: reduce log level for untagging untracked peers ([libp2p/go-libp2p#2961](https://github.com/libp2p/go-libp2p/pull/2961)) + - fix: use quic.Version instead of the deprecated quic.VersionNumber (#2955) ([libp2p/go-libp2p#2955](https://github.com/libp2p/go-libp2p/pull/2955)) + - tcp: fix metrics for multiple calls to Close (#2953) ([libp2p/go-libp2p#2953](https://github.com/libp2p/go-libp2p/pull/2953)) + - chore: remove Roadmap file (#2954) ([libp2p/go-libp2p#2954](https://github.com/libp2p/go-libp2p/pull/2954)) + - chore: add a funding JSON file to apply for Optimism rPGF round 5 (#2940) ([libp2p/go-libp2p#2940](https://github.com/libp2p/go-libp2p/pull/2940)) + - Fix: WebSocket: Clone TLS config before creating a new listener + - fix: enable dctur when interface address is public (#2931) ([libp2p/go-libp2p#2931](https://github.com/libp2p/go-libp2p/pull/2931)) + - fix: QUIC/Webtransport Transports now will prefer their owned listeners for dialing out (#2936) ([libp2p/go-libp2p#2936](https://github.com/libp2p/go-libp2p/pull/2936)) + - ci: uci/update-go (#2937) ([libp2p/go-libp2p#2937](https://github.com/libp2p/go-libp2p/pull/2937)) + - fix: slice append value (#2938) 
([libp2p/go-libp2p#2938](https://github.com/libp2p/go-libp2p/pull/2938)) + - webrtc: wait for listener context before dropping connection (#2932) ([libp2p/go-libp2p#2932](https://github.com/libp2p/go-libp2p/pull/2932)) + - ci: use go1.23, drop go1.21 (#2933) ([libp2p/go-libp2p#2933](https://github.com/libp2p/go-libp2p/pull/2933)) + - Fail on any test timeout (#2929) ([libp2p/go-libp2p#2929](https://github.com/libp2p/go-libp2p/pull/2929)) + - test: Try to fix test timeout (#2930) ([libp2p/go-libp2p#2930](https://github.com/libp2p/go-libp2p/pull/2930)) + - ci: Out of the tarpit (#2923) ([libp2p/go-libp2p#2923](https://github.com/libp2p/go-libp2p/pull/2923)) + - Make BlackHoleState type public (#2917) ([libp2p/go-libp2p#2917](https://github.com/libp2p/go-libp2p/pull/2917)) + - Fix proto import paths (#2920) ([libp2p/go-libp2p#2920](https://github.com/libp2p/go-libp2p/pull/2920)) +- github.com/libp2p/go-libp2p-kad-dht (v0.26.1 -> v0.28.0): + - chore: release v0.28.0 (#998) ([libp2p/go-libp2p-kad-dht#998](https://github.com/libp2p/go-libp2p-kad-dht/pull/998)) + - fix: set context timeout for `queryPeer` (#996) ([libp2p/go-libp2p-kad-dht#996](https://github.com/libp2p/go-libp2p-kad-dht/pull/996)) + - refactor: document and expose Amino DHT defaults (#990) ([libp2p/go-libp2p-kad-dht#990](https://github.com/libp2p/go-libp2p-kad-dht/pull/990)) + - Use timeout context for NewStream call ([libp2p/go-libp2p-kad-dht#994](https://github.com/libp2p/go-libp2p-kad-dht/pull/994)) + - release v0.27.0 ([libp2p/go-libp2p-kad-dht#992](https://github.com/libp2p/go-libp2p-kad-dht/pull/992)) + - Add new DHT option to provide custom pb.MessageSender ([libp2p/go-libp2p-kad-dht#991](https://github.com/libp2p/go-libp2p-kad-dht/pull/991)) + - fix: replace deprecated Boxo function ([libp2p/go-libp2p-kad-dht#987](https://github.com/libp2p/go-libp2p-kad-dht/pull/987)) + - fix(query): reverting changes on TestRTEvictionOnFailedQuery 
([libp2p/go-libp2p-kad-dht#984](https://github.com/libp2p/go-libp2p-kad-dht/pull/984)) +- github.com/libp2p/go-libp2p-pubsub (v0.11.0 -> v0.12.0): + - chore: upgrade go-libp2p (#575) ([libp2p/go-libp2p-pubsub#575](https://github.com/libp2p/go-libp2p-pubsub/pull/575)) + - GossipSub v1.2: IDONTWANT control message and priority queue. (#553) ([libp2p/go-libp2p-pubsub#553](https://github.com/libp2p/go-libp2p-pubsub/pull/553)) + - Re-enable disabled gossipsub test (#566) ([libp2p/go-libp2p-pubsub#566](https://github.com/libp2p/go-libp2p-pubsub/pull/566)) + - chore: staticcheck + - chore: update rand usage + - chore: go fmt + - chore: add or force update version.json + - added missing Close call on the AddrBook member of GossipSubRouter (#568) ([libp2p/go-libp2p-pubsub#568](https://github.com/libp2p/go-libp2p-pubsub/pull/568)) + - test: test notify protocols updated (#567) ([libp2p/go-libp2p-pubsub#567](https://github.com/libp2p/go-libp2p-pubsub/pull/567)) + - Switch to the new peer notify mechanism (#564) ([libp2p/go-libp2p-pubsub#564](https://github.com/libp2p/go-libp2p-pubsub/pull/564)) + - test: use the regular libp2p host (#565) ([libp2p/go-libp2p-pubsub#565](https://github.com/libp2p/go-libp2p-pubsub/pull/565)) + - Missing flood protection check for number of message IDs when handling `Ihave` messages (#560) ([libp2p/go-libp2p-pubsub#560](https://github.com/libp2p/go-libp2p-pubsub/pull/560)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Marco Munizaga | 16 | +4253/-545 | 81 | +| Pop Chunhapanya | 1 | +1423/-137 | 15 | +| sukun | 10 | +752/-425 | 35 | +| Steven Allen | 11 | +518/-541 | 35 | +| Andrew Gillis | 19 | +348/-194 | 50 | +| Marcin Rataj | 26 | +343/-132 | 47 | +| Adin Schmahmann | 4 | +269/-29 | 12 | +| gammazero | 12 | +154/-18 | 13 | +| Josh Klopfenstein | 1 | +90/-35 | 27 | +| galargh | 3 | +42/-44 | 13 | +| Daniel Norman | 2 | +30/-16 | 4 | +| Mikel Cortes | 3 | +25/-4 | 4 | +| gopherfarm | 1 | +14/-14 | 6 | +| Carlos Peliciari | 1 | +12/-12 | 4 | +| Prithvi Shahi | 2 | +5/-11 | 3 | +| web3-bot | 6 | +12/-3 | 6 | +| guillaumemichel | 3 | +7/-6 | 3 | +| Jorropo | 1 | +11/-0 | 1 | +| Sorin Stanculeanu | 1 | +8/-0 | 1 | +| Hlib Kanunnikov | 2 | +6/-2 | 4 | +| André Bierlein | 1 | +4/-3 | 1 | +| bytetigers | 1 | +1/-1 | 1 | +| Wondertan | 2 | +2/-0 | 2 | +| Alexandr Burdiyan | 1 | +1/-1 | 1 | +| Guillaume Michel | 1 | +0/-1 | 1 | diff --git a/docs/changelogs/v0.33.md b/docs/changelogs/v0.33.md new file mode 100644 index 000000000..4715aa7ca --- /dev/null +++ b/docs/changelogs/v0.33.md @@ -0,0 +1,484 @@ +# Kubo changelog v0.33 + +- [v0.33.0](#v0330) +- [v0.33.1](#v0331) +- [v0.33.2](#v0332) + +## v0.33.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [Shared TCP listeners](#shared-tcp-listeners) + - [AutoTLS takes care of Secure WebSockets setup](#autotls-takes-care-of-secure-websockets-setup) + - [Bitswap improvements from Boxo](#bitswap-improvements-from-boxo) + - [Using default `libp2p_rcmgr` metrics](#using-default-libp2p_rcmgr--metrics) + - [Flatfs does not `sync` on each write](#flatfs-does-not-sync-on-each-write) + - [`ipfs add --to-files` no longer works with `--wrap`](#ipfs-add---to-files-no-longer-works-with---wrap) + - [`ipfs --api` supports HTTPS RPC endpoints](#ipfs---api-supports-https-rpc-endpoints) + - [New options for 
faster writes: `WriteThrough`, `BlockKeyCacheSize`, `BatchMaxNodes`, `BatchMaxSize`](#new-options-for-faster-writes-writethrough-blockkeycachesize-batchmaxnodes-batchmaxsize) + - [MFS stability with large number of writes](#mfs-stability-with-large-number-of-writes) + - [New DoH resolvers for non-ICANN DNSLinks](#new-doh-resolvers-for-non-icann-dnslinks) + - [Reliability improvements to the WebRTC Direct listener](#reliability-improvements-to-the-webrtc-direct-listener) + - [Bitswap improvements from Boxo](#bitswap-improvements-from-boxo-1) + - [📦️ Important dependency updates](#-important-dependency-updates) + - [Escape Redirect URL for Directory](#escape-redirect-url-for-directory) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +#### Shared TCP listeners + +Kubo now supports sharing the same TCP port (`4001` by default) by both [raw TCP](https://github.com/ipfs/kubo/blob/master/docs/config.md#swarmtransportsnetworktcp) and [WebSockets](https://github.com/ipfs/kubo/blob/master/docs/config.md#swarmtransportsnetworkwebsocket) libp2p transports. + +This feature is not yet compatible with Private Networks and can be disabled by setting `LIBP2P_TCP_MUX=false` if it causes any issues. + +#### AutoTLS takes care of Secure WebSockets setup + +It is no longer necessary to manually add `/tcp/../ws` listeners to `Addresses.Swarm` when [`AutoTLS.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotlsenabled) is set to `true`. Kubo will detect if a `/ws` listener is missing and add one on the same port as pre-existing TCP (e.g. `/tcp/4001`), removing the need for any extra configuration. +> [!TIP] +> Give it a try: +> ```console +> $ ipfs config --json AutoTLS.Enabled true +> ``` +> And restart the node. If you are behind NAT, make sure your node is publicly diallable (uPnP or port forwarding), and wait a few minutes to pass all checks and for the changes to take effect. 
+ +See [`AutoTLS`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls) for more information. + +#### Bitswap improvements from Boxo + +This release includes some refactorings and improvements affecting Bitswap which should improve reliability. One of the changes affects blocks providing. Previously, the bitswap layer itself took care of announcing new blocks -added or received- with the configured provider (i.e. DHT). This bypassed the "Reprovider", that is, the system that manages precisely "providing" the blocks stored by Kubo. The Reprovider knows how to take advantage of the [AcceleratedDHTClient](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient), is able to handle priorities, logs statistics and is able to resume on daemon reboot where it left off. From now on, Bitswap will not be doing any providing on-the-side and all announcements are managed by the reprovider. In some cases, when the reproviding queue is full with other elements, this may cause additional delays, but more likely this will result in improved block-providing behaviour overall. + +#### Using default `libp2p_rcmgr` metrics + +Bespoke rcmgr metrics [were removed](https://github.com/ipfs/kubo/pull/9947); Kubo now exposes only the default `libp2p_rcmgr` metrics from go-libp2p. +This makes it easier to compare Kubo with custom implementations based on go-libp2p. +If you depended on removed ones, please file an issue to add them to the upstream [go-libp2p](https://github.com/libp2p/go-libp2p). + +#### Flatfs does not `sync` on each write + +New repositories initialized with `flatfs` in `Datastore.Spec` will have `sync` set to `false`. + +The old default was overly conservative and caused performance issues in big repositories that did a lot of writes. There is usually no need to flush on every block write to disk before continuing. 
Setting this to false is safe as kubo will automatically flush writes to disk before and after performing critical operations like pinning. However, we still provide users with the ability to set this to true to be extra-safe (at the cost of a slowdown when adding files in bulk). + +#### `ipfs add --to-files` no longer works with `--wrap` + +Onboarding files and directories with `ipfs add --to-files` now requires non-empty names. Due to this, the `--to-files` and `--wrap` options are now mutually exclusive ([#10612](https://github.com/ipfs/kubo/issues/10612)). + +#### `ipfs --api` supports HTTPS RPC endpoints + +The CLI and RPC client now support accessing Kubo RPC over the `https://` protocol when a multiaddr ending with `/https` or `/tls/http` is passed to `ipfs --api`: + +```console +$ ipfs id --api /dns/kubo-rpc.example.net/tcp/5001/tls/http +# → https://kubo-rpc.example.net:5001 +``` + +#### New options for faster writes: `WriteThrough`, `BlockKeyCacheSize`, `BatchMaxNodes`, `BatchMaxSize` + +Now that Kubo supports [`pebble`](https://github.com/ipfs/kubo/blob/master/docs/datastores.md#pebbleds) as an _experimental_ datastore backend, it becomes very useful to expose some additional configuration options for how the blockservice/blockstore/datastore combo behaves. + +Usually, LSM-tree based datastores like Pebble or Badger have very fast write performance (blocks are streamed to disk) while incurring read-amplification penalties (blocks need to be looked up in the index to know where they are on disk), especially noticeable on spinning disks. + +Prior to this version, `BlockService` and `Blockstore` implementations performed a `Has(cid)` for every block that was going to be written, skipping the writes altogether if the block was already present in the datastore. The performance impact of this `Has()` call can vary. The `Datastore` implementation itself might include block-caching and things like bloom-filters to speed up lookups and mitigate read-penalties. 
Our `Blockstore` implementation also supports a bloom-filter (controlled by `BloomFilterSize` and disabled by default), and a two-queue cache for keys and block sizes. If we assume that most of the blocks added to Kubo are new blocks, not already present in the datastore, or that the datastore itself includes mechanisms to optimize writes and avoid writing the same data twice, the calls to `Has()` at both BlockService and Blockstore layers seem superfluous to the point that they even harm write performance. + +For these reasons, from now on, the default is to use a "write-through" mode for the Blockservice and the Blockstore. We have added a new option `Datastore.WriteThrough`, which defaults to `true`. Previous behaviour can be obtained by manually setting it to `false`. + +We have also made the size of the two-queue blockstore cache configurable with another option: `Datastore.BlockKeyCacheSize`, which defaults to `65536` (64KiB). Additionally, this caching layer can be disabled altogether by setting it to `0`. In particular, this option controls the size of a blockstore caching layer that records whether the blockstore has certain blocks and their sizes (but does not cache the contents, so it stays relatively small in general). + +Finally, we have added two new options to the `Import` section to control the maximum size of write-batches: `BatchMaxNodes` and `BatchMaxSize`. These are set by default to `128` nodes and `20MiB`. Increasing them will batch more items together when importing data with `ipfs dag import`, which can speed things up. It is important to find a balance between available memory (used to hold the batch), disk latencies (when writing the batch) and processing power (when preparing the batch, as nodes are sorted and duplicates removed). + +As a reminder, details from all the options are explained in the [configuration documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md). 
+ +We recommend users trying Pebble as a datastore backend to disable both blockstore bloom-filter and key caching layers and enable write through as a way to evaluate the raw performance of the underlying datastore, which includes its own bloom-filter and caching layers (default cache size is `8MiB` and can be configured in the [options](https://github.com/ipfs/kubo/blob/master/docs/datastores.md#pebbleds)). + +#### MFS stability with large number of writes + +We have fixed a number of issues that were triggered by writing or copying many files onto an MFS folder: increased memory usage first, then CPU, disk usage, and eventually a deadlock on write operations. The details of the fixes can be read at [#10630](https://github.com/ipfs/kubo/pull/10630) and [#10623](https://github.com/ipfs/kubo/pull/10623). The result is that writing large amounts of files to an MFS folder should now be possible without major issues. It is possible, as before, to speed up the operations using the `ipfs files --flush=false ...` flag, but it is recommended to switch to `ipfs files --flush=true ...` regularly, or call `ipfs files flush` on the working directory regularly, as this will flush, clear the directory cache and speed up reads. + +#### New DoH resolvers for non-ICANN DNSLinks + +- `.eth` TLD DNSLinks are now resolved via [DNS-over-HTTPS](https://en.wikipedia.org/wiki/DNS_over_HTTPS) endpoint at `https://dns.eth.limo/dns-query` +- `.crypto` TLD DNSLinks are now resolved via DoH endpoint at `https://resolver.unstoppable.io/dns-query` + +#### Reliability improvements to the WebRTC Direct listener + +Two fixes in go-libp2p improve the reliability of the WebRTC Direct listener in Kubo, and by extension dialability from browsers. + +Relevant changes in go-libp2p: +- [Deprioritising outgoing `/webrtc-direct`](https://github.com/libp2p/go-libp2p/pull/3078) dials. +- [Allows more concurrent handshakes by default](https://github.com/libp2p/go-libp2p/pull/3040/). 
+ +#### Bitswap improvements from Boxo + +This release includes performance and reliability improvements and fixes for minor resource leaks. + +#### 📦️ Important dependency updates + +- update `boxo` to [v0.27.4](https://github.com/ipfs/boxo/releases/tag/v0.27.4) (incl. [v0.25.0](https://github.com/ipfs/boxo/releases/tag/v0.25.0) + [v0.26.0](https://github.com/ipfs/boxo/releases/tag/v0.26.0) + [v0.27.0](https://github.com/ipfs/boxo/releases/tag/v0.27.0) + [v0.27.1](https://github.com/ipfs/boxo/releases/tag/v0.27.1) + [v0.27.2](https://github.com/ipfs/boxo/releases/tag/v0.27.2) + [v0.27.3](https://github.com/ipfs/boxo/releases/tag/v0.27.3)) +- update `go-libp2p` to [v0.38.2](https://github.com/libp2p/go-libp2p/releases/tag/v0.38.2) (incl. [v0.37.1](https://github.com/libp2p/go-libp2p/releases/tag/v0.37.1) + [v0.37.2](https://github.com/libp2p/go-libp2p/releases/tag/v0.37.2) + [v0.38.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.38.0) + [v0.38.1](https://github.com/libp2p/go-libp2p/releases/tag/v0.38.1)) +- update `go-libp2p-kad-dht` to [v0.28.2](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.28.2) +- update `quic-go` to [v0.49.0](https://github.com/quic-go/quic-go/releases/tag/v0.49.0) +- update `p2p-forge/client` to [v0.3.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.3.0) (incl. [v0.1.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.1.0), [v0.2.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.2.0), [v0.2.1](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.2.1), [v0.2.2](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.2.2)) +- update `ipfs-webui` to [v4.4.2](https://github.com/ipfs/ipfs-webui/releases/tag/v4.4.2) (incl. [v4.4.1](https://github.com/ipfs/ipfs-webui/releases/tag/v4.4.1)) + +#### Escape Redirect URL for Directory + +When navigating to a subdirectory, served by the Kubo web server, a subdirectory without a trailing slash gets redirected to a URL with a trailing slash. 
If there are special characters such as "%" in the subdirectory name then these must be escaped in the redirect URL. Previously this was not being done and was preventing navigation to such subdirectories, requiring the user to manually add a trailing slash to the subdirectory URL. This is now fixed to handle the redirect to URLs with characters that must be escaped. + +### 📝 Changelog + +
Full Changelog v0.33.0 + +- github.com/ipfs/kubo: + - test: fix the socat tests after the ubuntu 24.04 upgrade (#10683) ([ipfs/kubo#10683](https://github.com/ipfs/kubo/pull/10683)) + - chore: 0.33.0-rc3 + - fix: quic-go v0.49.0 (#10673) ([ipfs/kubo#10673](https://github.com/ipfs/kubo/pull/10673)) + - Upgrade to Boxo v0.27.2 (#10672) ([ipfs/kubo#10672](https://github.com/ipfs/kubo/pull/10672)) + - chore: 0.33.0-rc2 + - Upgrade to Boxo v0.27.1 (#10671) ([ipfs/kubo#10671](https://github.com/ipfs/kubo/pull/10671)) + - fix(autotls): renewal and AutoTLS.ShortAddrs (#10669) ([ipfs/kubo#10669](https://github.com/ipfs/kubo/pull/10669)) + - update changelog for boxo and go-libp2p (#10668) ([ipfs/kubo#10668](https://github.com/ipfs/kubo/pull/10668)) + - Upgrade to Boxo v0.27.0 (#10665) ([ipfs/kubo#10665](https://github.com/ipfs/kubo/pull/10665)) + - update dependencies (#10664) ([ipfs/kubo#10664](https://github.com/ipfs/kubo/pull/10664)) + - fix(dns): update default DNSLink resolvers (#10655) ([ipfs/kubo#10655](https://github.com/ipfs/kubo/pull/10655)) + - chore: p2p-forge v0.2.2 + go-libp2p-kad-dht v0.28.2 (#10663) ([ipfs/kubo#10663](https://github.com/ipfs/kubo/pull/10663)) + - fix(cli): support HTTPS in ipfs --api (#10659) ([ipfs/kubo#10659](https://github.com/ipfs/kubo/pull/10659)) + - chore: fix typos and comment formatting (#10653) ([ipfs/kubo#10653](https://github.com/ipfs/kubo/pull/10653)) + - fix/gateway: escape directory redirect url (#10649) ([ipfs/kubo#10649](https://github.com/ipfs/kubo/pull/10649)) + - Add example of setting array to config command help + - collection of typo fixes (#10647) ([ipfs/kubo#10647](https://github.com/ipfs/kubo/pull/10647)) + - chore: 0.33.0-rc1 + - fix: ipfs-webui v4.4.2 (#10635) ([ipfs/kubo#10635](https://github.com/ipfs/kubo/pull/10635)) + - feat(libp2p): shared TCP listeners and AutoTLS.AutoWSS (#10565) ([ipfs/kubo#10565](https://github.com/ipfs/kubo/pull/10565)) + - feat(flatfs): default to sync=false (#10632) 
([ipfs/kubo#10632](https://github.com/ipfs/kubo/pull/10632)) + - Minor spelling and wording changes (#10634) ([ipfs/kubo#10634](https://github.com/ipfs/kubo/pull/10634)) + - docs: clarify Swarm.ResourceMgr.MaxMemory (#10622) ([ipfs/kubo#10622](https://github.com/ipfs/kubo/pull/10622)) + - feat: expose BlockKeyCacheSize and enable WriteThrough datastore options (#10614) ([ipfs/kubo#10614](https://github.com/ipfs/kubo/pull/10614)) + - cmd/files: flush parent folders (#10630) ([ipfs/kubo#10630](https://github.com/ipfs/kubo/pull/10630)) + - Upgrade to Boxo v0.26.0 (#10631) ([ipfs/kubo#10631](https://github.com/ipfs/kubo/pull/10631)) + - [skip changelog] pinmfs: mitigate slow mfs writes when it triggers (#10623) ([ipfs/kubo#10623](https://github.com/ipfs/kubo/pull/10623)) + - chore: use errors.New to replace fmt.Errorf with no parameters (#10617) ([ipfs/kubo#10617](https://github.com/ipfs/kubo/pull/10617)) + - chore: boxo v0.25.0 (#10619) ([ipfs/kubo#10619](https://github.com/ipfs/kubo/pull/10619)) + - fix(cmds/add): disallow --wrap with --to-files (#10612) ([ipfs/kubo#10612](https://github.com/ipfs/kubo/pull/10612)) + - refactor(cmds): do not return errors embedded in result type (#10527) ([ipfs/kubo#10527](https://github.com/ipfs/kubo/pull/10527)) + - fix: ipfs-webui v4.4.1 (#10608) ([ipfs/kubo#10608](https://github.com/ipfs/kubo/pull/10608)) + - chore: fix broken url in comment (#10606) ([ipfs/kubo#10606](https://github.com/ipfs/kubo/pull/10606)) + - refactor(rcmgr): use default libp2p rcmgr metrics (#9947) ([ipfs/kubo#9947](https://github.com/ipfs/kubo/pull/9947)) + - docs(changelog/v0.33): bitswap reprovide changes (#10604) ([ipfs/kubo#10604](https://github.com/ipfs/kubo/pull/10604)) + - tests(cli/harness): use unused Verbose flag to pipe daemon outputs (#10601) ([ipfs/kubo#10601](https://github.com/ipfs/kubo/pull/10601)) + - chore: p2p-forge/client v0.1.0 (#10605) ([ipfs/kubo#10605](https://github.com/ipfs/kubo/pull/10605)) + - fix: go-libp2p v0.37.2 (#10603) 
([ipfs/kubo#10603](https://github.com/ipfs/kubo/pull/10603)) + - docs: typos (#10602) ([ipfs/kubo#10602](https://github.com/ipfs/kubo/pull/10602)) + - tests/cli: fix flapping tests (#10600) ([ipfs/kubo#10600](https://github.com/ipfs/kubo/pull/10600)) + - Update to boxo with refactored providerQueryManager. (#10595) ([ipfs/kubo#10595](https://github.com/ipfs/kubo/pull/10595)) + - fix some typos in docs (#10598) ([ipfs/kubo#10598](https://github.com/ipfs/kubo/pull/10598)) + - feat(bootstrap): add JS-based va1.bootstrap.libp2p.io (#10575) ([ipfs/kubo#10575](https://github.com/ipfs/kubo/pull/10575)) + - fix: increase provider sample size (#10589) ([ipfs/kubo#10589](https://github.com/ipfs/kubo/pull/10589)) + - Typos Update config.md (#10591) ([ipfs/kubo#10591](https://github.com/ipfs/kubo/pull/10591)) + - refactor: update to boxo without goprocess (#10567) ([ipfs/kubo#10567](https://github.com/ipfs/kubo/pull/10567)) + - fix: go-libp2p-kad-dht v0.28.1 (#10581) ([ipfs/kubo#10581](https://github.com/ipfs/kubo/pull/10581)) + - docs: update RELEASE_CHECKLIST.md (#10564) ([ipfs/kubo#10564](https://github.com/ipfs/kubo/pull/10564)) + - Merge release v0.32.0 ([ipfs/kubo#10579](https://github.com/ipfs/kubo/pull/10579)) + - fix: go-libp2p-kad-dht v0.28.0 (#10578) ([ipfs/kubo#10578](https://github.com/ipfs/kubo/pull/10578)) + - feat: ipfs-webui v4.4.0 (#10574) ([ipfs/kubo#10574](https://github.com/ipfs/kubo/pull/10574)) + - chore: boxo v0.24.3 and p2p-forge v0.0.2 (#10572) ([ipfs/kubo#10572](https://github.com/ipfs/kubo/pull/10572)) + - chore: stop using go-homedir (#10568) ([ipfs/kubo#10568](https://github.com/ipfs/kubo/pull/10568)) + - fix(autotls): store certificates at the location from the repo path (#10566) ([ipfs/kubo#10566](https://github.com/ipfs/kubo/pull/10566)) + - chore: bump master to 0.33.0-dev +- github.com/ipfs-shipyard/nopfs (v0.0.12 -> v0.0.14): + - Fix error when no doublehash db exists (#42) 
([ipfs-shipyard/nopfs#42](https://github.com/ipfs-shipyard/nopfs/pull/42)) + - Improve support for IPNS double-hashed entries (#41) ([ipfs-shipyard/nopfs#41](https://github.com/ipfs-shipyard/nopfs/pull/41)) +- github.com/ipfs-shipyard/nopfs/ipfs (v0.13.2-0.20231027223058-cde3b5ba964c -> v0.25.0): + failed to fetch repo +- github.com/ipfs/boxo (v0.24.3 -> v0.27.2): + - Release v0.27.2 ([ipfs/boxo#811](https://github.com/ipfs/boxo/pull/811)) + - Revert peer exclude cancel ([ipfs/boxo#809](https://github.com/ipfs/boxo/pull/809)) + - Release v0.27.1 ([ipfs/boxo#807](https://github.com/ipfs/boxo/pull/807)) + - fix sending cancels when excluding peer ([ipfs/boxo#805](https://github.com/ipfs/boxo/pull/805)) + - Release v0.27.0 ([ipfs/boxo#802](https://github.com/ipfs/boxo/pull/802)) + - Remove want-block sent tracking from sessionWantSender (#759) ([ipfs/boxo#759](https://github.com/ipfs/boxo/pull/759)) + - Upgrade to go-libp2p v0.38.2 (#804) ([ipfs/boxo#804](https://github.com/ipfs/boxo/pull/804)) + - [skip changelog] Use routing.ContentRouting interface (#803) ([ipfs/boxo#803](https://github.com/ipfs/boxo/pull/803)) + - fix potential crash in unixfs directory (#798) ([ipfs/boxo#798](https://github.com/ipfs/boxo/pull/798)) + - prefer slices.SortFunc to sort.Sort (#796) ([ipfs/boxo#796](https://github.com/ipfs/boxo/pull/796)) + - fix: ipns protobuf namespace conflict (#794) ([ipfs/boxo#794](https://github.com/ipfs/boxo/pull/794)) + - update release procedure (#773) ([ipfs/boxo#773](https://github.com/ipfs/boxo/pull/773)) + - reduce default number of routing in-process requests (#793) ([ipfs/boxo#793](https://github.com/ipfs/boxo/pull/793)) + - Do not return unused values from wantlists (#792) ([ipfs/boxo#792](https://github.com/ipfs/boxo/pull/792)) + - Create FUNDING.json [skip changelog] (#795) ([ipfs/boxo#795](https://github.com/ipfs/boxo/pull/795)) + - refactor: using slices.Contains to simplify the code (#791) ([ipfs/boxo#791](https://github.com/ipfs/boxo/pull/791)) + 
- do not send cancel message to peer that sent block (#784) ([ipfs/boxo#784](https://github.com/ipfs/boxo/pull/784)) + - Define a `go_package` for protobuf, rename to a more unique `ipns-record.proto` ([ipfs/boxo#789](https://github.com/ipfs/boxo/pull/789)) + - bitswap: messagequeue: lock only needed sections (#787) ([ipfs/boxo#787](https://github.com/ipfs/boxo/pull/787)) + - Update libp2p-kad-dht to v0.28.2 (#786) ([ipfs/boxo#786](https://github.com/ipfs/boxo/pull/786)) + - feat(gateway): allow localhost http:// DoH resolvers (#645) ([ipfs/boxo#645](https://github.com/ipfs/boxo/pull/645)) + - fix(gateway): update DoH resolver for .crypto DNSLink (#782) ([ipfs/boxo#782](https://github.com/ipfs/boxo/pull/782)) + - fix(gateway): update DoH resolver for .eth DNSLink (#781) ([ipfs/boxo#781](https://github.com/ipfs/boxo/pull/781)) + - chore: pass options to tracer start (#775) ([ipfs/boxo#775](https://github.com/ipfs/boxo/pull/775)) + - escape redirect urls (#783) ([ipfs/boxo#783](https://github.com/ipfs/boxo/pull/783)) + - fix/gateway: escape directory redirect url (#779) ([ipfs/boxo#779](https://github.com/ipfs/boxo/pull/779)) + - fix spelling in comments (#778) ([ipfs/boxo#778](https://github.com/ipfs/boxo/pull/778)) + - trivial spelling changes in comments (#777) ([ipfs/boxo#777](https://github.com/ipfs/boxo/pull/777)) + - Release v0.26.0 ([ipfs/boxo#770](https://github.com/ipfs/boxo/pull/770)) + - Minor spelling and wording changes (#768) ([ipfs/boxo#768](https://github.com/ipfs/boxo/pull/768)) + - update go-libp2p and go-libp2p-kad-dht ([ipfs/boxo#767](https://github.com/ipfs/boxo/pull/767)) + - [skip changelog] fix: Drop stream references on Close/Reset ([ipfs/boxo#760](https://github.com/ipfs/boxo/pull/760)) + - Update go-libp2p to v0.38.0 (#764) ([ipfs/boxo#764](https://github.com/ipfs/boxo/pull/764)) + - Fix leak due to cid queue never getting cleaned up (#756) ([ipfs/boxo#756](https://github.com/ipfs/boxo/pull/756)) + - Do not reset the broadcast timer if 
there are no wants (#758) ([ipfs/boxo#758](https://github.com/ipfs/boxo/pull/758)) + - Replace mock time implementation (#762) ([ipfs/boxo#762](https://github.com/ipfs/boxo/pull/762)) + - mfs: clean cache on sync ([ipfs/boxo#751](https://github.com/ipfs/boxo/pull/751)) + - Remove peer's count of first responses when peer becomes unavailable (#757) ([ipfs/boxo#757](https://github.com/ipfs/boxo/pull/757)) + - Remove unnecessary CID copying in SessionInterestManager (#761) ([ipfs/boxo#761](https://github.com/ipfs/boxo/pull/761)) + - [bitswap/peermanager] take read-lock for read-only operation (#755) ([ipfs/boxo#755](https://github.com/ipfs/boxo/pull/755)) + - bitswap/client/messagequeue: expose dontHaveTimeoutMgr configuration (#750) ([ipfs/boxo#750](https://github.com/ipfs/boxo/pull/750)) + - improve mfs republisher (#754) ([ipfs/boxo#754](https://github.com/ipfs/boxo/pull/754)) + - blockstore/blockservice: change option to `WriteThrough(enabled bool)` ([ipfs/boxo#749](https://github.com/ipfs/boxo/pull/749)) + - Merge release v0.25.0 ([ipfs/boxo#748](https://github.com/ipfs/boxo/pull/748)) + - Use deque instead of slice for queues (#742) ([ipfs/boxo#742](https://github.com/ipfs/boxo/pull/742)) + - chore: no lifecycle context to shutdown ProviderQueryManager (#734) ([ipfs/boxo#734](https://github.com/ipfs/boxo/pull/734)) + - removed Startup function from ProviderQueryManager (#741) ([ipfs/boxo#741](https://github.com/ipfs/boxo/pull/741)) + - Re-enable flaky bitswap tests (#740) ([ipfs/boxo#740](https://github.com/ipfs/boxo/pull/740)) + - feat(session): do not record erroneous session want sends (#452) ([ipfs/boxo#452](https://github.com/ipfs/boxo/pull/452)) + - feat(filestore): add mmap reader option (#665) ([ipfs/boxo#665](https://github.com/ipfs/boxo/pull/665)) + - chore: update to latest go-libp2p (#739) ([ipfs/boxo#739](https://github.com/ipfs/boxo/pull/739)) + - refactor(remote/pinning): `Ls` to take results channel instead of returning one (#738) 
([ipfs/boxo#738](https://github.com/ipfs/boxo/pull/738)) + - Bitswap default ProviderQueryManager uses explicit options (#737) ([ipfs/boxo#737](https://github.com/ipfs/boxo/pull/737)) + - chore: minor examples cleanup (#736) ([ipfs/boxo#736](https://github.com/ipfs/boxo/pull/736)) + - misc comments and spelling (#735) ([ipfs/boxo#735](https://github.com/ipfs/boxo/pull/735)) + - chore: fix invalid url in docs (#733) ([ipfs/boxo#733](https://github.com/ipfs/boxo/pull/733)) + - [skip changelog] bitswap/client: fix wiring when passing custom providerFinder ([ipfs/boxo#732](https://github.com/ipfs/boxo/pull/732)) + - Add debug logging for deduplicated queries (#729) ([ipfs/boxo#729](https://github.com/ipfs/boxo/pull/729)) + - [skip changelog] staticcheck fixes / remove unused variables (#730) ([ipfs/boxo#730](https://github.com/ipfs/boxo/pull/730)) + - refactor: default to prometheus.DefaultRegisterer (#722) ([ipfs/boxo#722](https://github.com/ipfs/boxo/pull/722)) + - chore: minor Improvements to providerquerymanager (#728) ([ipfs/boxo#728](https://github.com/ipfs/boxo/pull/728)) + - dspinner: RecursiveKeys(): do not hang on cancellations (#727) ([ipfs/boxo#727](https://github.com/ipfs/boxo/pull/727)) + - Tests can signal immediate rebroadcast (#726) ([ipfs/boxo#726](https://github.com/ipfs/boxo/pull/726)) + - fix(bitswap/client/msgq): prevent duplicate requests (#691) ([ipfs/boxo#691](https://github.com/ipfs/boxo/pull/691)) + - Bitswap: move providing -> Exchange-layer, providerQueryManager -> routing (#641) ([ipfs/boxo#641](https://github.com/ipfs/boxo/pull/641)) + - fix(bitswap/client/providerquerymanager): don't end trace span until … (#725) ([ipfs/boxo#725](https://github.com/ipfs/boxo/pull/725)) + - fix(routing/http/server): adjust bucket sizes for http metrics ([ipfs/boxo#724](https://github.com/ipfs/boxo/pull/724)) + - fix(bitswap/client/providerquerymanager): use non-timed out context for tracing (#721) ([ipfs/boxo#721](https://github.com/ipfs/boxo/pull/721)) + 
- fix(bitswap/server): pass context to server engine to register metrics (#723) ([ipfs/boxo#723](https://github.com/ipfs/boxo/pull/723)) + - docs: fix url of tracing env vars (#719) ([ipfs/boxo#719](https://github.com/ipfs/boxo/pull/719)) + - feat(routing/http/server): add routing timeout (#720) ([ipfs/boxo#720](https://github.com/ipfs/boxo/pull/720)) + - feat(routing/http/server): expose prometheus metrics (#718) ([ipfs/boxo#718](https://github.com/ipfs/boxo/pull/718)) + - Remove dependency on goprocess ([ipfs/boxo#710](https://github.com/ipfs/boxo/pull/710)) + - Merge release v0.24.3 ([ipfs/boxo#714](https://github.com/ipfs/boxo/pull/714)) + - fix(bitswap): log unexpected blocks to debug level (#711) ([ipfs/boxo#711](https://github.com/ipfs/boxo/pull/711)) + - Release v0.24.2 ([ipfs/boxo#708](https://github.com/ipfs/boxo/pull/708)) +- github.com/ipfs/go-ds-pebble (v0.4.0 -> v0.4.2): + - new version (#44) ([ipfs/go-ds-pebble#44](https://github.com/ipfs/go-ds-pebble/pull/44)) + - new version for pebble minor version update (#42) ([ipfs/go-ds-pebble#42](https://github.com/ipfs/go-ds-pebble/pull/42)) +- github.com/ipfs/go-ipfs-cmds (v0.14.0 -> v0.14.1): + - fix(NewClient): support https:// URLs (#277) ([ipfs/go-ipfs-cmds#277](https://github.com/ipfs/go-ipfs-cmds/pull/277)) +- github.com/ipfs/go-peertaskqueue (v0.8.1 -> v0.8.2): + - new version ([ipfs/go-peertaskqueue#39](https://github.com/ipfs/go-peertaskqueue/pull/39)) + - Replace mock time implementation ([ipfs/go-peertaskqueue#37](https://github.com/ipfs/go-peertaskqueue/pull/37)) + - fix: staticcheck feedback +- github.com/libp2p/go-doh-resolver (v0.4.0 -> v0.5.0): + - chore: release v0.5.0 + - fix: include url on HTTP error (#29) ([libp2p/go-doh-resolver#29](https://github.com/libp2p/go-doh-resolver/pull/29)) + - feat: allow localhost http endpoints (#28) ([libp2p/go-doh-resolver#28](https://github.com/libp2p/go-doh-resolver/pull/28)) + - sync: update CI config files (#20) 
([libp2p/go-doh-resolver#20](https://github.com/libp2p/go-doh-resolver/pull/20)) +- github.com/libp2p/go-libp2p (v0.37.0 -> v0.38.2): + - Release v0.38.2 (#3147) ([libp2p/go-libp2p#3147](https://github.com/libp2p/go-libp2p/pull/3147)) + - chore: release v0.38.1 + - fix(httpauth): Correctly handle concurrent requests on server (#3111) ([libp2p/go-libp2p#3111](https://github.com/libp2p/go-libp2p/pull/3111)) + - ci: Install specific protoc version when generating protobufs (#3112) ([libp2p/go-libp2p#3112](https://github.com/libp2p/go-libp2p/pull/3112)) + - fix(autorelay): Move relayFinder peer disconnect cleanup to separate goroutine (#3105) ([libp2p/go-libp2p#3105](https://github.com/libp2p/go-libp2p/pull/3105)) + - chore: Release v0.38.0 (#3106) ([libp2p/go-libp2p#3106](https://github.com/libp2p/go-libp2p/pull/3106)) + - peerstore: remove sync.Pool for expiringAddrs (#3093) ([libp2p/go-libp2p#3093](https://github.com/libp2p/go-libp2p/pull/3093)) + - webtransport: close quic conn on dial error (#3104) ([libp2p/go-libp2p#3104](https://github.com/libp2p/go-libp2p/pull/3104)) + - peerstore: fix addressbook benchmark timing (#3092) ([libp2p/go-libp2p#3092](https://github.com/libp2p/go-libp2p/pull/3092)) + - swarm: record conn metrics only once (#3091) ([libp2p/go-libp2p#3091](https://github.com/libp2p/go-libp2p/pull/3091)) + - fix(sampledconn): Correctly handle slow bytes and closed conns (#3080) ([libp2p/go-libp2p#3080](https://github.com/libp2p/go-libp2p/pull/3080)) + - peerstore: pass options to addrbook constructor (#3090) ([libp2p/go-libp2p#3090](https://github.com/libp2p/go-libp2p/pull/3090)) + - fix(swarm): remove stray print stmt (#3086) ([libp2p/go-libp2p#3086](https://github.com/libp2p/go-libp2p/pull/3086)) + - feat(swarm): delay /webrtc-direct dials by 1 second (#3078) ([libp2p/go-libp2p#3078](https://github.com/libp2p/go-libp2p/pull/3078)) + - chore: Update dependencies and fix deprecated function in relay example (#3023) 
([libp2p/go-libp2p#3023](https://github.com/libp2p/go-libp2p/pull/3023)) + - chore: fix broken link to record envelope protobuf file (#3070) ([libp2p/go-libp2p#3070](https://github.com/libp2p/go-libp2p/pull/3070)) + - chore(core): fix function name in interface comment (#3056) ([libp2p/go-libp2p#3056](https://github.com/libp2p/go-libp2p/pull/3056)) + - basichost: avoid modifying slice returned by AddrsFactory (#3068) ([libp2p/go-libp2p#3068](https://github.com/libp2p/go-libp2p/pull/3068)) + - fix(swarm): check after we split for empty multiaddr (#3063) ([libp2p/go-libp2p#3063](https://github.com/libp2p/go-libp2p/pull/3063)) + - feat: allow passing options to memoryAddrBook (#3062) ([libp2p/go-libp2p#3062](https://github.com/libp2p/go-libp2p/pull/3062)) + - fix(libp2phttp): Return ErrServerClosed on Close (#3050) ([libp2p/go-libp2p#3050](https://github.com/libp2p/go-libp2p/pull/3050)) + - chore(dashboard/alertmanager): update api version from v1 to v2 (#3054) ([libp2p/go-libp2p#3054](https://github.com/libp2p/go-libp2p/pull/3054)) + - fix(tcpreuse): handle connection that failed to be sampled (#3036) ([libp2p/go-libp2p#3036](https://github.com/libp2p/go-libp2p/pull/3036)) + - fix(tcpreuse): remove windows specific code (#3039) ([libp2p/go-libp2p#3039](https://github.com/libp2p/go-libp2p/pull/3039)) + - refactor(libp2phttp): don't require specific port for the HTTP host example (#3047) ([libp2p/go-libp2p#3047](https://github.com/libp2p/go-libp2p/pull/3047)) + - refactor(core/routing): split ContentRouting interface (#3048) ([libp2p/go-libp2p#3048](https://github.com/libp2p/go-libp2p/pull/3048)) + - fix(holepunch/tracer): replace inline peer struct with peerInfo type (#3049) ([libp2p/go-libp2p#3049](https://github.com/libp2p/go-libp2p/pull/3049)) + - fix: Defer resource usage cleanup until the very end (#3042) ([libp2p/go-libp2p#3042](https://github.com/libp2p/go-libp2p/pull/3042)) + - fix(eventbus): Idempotent wildcardSub close (#3045) 
([libp2p/go-libp2p#3045](https://github.com/libp2p/go-libp2p/pull/3045)) + - fix: obsaddr: do not record observations over relayed conn (#3043) ([libp2p/go-libp2p#3043](https://github.com/libp2p/go-libp2p/pull/3043)) + - fix(identify): push should not dial a new connection (#3035) ([libp2p/go-libp2p#3035](https://github.com/libp2p/go-libp2p/pull/3035)) + - webrtc: handshake more connections in parallel (#3040) ([libp2p/go-libp2p#3040](https://github.com/libp2p/go-libp2p/pull/3040)) + - eventbus: dont panic on closing Subscription twice (#3034) ([libp2p/go-libp2p#3034](https://github.com/libp2p/go-libp2p/pull/3034)) + - fix(swarm): incorrect error message format order (#3037) ([libp2p/go-libp2p#3037](https://github.com/libp2p/go-libp2p/pull/3037)) + - feat: eventbus: log error on slow consumers (#3031) ([libp2p/go-libp2p#3031](https://github.com/libp2p/go-libp2p/pull/3031)) + - chore: make funding.json uppercase to follow meta convention (#3028) ([libp2p/go-libp2p#3028](https://github.com/libp2p/go-libp2p/pull/3028)) + - chore: add drips entry to funding.json for Filecoin rPGF round 2 + - tcp: parameterize metrics collector (#3026) ([libp2p/go-libp2p#3026](https://github.com/libp2p/go-libp2p/pull/3026)) + - fix: basichost: Use NegotiationTimeout as fallback timeout for NewStream (#3020) ([libp2p/go-libp2p#3020](https://github.com/libp2p/go-libp2p/pull/3020)) + - feat(tcpreuse): add options for sharing TCP listeners amongst TCP, WS and WSS transports (#2984) ([libp2p/go-libp2p#2984](https://github.com/libp2p/go-libp2p/pull/2984)) + - pnet: wrap underlying error when reading nonce fails (#2975) ([libp2p/go-libp2p#2975](https://github.com/libp2p/go-libp2p/pull/2975)) +- github.com/libp2p/go-libp2p-kad-dht (v0.28.1 -> v0.28.2): + - Release v0.28.2 (#1010) ([libp2p/go-libp2p-kad-dht#1010](https://github.com/libp2p/go-libp2p-kad-dht/pull/1010)) + - accelerated-dht: cleanup peer from message sender on disconnection (#1009) 
([libp2p/go-libp2p-kad-dht#1009](https://github.com/libp2p/go-libp2p-kad-dht/pull/1009)) + - chore: fix some function names in comment ([libp2p/go-libp2p-kad-dht#1004](https://github.com/libp2p/go-libp2p-kad-dht/pull/1004)) + - feat: add more attributes to traces ([libp2p/go-libp2p-kad-dht#1002](https://github.com/libp2p/go-libp2p-kad-dht/pull/1002)) +- github.com/libp2p/go-netroute (v0.2.1 -> v0.2.2): + - v0.2.2 Includes v4/v6 confusion fix for bsd route parsing + - #50, Don't transform v4 routes to their v6 form on bsd ([libp2p/go-netroute#51](https://github.com/libp2p/go-netroute/pull/51)) + - Using syscall.RtMsg on Linux ([libp2p/go-netroute#43](https://github.com/libp2p/go-netroute/pull/43)) + - add wasi build constraint for netroute_stub ([libp2p/go-netroute#38](https://github.com/libp2p/go-netroute/pull/38)) + - Stricter filtering of degenerate routes ([libp2p/go-netroute#33](https://github.com/libp2p/go-netroute/pull/33)) + - sync: update CI config files (#30) ([libp2p/go-netroute#30](https://github.com/libp2p/go-netroute/pull/30)) +- github.com/multiformats/go-multiaddr (v0.13.0 -> v0.14.0): + - Release v0.14.0 ([multiformats/go-multiaddr#258](https://github.com/multiformats/go-multiaddr/pull/258)) + - feat: memory multiaddrs ([multiformats/go-multiaddr#256](https://github.com/multiformats/go-multiaddr/pull/256)) + - nit: validate ipcidr ([multiformats/go-multiaddr#247](https://github.com/multiformats/go-multiaddr/pull/247)) + - check for nil interfaces (#251) ([multiformats/go-multiaddr#251](https://github.com/multiformats/go-multiaddr/pull/251)) + - Make it safe to roundtrip SplitXXX and Join (#250) ([multiformats/go-multiaddr#250](https://github.com/multiformats/go-multiaddr/pull/250)) +- github.com/multiformats/go-multiaddr-dns (v0.4.0 -> v0.4.1): + - Release v0.4.1 + - fix: If decapsulating is empty, skip it. 
(#65) ([multiformats/go-multiaddr-dns#65](https://github.com/multiformats/go-multiaddr-dns/pull/65)) +- github.com/multiformats/go-multistream (v0.5.0 -> v0.6.0): + - release v0.6.0 ([multiformats/go-multistream#116](https://github.com/multiformats/go-multistream/pull/116)) + - fix: finish reading handshake on lazyConn close + - feat: New error to highlight unrecognized responses + - release v0.5.0 (#108) ([multiformats/go-multistream#108](https://github.com/multiformats/go-multistream/pull/108)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Andrew Gillis | 57 | +1995/-1718 | 191 | +| Adin Schmahmann | 7 | +2552/-719 | 84 | +| Marco Munizaga | 27 | +1036/-261 | 51 | +| Hector Sanjuan | 21 | +789/-362 | 65 | +| gammazero | 20 | +407/-419 | 40 | +| sukun | 13 | +519/-233 | 30 | +| Marcin Rataj | 34 | +426/-142 | 59 | +| Marten Seemann | 2 | +11/-261 | 5 | +| Dreamacro | 2 | +161/-68 | 5 | +| Hlib Kanunnikov | 1 | +34/-65 | 4 | +| bashkarev | 1 | +78/-5 | 2 | +| Daniel Norman | 4 | +68/-12 | 6 | +| Andi | 1 | +37/-32 | 20 | +| hannahhoward | 1 | +35/-17 | 7 | +| Carlos Peliciari | 2 | +19/-26 | 2 | +| Cole Brown | 1 | +32/-0 | 3 | +| Will Scott | 2 | +19/-7 | 3 | +| Guillaume Michel | 1 | +21/-2 | 4 | +| 7sunarni | 1 | +3/-19 | 1 | +| Srdjan S | 1 | +11/-2 | 2 | +| web3-bot | 2 | +6/-6 | 3 | +| dashangcun | 1 | +2/-10 | 1 | +| John | 3 | +6/-6 | 5 | +| Daniel N | 3 | +8/-3 | 3 | +| Ivan Shvedunov | 1 | +4/-6 | 2 | +| Piotr Galar | 1 | +4/-4 | 2 | +| Derek Nola | 2 | +4/-4 | 4 | +| Bryer | 1 | +4/-4 | 1 | +| Prithvi Shahi | 2 | +6/-1 | 2 | +| Cameron Wood | 1 | +7/-0 | 1 | +| wangjingcun | 1 | +3/-3 | 2 | +| cuibuwei | 1 | +2/-2 | 2 | +| Jorropo | 1 | +1/-3 | 1 | +| 未月 | 1 | +1/-1 | 1 | +| Ubuntu | 1 | +1/-1 | 1 | +| Ryan MacArthur | 1 | +1/-1 | 1 | +| Reymon | 1 | +1/-1 | 1 | +| guillaumemichel | 1 | +1/-0 | 1 | + +## v0.33.1 + +### 🔦 Highlights + +#### Bitswap improvements from Boxo + +This release includes performance and reliability improvements and fixes for minor resource leaks. One of the performance changes [greatly improves the bitswap clients ability to operate under high load](https://github.com/ipfs/boxo/pull/817#pullrequestreview-2587207745), that could previously result in an out of memory condition. 
+ +#### Improved IPNS interop + +Improved compatibility with third-party IPNS publishers by restoring support for compact binary CIDs in the `Value` field of IPNS Records ([IPNS Specs](https://specs.ipfs.tech/ipns/ipns-record/)). As long the signature is valid, Kubo will now resolve such records (likely created by non-Kubo nodes) and convert raw CIDs into valid `/ipfs/cid` content paths. +**Note:** This only adds support for resolving externally created records—Kubo’s IPNS record creation remains unchanged. IPNS records with empty `Value` fields default to zero-length `/ipfs/bafkqaaa` to maintain backward compatibility with code expecting a valid content path. + +#### 📦️ Important dependency updates + +- update `boxo` to [v0.27.4](https://github.com/ipfs/boxo/releases/tag/v0.27.4) (incl. [v0.27.3](https://github.com/ipfs/boxo/releases/tag/v0.27.3)) + +### 📝 Changelog + +
Full Changelog v0.33.1 + +- github.com/ipfs/kubo: + - chore: v0.33.1 + - fix: boxo v0.27.4 (#10692) ([ipfs/kubo#10692](https://github.com/ipfs/kubo/pull/10692)) + - docs: add webrtc-direct fixes to 0.33 release changelog (#10688) ([ipfs/kubo#10688](https://github.com/ipfs/kubo/pull/10688)) + - fix: config help (#10686) ([ipfs/kubo#10686](https://github.com/ipfs/kubo/pull/10686)) +- github.com/ipfs/boxo (v0.27.2 -> v0.27.4): + - Release v0.27.4 ([ipfs/boxo#832](https://github.com/ipfs/boxo/pull/832)) + - fix(ipns): reading records with raw []byte Value (#830) ([ipfs/boxo#830](https://github.com/ipfs/boxo/pull/830)) + - fix(bitswap): blockpresencemanager leak (#833) ([ipfs/boxo#833](https://github.com/ipfs/boxo/pull/833)) + - Always send cancels even if peer has no interest (#829) ([ipfs/boxo#829](https://github.com/ipfs/boxo/pull/829)) + - tidy changelog ([ipfs/boxo#828](https://github.com/ipfs/boxo/pull/828)) + - Update changelog (#827) ([ipfs/boxo#827](https://github.com/ipfs/boxo/pull/827)) + - fix(bitswap): filter interests from received messages (#822) ([ipfs/boxo#822](https://github.com/ipfs/boxo/pull/822)) + - Reduce unnecessary logging work (#826) ([ipfs/boxo#826](https://github.com/ipfs/boxo/pull/826)) + - fix: bitswap lock contention under high load (#817) ([ipfs/boxo#817](https://github.com/ipfs/boxo/pull/817)) + - fix: bitswap simplify cancel (#824) ([ipfs/boxo#824](https://github.com/ipfs/boxo/pull/824)) + - fix(bitswap): simplify SessionInterestManager (#821) ([ipfs/boxo#821](https://github.com/ipfs/boxo/pull/821)) + - feat: Better self-service commands for DHT providing (#815) ([ipfs/boxo#815](https://github.com/ipfs/boxo/pull/815)) + - bitswap/client: fewer wantlist iterations in sendCancels (#819) ([ipfs/boxo#819](https://github.com/ipfs/boxo/pull/819)) + - style: cleanup code by golangci-lint (#797) ([ipfs/boxo#797](https://github.com/ipfs/boxo/pull/797)) + - Move long messagequeue comment to doc.go (#814) 
([ipfs/boxo#814](https://github.com/ipfs/boxo/pull/814)) + - Describe how bitswap message queue works ([ipfs/boxo#813](https://github.com/ipfs/boxo/pull/813)) + +
+ + +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Dreamacro | 1 | +304/-376 | 119 | +| Andrew Gillis | 7 | +306/-200 | 20 | +| Guillaume Michel | 5 | +122/-98 | 14 | +| Marcin Rataj | 2 | +113/-7 | 4 | +| gammazero | 6 | +41/-11 | 6 | +| Sergey Gorbunov | 1 | +14/-2 | 2 | +| Daniel Norman | 1 | +9/-0 | 1 | + +## v0.33.2 + +### 🔦 Highlights + +#### 📦️ Important dependency updates + +- update `go-libp2p` to [v0.38.3](https://github.com/libp2p/go-libp2p/releases/tag/v0.38.3) + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: v0.33.2 +- github.com/libp2p/go-libp2p (v0.38.2 -> v0.38.3): + - Release v0.38.3 (#3184) ([libp2p/go-libp2p#3184](https://github.com/libp2p/go-libp2p/pull/3184)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| sukun | 1 | +122/-23 | 7 | +| Marcin Rataj | 1 | +1/-1 | 1 | diff --git a/docs/changelogs/v0.34.md b/docs/changelogs/v0.34.md new file mode 100644 index 000000000..2b4761de1 --- /dev/null +++ b/docs/changelogs/v0.34.md @@ -0,0 +1,461 @@ +# Kubo changelog v0.34 + + + +This release was brought to you by the [Shipyard](http://ipshipyard.com/) team. + +- [v0.34.0](#v0340) +- [v0.34.1](#v0341) + +## v0.34.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [AutoTLS now enabled by default for nodes with 1 hour uptime](#autotls-now-enabled-by-default-for-nodes-with-1-hour-uptime) + - [New WebUI features](#new-webui-features) + - [RPC and CLI command changes](#rpc-and-cli-command-changes) + - [Bitswap improvements from Boxo](#bitswap-improvements-from-boxo) + - [IPNS publishing TTL change](#ipns-publishing-ttl-change) + - [`IPFS_LOG_LEVEL` deprecated](#ipfs_log_level-deprecated) + - [Pebble datastore format update](#pebble-datastore-format-update) + - [Badger datastore update](#badger-datastore-update) + - [Datastore Implementation Updates](#datastore-implementation-updates) + - [One Multi-error Package](#one-multi-error-package) + - [Fix hanging pinset operations during reprovides](#fix-hanging-pinset-operations-during-reprovides) + - [📦️ Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +#### AutoTLS now enabled by default for nodes with 1 hour uptime + +Starting now, any publicly dialable Kubo node with a `/tcp` listener that remains online for at least one hour will receive a TLS certificate through the [`AutoTLS`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls) feature. +This occurs automatically, with no need for manual setup. 
+ +To bypass the 1-hour delay and enable AutoTLS immediately, users can explicitly opt-in by running the following commands: + +```console +$ ipfs config --json AutoTLS.Enabled true +$ ipfs config --json AutoTLS.RegistrationDelay 0 +``` + +AutoTLS will remain disabled under the following conditions: + +- The node already has a manually configured `/ws` (WebSocket) listener +- A private network is in use with a `swarm.key` +- TCP or WebSocket transports are disabled, or there is no `/tcp` listener + +To troubleshoot, use `GOLOG_LOG_LEVEL="error,autotls=info"`. + +For more details, check out the [`AutoTLS` configuration documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls) or dive deeper with [AutoTLS libp2p blog post](https://blog.libp2p.io/autotls/). + +#### New WebUI features + +The WebUI, accessible at http://127.0.0.1:5001/webui/, now includes support for CAR file import and QR code sharing directly from the Files view. Additionally, the Peers screen has been updated with the latest [`ipfs-geoip`](https://www.npmjs.com/package/ipfs-geoip) dataset. + +#### RPC and CLI command changes + +- `ipfs config` is now validating json fields ([#10679](https://github.com/ipfs/kubo/pull/10679)). +- Deprecated the `bitswap reprovide` command. Make sure to switch to modern `routing reprovide`. ([#10677](https://github.com/ipfs/kubo/pull/10677)) +- The `stats reprovide` command now shows additional stats for [`Routing.AcceleratedDHTClient`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient), indicating the last and next `reprovide` times. ([#10677](https://github.com/ipfs/kubo/pull/10677)) +- `ipfs files cp` now performs basic codec check and will error when source is not a valid UnixFS (only `dag-pb` and `raw` codecs are allowed in MFS) + +#### Bitswap improvements from Boxo + +This release includes performance and reliability improvements and fixes for minor resource leaks. 
One of the performance changes [greatly improves the bitswap client's ability to operate under high load](https://github.com/ipfs/boxo/pull/817#pullrequestreview-2587207745), which could previously result in an out of memory condition. + +#### IPNS publishing TTL change + +Many complaints about IPNS being slow are tied to the default `--ttl` in `ipfs name publish`, which was set to 1 hour. To address this, we’ve lowered the default [IPNS Record TTL](https://specs.ipfs.tech/ipns/ipns-record/#ttl-uint64) during publishing to 5 minutes, matching similar TTL defaults in DNS. This update is now part of `boxo/ipfs` (GO, [boxo#859](https://github.com/ipfs/boxo/pull/859)) and `@helia/ipns` (JS, [helia#749](https://github.com/ipfs/helia/pull/749)). + +> [!TIP] +> IPNS TTL recommendations when even faster update propagation is desired: +> - **As a Publisher:** Lower the `--ttl` (e.g., `ipfs name publish --ttl=1m`) to further reduce caching delays. If using DNSLink, ensure the DNS TXT record TTL matches the IPNS record TTL. +> - **As a Gateway Operator:** Override publisher TTLs for faster updates using configurations like [`Ipns.MaxCacheTTL`](https://github.com/ipfs/kubo/blob/master/docs/config.md#ipnsmaxcachettl) in Kubo or [`RAINBOW_IPNS_MAX_CACHE_TTL`](https://github.com/ipfs/rainbow/blob/main/docs/environment-variables.md#rainbow_ipns_max_cache_ttl) in [Rainbow](https://github.com/ipfs/rainbow/). + +#### `IPFS_LOG_LEVEL` deprecated + +The variable has been deprecated. Please use [`GOLOG_LOG_LEVEL`](https://github.com/ipfs/kubo/blob/master/docs/environment-variables.md#golog_log_level) instead for configuring logging levels. + +#### Pebble datastore format update + +If the pebble database format is not explicitly set in the config, then automatically upgrade it to the latest format version supported by the release of pebble used by kubo. This will ensure that the database format is sufficiently up-to-date to be compatible with a major version upgrade of pebble. 
This is necessary before upgrading to use pebble v2. + +#### Badger datastore update + +An update was made to the badger v1 datastore that avoids use of mmap in 32-bit environments, which has been seen to cause issues on some platforms. Please be aware that this could lead to a performance regression for users of badger in a 32-bit environment. Badger users are advised to move to the flatfs or pebble datastore. + +#### Datastore Implementation Updates + +The go-ds-xxx datastore implementations have been updated to support the updated `go-datastore` [v0.8.2](https://github.com/ipfs/go-datastore/releases/tag/v0.8.2) query API. This update removes the datastore implementations' dependency on `goprocess` and updates the query API. + +#### One Multi-error Package + +Kubo previously depended on multiple multi-error packages, `github.com/hashicorp/go-multierror` and `go.uber.org/multierr`. These have nearly identical functionality so there was no need to use both. Therefore, `go.uber.org/multierr` was selected as the package to depend on. Any future code needing multi-error functionality should use `go.uber.org/multierr` to avoid introducing unneeded dependencies. + +#### Fix hanging pinset operations during reprovides + +The reprovide process can be quite slow. In default settings, the reprovide process will start reading CIDs that belong to the pinset. During this operation, starvation can occur for other operations that need pinset access (see https://github.com/ipfs/kubo/issues/10596). + +We have now switched to buffering pinset-related cids that are going to be reprovided in memory, so that we can free pinset mutexes as soon as possible so that pinset-writes and subsequent read operations can proceed. The downside is larger pinsets will need some extra memory, with an estimation of ~1GiB of RAM memory-use per 20 million items to be reprovided. 
+ +Use [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy) to balance announcement prioritization, speed, and memory utilization. + +#### 📦️ Important dependency updates + +- update `go-libp2p` to [v0.41.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.41.0) (incl. [v0.40.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.40.0)) +- update `go-libp2p-kad-dht` to [v0.30.2](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.30.2) (incl. [v0.29.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.29.0), [v0.29.1](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.29.1), [v0.29.2](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.29.2), [v0.30.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.30.0), [v0.30.1](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.30.1)) +- update `boxo` to [v0.29.1](https://github.com/ipfs/boxo/releases/tag/v0.29.1) (incl. [v0.28.0](https://github.com/ipfs/boxo/releases/tag/v0.28.0) [v0.29.0](https://github.com/ipfs/boxo/releases/tag/v0.29.0)) +- update `ipfs-webui` to [v4.6.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.6.0) (incl. [v4.5.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.5.0)) +- update `p2p-forge/client` to [v0.4.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.4.0) +- update `go-datastore` to [v0.8.2](https://github.com/ipfs/go-datastore/releases/tag/v0.8.2) (incl. [v0.7.0](https://github.com/ipfs/go-datastore/releases/tag/v0.7.0), [v0.8.0](https://github.com/ipfs/go-datastore/releases/tag/v0.8.0)) + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: v0.34.0 + - chore: v0.34.0-rc2 + - docs: mention Reprovider.Strategy config + - docs: ipns ttl change + - feat: ipfs-webui v4.6 (#10756) ([ipfs/kubo#10756](https://github.com/ipfs/kubo/pull/10756)) + - docs(readme): update min. requirements + cleanup (#10750) ([ipfs/kubo#10750](https://github.com/ipfs/kubo/pull/10750)) + - Upgrade to Boxo v0.29.1 (#10755) ([ipfs/kubo#10755](https://github.com/ipfs/kubo/pull/10755)) + - Nonfunctional (#10753) ([ipfs/kubo#10753](https://github.com/ipfs/kubo/pull/10753)) + - Update docs/changelogs/v0.34.md + - provider: buffer pin providers. + - chore: 0.34.0-rc1 + - fix(mfs): basic UnixFS sanity checks in `files cp` (#10701) ([ipfs/kubo#10701](https://github.com/ipfs/kubo/pull/10701)) + - Upgrade to Boxo v0.29.0 (#10742) ([ipfs/kubo#10742](https://github.com/ipfs/kubo/pull/10742)) + - use go-datastore without go-process (#10736) ([ipfs/kubo#10736](https://github.com/ipfs/kubo/pull/10736)) + - docs(config): add security considerations for rpc (#10739) ([ipfs/kubo#10739](https://github.com/ipfs/kubo/pull/10739)) + - chore: update go-libp2p to v0.41.0 (#10733) ([ipfs/kubo#10733](https://github.com/ipfs/kubo/pull/10733)) + - feat: ipfs-webui v4.5.0 (#10735) ([ipfs/kubo#10735](https://github.com/ipfs/kubo/pull/10735)) + - Create FUNDING.json (#10734) ([ipfs/kubo#10734](https://github.com/ipfs/kubo/pull/10734)) + - feat(AutoTLS): enabled by default with 1h RegistrationDelay (#10724) ([ipfs/kubo#10724](https://github.com/ipfs/kubo/pull/10724)) + - Upgrade to Boxo v0.28.0 (#10725) ([ipfs/kubo#10725](https://github.com/ipfs/kubo/pull/10725)) + - Upgrade to go1.24 (#10726) ([ipfs/kubo#10726](https://github.com/ipfs/kubo/pull/10726)) + - Replace go-random with random-data from go-test package (#10731) ([ipfs/kubo#10731](https://github.com/ipfs/kubo/pull/10731)) + - Update to new go-test (#10729) ([ipfs/kubo#10729](https://github.com/ipfs/kubo/pull/10729)) + - Update go-test and use new 
random-files generator (#10728) ([ipfs/kubo#10728](https://github.com/ipfs/kubo/pull/10728)) + - docs(readme): update docker section (#10716) ([ipfs/kubo#10716](https://github.com/ipfs/kubo/pull/10716)) + - Update go-ds-badger to v0.3.1 (#10722) ([ipfs/kubo#10722](https://github.com/ipfs/kubo/pull/10722)) + - Update pebble db to latest format by default (#10720) ([ipfs/kubo#10720](https://github.com/ipfs/kubo/pull/10720)) + - fix: switch away from IPFS_LOG_LEVEL (#10694) ([ipfs/kubo#10694](https://github.com/ipfs/kubo/pull/10694)) + - Merge release v0.33.2 ([ipfs/kubo#10713](https://github.com/ipfs/kubo/pull/10713)) + - Remove unused TimeParts struct (#10708) ([ipfs/kubo#10708](https://github.com/ipfs/kubo/pull/10708)) + - fix(rpc): restore and deprecate `bitswap reprovide` (#10699) ([ipfs/kubo#10699](https://github.com/ipfs/kubo/pull/10699)) + - docs(release): update RELEASE_CHECKLIST.md after v0.33.1 (#10697) ([ipfs/kubo#10697](https://github.com/ipfs/kubo/pull/10697)) + - docs: update min requirements (#10687) ([ipfs/kubo#10687](https://github.com/ipfs/kubo/pull/10687)) + - Merge release v0.33.1 ([ipfs/kubo#10698](https://github.com/ipfs/kubo/pull/10698)) + - fix: boxo v0.27.4 (#10692) ([ipfs/kubo#10692](https://github.com/ipfs/kubo/pull/10692)) + - fix: Issue #9364 JSON config validation (#10679) ([ipfs/kubo#10679](https://github.com/ipfs/kubo/pull/10679)) + - docs: RELEASE_CHECKLIST.md update for 0.33 (#10674) ([ipfs/kubo#10674](https://github.com/ipfs/kubo/pull/10674)) + - feat: Better self-service commands for DHT providing (#10677) ([ipfs/kubo#10677](https://github.com/ipfs/kubo/pull/10677)) + - docs: add webrtc-direct fixes to 0.33 release changelog (#10688) ([ipfs/kubo#10688](https://github.com/ipfs/kubo/pull/10688)) + - fix: config help (#10686) ([ipfs/kubo#10686](https://github.com/ipfs/kubo/pull/10686)) + - feat: Add CI for Spell Checking (#10637) ([ipfs/kubo#10637](https://github.com/ipfs/kubo/pull/10637)) + - Merge release v0.33.0 
([ipfs/kubo#10684](https://github.com/ipfs/kubo/pull/10684)) + - test: fix the socat tests after the ubuntu 24.04 upgrade (#10683) ([ipfs/kubo#10683](https://github.com/ipfs/kubo/pull/10683)) + - fix: quic-go v0.49.0 (#10673) ([ipfs/kubo#10673](https://github.com/ipfs/kubo/pull/10673)) + - Upgrade to Boxo v0.27.2 (#10672) ([ipfs/kubo#10672](https://github.com/ipfs/kubo/pull/10672)) + - Upgrade to Boxo v0.27.1 (#10671) ([ipfs/kubo#10671](https://github.com/ipfs/kubo/pull/10671)) + - fix(autotls): renewal and AutoTLS.ShortAddrs (#10669) ([ipfs/kubo#10669](https://github.com/ipfs/kubo/pull/10669)) + - update changelog for boxo and go-libp2p (#10668) ([ipfs/kubo#10668](https://github.com/ipfs/kubo/pull/10668)) + - Upgrade to Boxo v0.27.0 (#10665) ([ipfs/kubo#10665](https://github.com/ipfs/kubo/pull/10665)) + - update dependencies (#10664) ([ipfs/kubo#10664](https://github.com/ipfs/kubo/pull/10664)) + - docs(readme): add unofficial Fedora COPR (#10660) ([ipfs/kubo#10660](https://github.com/ipfs/kubo/pull/10660)) + - fix(dns): update default DNSLink resolvers (#10655) ([ipfs/kubo#10655](https://github.com/ipfs/kubo/pull/10655)) + - chore: p2p-forge v0.2.2 + go-libp2p-kad-dht v0.28.2 (#10663) ([ipfs/kubo#10663](https://github.com/ipfs/kubo/pull/10663)) + - fix(cli): support HTTPS in ipfs --api (#10659) ([ipfs/kubo#10659](https://github.com/ipfs/kubo/pull/10659)) + - chore: fix typos and comment formatting (#10653) ([ipfs/kubo#10653](https://github.com/ipfs/kubo/pull/10653)) + - fix/gateway: escape directory redirect url (#10649) ([ipfs/kubo#10649](https://github.com/ipfs/kubo/pull/10649)) + - Add example of setting array to config command help ([ipfs/kubo#10650](https://github.com/ipfs/kubo/pull/10650)) + - collection of typo fixes (#10647) ([ipfs/kubo#10647](https://github.com/ipfs/kubo/pull/10647)) + - chore: bump master to 0.34.0-dev +- github.com/ipfs/boxo (v0.27.4 -> v0.29.1): + - Release v0.29.1 ([ipfs/boxo#885](https://github.com/ipfs/boxo/pull/885)) + - 
fix(provider): call reprovider throughput callback only if reprovide is enabled (#871) ([ipfs/boxo#871](https://github.com/ipfs/boxo/pull/871)) + - bitswap/httpnet: do not follow redirects (#878) ([ipfs/boxo#878](https://github.com/ipfs/boxo/pull/878)) + - Refactor(hostname): Skip DNSLink for local IP addresses to avoid DNS queries (#880) ([ipfs/boxo#880](https://github.com/ipfs/boxo/pull/880)) + - Nonfunctional (#882) ([ipfs/boxo#882](https://github.com/ipfs/boxo/pull/882)) + - fix(bitswap/client): dont set nil for DontHaveTimeoutConfig (#872) ([ipfs/boxo#872](https://github.com/ipfs/boxo/pull/872)) + - provider: add a buffered KeyChanFunc. ([ipfs/boxo#870](https://github.com/ipfs/boxo/pull/870)) + - Release v0.29.0 (#869) ([ipfs/boxo#869](https://github.com/ipfs/boxo/pull/869)) + - Do not use multiple multi-error packages, pick one (#867) ([ipfs/boxo#867](https://github.com/ipfs/boxo/pull/867)) + - feat(bitswap/client): MinTimeout for DontHaveTimeoutConfig (#865) ([ipfs/boxo#865](https://github.com/ipfs/boxo/pull/865)) + - use go-datastore without go-process (#858) ([ipfs/boxo#858](https://github.com/ipfs/boxo/pull/858)) + - minimize peermanager lock scope (#860) ([ipfs/boxo#860](https://github.com/ipfs/boxo/pull/860)) + - chore(ipns): lower `DefaultRecordTTL` to 5m (#859) ([ipfs/boxo#859](https://github.com/ipfs/boxo/pull/859)) + - httpnet: bitswap network for HTTP block retrieval over trustless gateway endpoints. 
([ipfs/boxo#747](https://github.com/ipfs/boxo/pull/747)) + - chore: Update FUNDING.json for Optimism RPF (#857) ([ipfs/boxo#857](https://github.com/ipfs/boxo/pull/857)) + - Release v0.28.0 (#854) ([ipfs/boxo#854](https://github.com/ipfs/boxo/pull/854)) + - Update deps (#852) ([ipfs/boxo#852](https://github.com/ipfs/boxo/pull/852)) + - fix: gateway/blocks-backend: GetBlock should not perform IPLD decoding (#845) ([ipfs/boxo#845](https://github.com/ipfs/boxo/pull/845)) + - Protobuf pkg name (#850) ([ipfs/boxo#850](https://github.com/ipfs/boxo/pull/850)) + - Fix intermittent test failure (#849) ([ipfs/boxo#849](https://github.com/ipfs/boxo/pull/849)) + - move `ipld/merkledag` from gogo protobuf (#841) ([ipfs/boxo#841](https://github.com/ipfs/boxo/pull/841)) + - move `ipld/unixfs` from gogo protobuf (#840) ([ipfs/boxo#840](https://github.com/ipfs/boxo/pull/840)) + - Start moving from gogo protobuf (#839) ([ipfs/boxo#839](https://github.com/ipfs/boxo/pull/839)) + - ci: uci/update-go (#848) ([ipfs/boxo#848](https://github.com/ipfs/boxo/pull/848)) + - expose DontHaveTimeoutConfig (#846) ([ipfs/boxo#846](https://github.com/ipfs/boxo/pull/846)) + - Upgrade go-libp2p to v0.39.1 (#843) ([ipfs/boxo#843](https://github.com/ipfs/boxo/pull/843)) + - feat: Prevent multiple instances of "ipfs routing reprovide" running together. 
(#834) ([ipfs/boxo#834](https://github.com/ipfs/boxo/pull/834)) + - Upgrade to go-libp2p v0.39.0 (#837) ([ipfs/boxo#837](https://github.com/ipfs/boxo/pull/837)) + - bitswap/client/internal/messagequeue: run tests in parallel (#835) ([ipfs/boxo#835](https://github.com/ipfs/boxo/pull/835)) +- github.com/ipfs/go-cid (v0.4.1 -> v0.5.0): + - v0.5.0 bump (#172) ([ipfs/go-cid#172](https://github.com/ipfs/go-cid/pull/172)) + - move _rsrch/cidiface into an internal package +- github.com/ipfs/go-datastore (v0.6.0 -> v0.8.2): + - bump version (#231) ([ipfs/go-datastore#231](https://github.com/ipfs/go-datastore/pull/231)) + - Results.Close should return error (#230) ([ipfs/go-datastore#230](https://github.com/ipfs/go-datastore/pull/230)) + - new version (#229) ([ipfs/go-datastore#229](https://github.com/ipfs/go-datastore/pull/229)) + - Update fuzz module dependencies (#228) ([ipfs/go-datastore#228](https://github.com/ipfs/go-datastore/pull/228)) + - new version (#225) ([ipfs/go-datastore#225](https://github.com/ipfs/go-datastore/pull/225)) + - No goprocess (#223) ([ipfs/go-datastore#223](https://github.com/ipfs/go-datastore/pull/223)) + - Release version 0.7.0 (#213) ([ipfs/go-datastore#213](https://github.com/ipfs/go-datastore/pull/213)) + - query result ordering does not create additional goroutine (#221) ([ipfs/go-datastore#221](https://github.com/ipfs/go-datastore/pull/221)) + - Remove unneeded dependencies (#220) ([ipfs/go-datastore#220](https://github.com/ipfs/go-datastore/pull/220)) + - Add traced datastore (#209) ([ipfs/go-datastore#209](https://github.com/ipfs/go-datastore/pull/209)) + - Add root namespace method to Key (#208) ([ipfs/go-datastore#208](https://github.com/ipfs/go-datastore/pull/208)) + - ci: uci/copy-templates (#207) ([ipfs/go-datastore#207](https://github.com/ipfs/go-datastore/pull/207)) + - test: fix fuzz commands + - fix fuzz tests by adding the missing context.Context argument (#198) 
([ipfs/go-datastore#198](https://github.com/ipfs/go-datastore/pull/198)) + - sync: update CI config files (#195) ([ipfs/go-datastore#195](https://github.com/ipfs/go-datastore/pull/195)) +- github.com/ipfs/go-ds-badger (v0.3.0 -> v0.3.4): + - new version (#137) ([ipfs/go-ds-badger#137](https://github.com/ipfs/go-ds-badger/pull/137)) + - new version (#135) ([ipfs/go-ds-badger#135](https://github.com/ipfs/go-ds-badger/pull/135)) + - new version (#132) ([ipfs/go-ds-badger#132](https://github.com/ipfs/go-ds-badger/pull/132)) + - Update to use go-datastore without go-process (#131) ([ipfs/go-ds-badger#131](https://github.com/ipfs/go-ds-badger/pull/131)) + - new version ([ipfs/go-ds-badger#128](https://github.com/ipfs/go-ds-badger/pull/128)) + - Update dependencies and minimum go version ([ipfs/go-ds-badger#127](https://github.com/ipfs/go-ds-badger/pull/127)) + - ci: uci/update-go ([ipfs/go-ds-badger#123](https://github.com/ipfs/go-ds-badger/pull/123)) + - ci: uci/copy-templates ([ipfs/go-ds-badger#122](https://github.com/ipfs/go-ds-badger/pull/122)) + - chore: check PersistentDatastore conformance at build time (#120) ([ipfs/go-ds-badger#120](https://github.com/ipfs/go-ds-badger/pull/120)) +- github.com/ipfs/go-ds-flatfs (v0.5.1 -> v0.5.5): + - bump version (#130) ([ipfs/go-ds-flatfs#130](https://github.com/ipfs/go-ds-flatfs/pull/130)) + - new version (#128) ([ipfs/go-ds-flatfs#128](https://github.com/ipfs/go-ds-flatfs/pull/128)) + - new version (#126) ([ipfs/go-ds-flatfs#126](https://github.com/ipfs/go-ds-flatfs/pull/126)) + - Fix race condition due to concurrent use of rand source (#125) ([ipfs/go-ds-flatfs#125](https://github.com/ipfs/go-ds-flatfs/pull/125)) + - new version ([ipfs/go-ds-flatfs#124](https://github.com/ipfs/go-ds-flatfs/pull/124)) + - Use go-datastore without go-process ([ipfs/go-ds-flatfs#123](https://github.com/ipfs/go-ds-flatfs/pull/123)) + - ci: uci/update-go (#122) ([ipfs/go-ds-flatfs#122](https://github.com/ipfs/go-ds-flatfs/pull/122)) + - fix: 
actually use the size hint in util_windows.go + - perf: do not use virtual call when passing os.Rename as rename + - chore(logging): update go-log v2 (#117) ([ipfs/go-ds-flatfs#117](https://github.com/ipfs/go-ds-flatfs/pull/117)) + - ci: uci/copy-templates ([ipfs/go-ds-flatfs#116](https://github.com/ipfs/go-ds-flatfs/pull/116)) + - sync: update CI config files ([ipfs/go-ds-flatfs#111](https://github.com/ipfs/go-ds-flatfs/pull/111)) + - possibly fix a bug in renameAndUpdateDiskUsage + - add documentation and comment + - perf: avoid syncing directories when they already existed (#107) ([ipfs/go-ds-flatfs#107](https://github.com/ipfs/go-ds-flatfs/pull/107)) + - test: faster TestNoCluster by batching the 3200 Puts ([ipfs/go-ds-flatfs#108](https://github.com/ipfs/go-ds-flatfs/pull/108)) + - query: also teard down on ctx done (#106) ([ipfs/go-ds-flatfs#106](https://github.com/ipfs/go-ds-flatfs/pull/106)) +- github.com/ipfs/go-ds-leveldb (v0.5.0 -> v0.5.2): + - new version (#75) ([ipfs/go-ds-leveldb#75](https://github.com/ipfs/go-ds-leveldb/pull/75)) + - Results close needs to return error (#74) ([ipfs/go-ds-leveldb#74](https://github.com/ipfs/go-ds-leveldb/pull/74)) + - new version ([ipfs/go-ds-leveldb#73](https://github.com/ipfs/go-ds-leveldb/pull/73)) + - use go-datastore without go-process ([ipfs/go-ds-leveldb#72](https://github.com/ipfs/go-ds-leveldb/pull/72)) + - sync: update CI config files (#62) ([ipfs/go-ds-leveldb#62](https://github.com/ipfs/go-ds-leveldb/pull/62)) + - chore: add PersistentDatastore and Batching interface checks +- github.com/ipfs/go-ds-measure (v0.2.0 -> v0.2.2): + - new version ([ipfs/go-ds-measure#54](https://github.com/ipfs/go-ds-measure/pull/54)) + - new version ([ipfs/go-ds-measure#52](https://github.com/ipfs/go-ds-measure/pull/52)) +- github.com/ipfs/go-ds-pebble (v0.4.2 -> v0.4.4): + - new version (#51) ([ipfs/go-ds-pebble#51](https://github.com/ipfs/go-ds-pebble/pull/51)) + - new version (#48) 
([ipfs/go-ds-pebble#48](https://github.com/ipfs/go-ds-pebble/pull/48)) + - Use go-datastore without go-process (#47) ([ipfs/go-ds-pebble#47](https://github.com/ipfs/go-ds-pebble/pull/47)) +- github.com/ipfs/go-metrics-interface (v0.0.1 -> v0.3.0): + - CounterVec: even more ergonomic ([ipfs/go-metrics-interface#22](https://github.com/ipfs/go-metrics-interface/pull/22)) + - Improve CounterVec abstraction ([ipfs/go-metrics-interface#21](https://github.com/ipfs/go-metrics-interface/pull/21)) + - v0.1.0 ([ipfs/go-metrics-interface#20](https://github.com/ipfs/go-metrics-interface/pull/20)) + - Feat: Add CounterVec type. ([ipfs/go-metrics-interface#19](https://github.com/ipfs/go-metrics-interface/pull/19)) + - sync: update CI config files (#10) ([ipfs/go-metrics-interface#10](https://github.com/ipfs/go-metrics-interface/pull/10)) + - sync: update CI config files (#8) ([ipfs/go-metrics-interface#8](https://github.com/ipfs/go-metrics-interface/pull/8)) + - use a struct as a key for the context ([ipfs/go-metrics-interface#4](https://github.com/ipfs/go-metrics-interface/pull/4)) +- github.com/ipfs/go-metrics-prometheus (v0.0.3 -> v0.1.0): + - Implement the CounterVec type. 
([ipfs/go-metrics-prometheus#26](https://github.com/ipfs/go-metrics-prometheus/pull/26)) +- github.com/ipfs/go-test (v0.0.4 -> v0.2.1): + - new version (#20) ([ipfs/go-test#20](https://github.com/ipfs/go-test/pull/20)) + - No newline at end of random raw data (#19) ([ipfs/go-test#19](https://github.com/ipfs/go-test/pull/19)) + - new-version (#18) ([ipfs/go-test#18](https://github.com/ipfs/go-test/pull/18)) + - new version (#15) ([ipfs/go-test#15](https://github.com/ipfs/go-test/pull/15)) + - refactor: Make go-multiaddr v0.15 forward compatible change (#16) ([ipfs/go-test#16](https://github.com/ipfs/go-test/pull/16)) + - Move cli apps (#17) ([ipfs/go-test#17](https://github.com/ipfs/go-test/pull/17)) + - Update help text (#14) ([ipfs/go-test#14](https://github.com/ipfs/go-test/pull/14)) + - Add package to generate random filesystem hierarchies for testing (#13) ([ipfs/go-test#13](https://github.com/ipfs/go-test/pull/13)) +- github.com/ipfs/go-unixfsnode (v1.9.2 -> v1.10.0): + - new version ([ipfs/go-unixfsnode#81](https://github.com/ipfs/go-unixfsnode/pull/81)) + - upgrade to boxo v0.27.4 ([ipfs/go-unixfsnode#80](https://github.com/ipfs/go-unixfsnode/pull/80)) +- github.com/libp2p/go-libp2p (v0.38.3 -> v0.41.0): + - Release v0.41.0 (#3210) ([libp2p/go-libp2p#3210](https://github.com/libp2p/go-libp2p/pull/3210)) + - fix(libp2phttp): Fix relative to absolute multiaddr URI logic (#3208) ([libp2p/go-libp2p#3208](https://github.com/libp2p/go-libp2p/pull/3208)) + - fix(dcutr): Fix end to end tests and add legacy behavior flag (default=true) (#3044) ([libp2p/go-libp2p#3044](https://github.com/libp2p/go-libp2p/pull/3044)) + - feat(libp2phttp): More ergonomic auth (#3188) ([libp2p/go-libp2p#3188](https://github.com/libp2p/go-libp2p/pull/3188)) + - chore(identify): move log to debug level (#3206) ([libp2p/go-libp2p#3206](https://github.com/libp2p/go-libp2p/pull/3206)) + - chore: Update go-multiaddr to v0.15 (#3145) 
([libp2p/go-libp2p#3145](https://github.com/libp2p/go-libp2p/pull/3145)) + - chore: update quic-go to v0.50.0 (#3204) ([libp2p/go-libp2p#3204](https://github.com/libp2p/go-libp2p/pull/3204)) + - chore: move go-nat to internal package + - basichost: add certhashes to addrs in place (#3200) ([libp2p/go-libp2p#3200](https://github.com/libp2p/go-libp2p/pull/3200)) + - autorelay: send addresses on eventbus; dont wrap address factory (#3071) ([libp2p/go-libp2p#3071](https://github.com/libp2p/go-libp2p/pull/3071)) + - chore: update ci for go1.24 (#3195) ([libp2p/go-libp2p#3195](https://github.com/libp2p/go-libp2p/pull/3195)) + - Release v0.40.0 (#3192) ([libp2p/go-libp2p#3192](https://github.com/libp2p/go-libp2p/pull/3192)) + - chore: bump deps for v0.40.0 (#3191) ([libp2p/go-libp2p#3191](https://github.com/libp2p/go-libp2p/pull/3191)) + - autonatv2: allow multiple concurrent requests per peer (#3187) ([libp2p/go-libp2p#3187](https://github.com/libp2p/go-libp2p/pull/3187)) + - feat: add AutoTLS example (#3103) ([libp2p/go-libp2p#3103](https://github.com/libp2p/go-libp2p/pull/3103)) + - feat(swarm): logging waitForDirectConn return error (#3183) ([libp2p/go-libp2p#3183](https://github.com/libp2p/go-libp2p/pull/3183)) + - tcpreuse: fix Scope() for *tls.Conn (#3181) ([libp2p/go-libp2p#3181](https://github.com/libp2p/go-libp2p/pull/3181)) + - test(p2p/protocol/identify): fix user agent assertion in Go 1.24 (#3177) ([libp2p/go-libp2p#3177](https://github.com/libp2p/go-libp2p/pull/3177)) + - swarm: remove unnecessary error log (#3128) ([libp2p/go-libp2p#3128](https://github.com/libp2p/go-libp2p/pull/3128)) + - Implement error codes spec (#2927) ([libp2p/go-libp2p#2927](https://github.com/libp2p/go-libp2p/pull/2927)) + - chore: update pion/ice to v4 (#3175) ([libp2p/go-libp2p#3175](https://github.com/libp2p/go-libp2p/pull/3175)) + - chore: release v0.39.0 (#3174) ([libp2p/go-libp2p#3174](https://github.com/libp2p/go-libp2p/pull/3174)) + - feat(holepunch): add logging when 
DirectConnect execution fails (#3146) ([libp2p/go-libp2p#3146](https://github.com/libp2p/go-libp2p/pull/3146)) + - feat: Implement Custom TCP Dialers (#3166) ([libp2p/go-libp2p#3166](https://github.com/libp2p/go-libp2p/pull/3166)) + - Update quic-go to v0.49.0 (#3153) ([libp2p/go-libp2p#3153](https://github.com/libp2p/go-libp2p/pull/3153)) + - feat(transport/websocket): support SOCKS proxy with ws(s) (#3137) ([libp2p/go-libp2p#3137](https://github.com/libp2p/go-libp2p/pull/3137)) + - tcpreuse: fix rcmgr accounting when tcp metrics are enabled (#3142) ([libp2p/go-libp2p#3142](https://github.com/libp2p/go-libp2p/pull/3142)) + - fix(net/nat): data race problem of `extAddr` (#3140) ([libp2p/go-libp2p#3140](https://github.com/libp2p/go-libp2p/pull/3140)) + - test: fix failing test (#3141) ([libp2p/go-libp2p#3141](https://github.com/libp2p/go-libp2p/pull/3141)) + - quicreuse: make it possible to use an application-constructed quic.Transport (#3122) ([libp2p/go-libp2p#3122](https://github.com/libp2p/go-libp2p/pull/3122)) + - nat: ignore mapping if external port is 0 (#3094) ([libp2p/go-libp2p#3094](https://github.com/libp2p/go-libp2p/pull/3094)) + - tcpreuse: error on using tcpreuse with pnet (#3129) ([libp2p/go-libp2p#3129](https://github.com/libp2p/go-libp2p/pull/3129)) + - chore: Update contribution guidelines (#3134) ([libp2p/go-libp2p#3134](https://github.com/libp2p/go-libp2p/pull/3134)) + - tcp: fix metrics test build directive (#3052) ([libp2p/go-libp2p#3052](https://github.com/libp2p/go-libp2p/pull/3052)) + - webrtc: upgrade pion/webrtc to v4 (#3098) ([libp2p/go-libp2p#3098](https://github.com/libp2p/go-libp2p/pull/3098)) + - webtransport: fix docstring comment for getCurrentBucketStartTime + - chore: release v0.38.1 (#3114) ([libp2p/go-libp2p#3114](https://github.com/libp2p/go-libp2p/pull/3114)) +- github.com/libp2p/go-libp2p-kad-dht (v0.28.2 -> v0.30.2): + - new version (#1059) 
([libp2p/go-libp2p-kad-dht#1059](https://github.com/libp2p/go-libp2p-kad-dht/pull/1059)) + - do not use multiple multi-error packages, pick one (#1058) ([libp2p/go-libp2p-kad-dht#1058](https://github.com/libp2p/go-libp2p-kad-dht/pull/1058)) + - update version (#1057) ([libp2p/go-libp2p-kad-dht#1057](https://github.com/libp2p/go-libp2p-kad-dht/pull/1057)) + - chore: release v0.30.0 (#1054) ([libp2p/go-libp2p-kad-dht#1054](https://github.com/libp2p/go-libp2p-kad-dht/pull/1054)) + - fix: crawler polluting peerstore (#1053) ([libp2p/go-libp2p-kad-dht#1053](https://github.com/libp2p/go-libp2p-kad-dht/pull/1053)) + - new version (#1052) ([libp2p/go-libp2p-kad-dht#1052](https://github.com/libp2p/go-libp2p-kad-dht/pull/1052)) + - use go-datastore without go-process (#1051) ([libp2p/go-libp2p-kad-dht#1051](https://github.com/libp2p/go-libp2p-kad-dht/pull/1051)) + - feat: use OTEL for metrics (removes opencensus) (#1045) ([libp2p/go-libp2p-kad-dht#1045](https://github.com/libp2p/go-libp2p-kad-dht/pull/1045)) + - release v0.29.1 (#1042) ([libp2p/go-libp2p-kad-dht#1042](https://github.com/libp2p/go-libp2p-kad-dht/pull/1042)) + - fix: flaky TestInvalidServer (#1049) ([libp2p/go-libp2p-kad-dht#1049](https://github.com/libp2p/go-libp2p-kad-dht/pull/1049)) + - chore: update deps (#1048) ([libp2p/go-libp2p-kad-dht#1048](https://github.com/libp2p/go-libp2p-kad-dht/pull/1048)) + - fix addrsSoFar comparison (#1046) ([libp2p/go-libp2p-kad-dht#1046](https://github.com/libp2p/go-libp2p-kad-dht/pull/1046)) + - fix: flaky TestInvalidServer (#1043) ([libp2p/go-libp2p-kad-dht#1043](https://github.com/libp2p/go-libp2p-kad-dht/pull/1043)) + - add verbose to TestFindProviderAsync (dual) (#1040) ([libp2p/go-libp2p-kad-dht#1040](https://github.com/libp2p/go-libp2p-kad-dht/pull/1040)) + - test: cover dns addresses in TestAddrFilter (#1041) ([libp2p/go-libp2p-kad-dht#1041](https://github.com/libp2p/go-libp2p-kad-dht/pull/1041)) + - fix: flaky TestSearchValue (dual) (#1038) 
([libp2p/go-libp2p-kad-dht#1038](https://github.com/libp2p/go-libp2p-kad-dht/pull/1038)) + - fix: flaky TestClientModeConnect (#1037) ([libp2p/go-libp2p-kad-dht#1037](https://github.com/libp2p/go-libp2p-kad-dht/pull/1037)) + - fix: flaky TestFindPeerQueryMinimal (#1036) ([libp2p/go-libp2p-kad-dht#1036](https://github.com/libp2p/go-libp2p-kad-dht/pull/1036)) + - fix: flaky TestInvalidServer (#1032) ([libp2p/go-libp2p-kad-dht#1032](https://github.com/libp2p/go-libp2p-kad-dht/pull/1032)) + - fix: flaky TestFindPeerWithQueryFilter (#1034) ([libp2p/go-libp2p-kad-dht#1034](https://github.com/libp2p/go-libp2p-kad-dht/pull/1034)) + - fix: Flaky TestInvalidServer (#1029) ([libp2p/go-libp2p-kad-dht#1029](https://github.com/libp2p/go-libp2p-kad-dht/pull/1029)) + - fix: flaky TestClientModeConnect (#1028) ([libp2p/go-libp2p-kad-dht#1028](https://github.com/libp2p/go-libp2p-kad-dht/pull/1028)) + - fix: increase timeout in TestProvidesMany (#1027) ([libp2p/go-libp2p-kad-dht#1027](https://github.com/libp2p/go-libp2p-kad-dht/pull/1027)) + - fix(tests): cleanup of skipped tests (#1025) ([libp2p/go-libp2p-kad-dht#1025](https://github.com/libp2p/go-libp2p-kad-dht/pull/1025)) + - fix: don't skip TestProvidesExpire (#1024) ([libp2p/go-libp2p-kad-dht#1024](https://github.com/libp2p/go-libp2p-kad-dht/pull/1024)) + - fixing flaky TestFindPeerQueryMinimal (#1020) ([libp2p/go-libp2p-kad-dht#1020](https://github.com/libp2p/go-libp2p-kad-dht/pull/1020)) + - fix flaky TestSkipRefreshOnGapCpls (#1021) ([libp2p/go-libp2p-kad-dht#1021](https://github.com/libp2p/go-libp2p-kad-dht/pull/1021)) + - fix: don't skip TestContextShutDown (#1022) ([libp2p/go-libp2p-kad-dht#1022](https://github.com/libp2p/go-libp2p-kad-dht/pull/1022)) + - comments formatting and typos (#1019) ([libp2p/go-libp2p-kad-dht#1019](https://github.com/libp2p/go-libp2p-kad-dht/pull/1019)) + - log peers rejected for diversity (#759) ([libp2p/go-libp2p-kad-dht#759](https://github.com/libp2p/go-libp2p-kad-dht/pull/759)) + - docs: 
update fullrt docs (#768) ([libp2p/go-libp2p-kad-dht#768](https://github.com/libp2p/go-libp2p-kad-dht/pull/768)) + - query cleanup (#1017) ([libp2p/go-libp2p-kad-dht#1017](https://github.com/libp2p/go-libp2p-kad-dht/pull/1017)) + - better variable names (#787) ([libp2p/go-libp2p-kad-dht#787](https://github.com/libp2p/go-libp2p-kad-dht/pull/787)) + - release v0.29.0 (#1014) ([libp2p/go-libp2p-kad-dht#1014](https://github.com/libp2p/go-libp2p-kad-dht/pull/1014)) + - Move from gogo protobuf (#975) ([libp2p/go-libp2p-kad-dht#975](https://github.com/libp2p/go-libp2p-kad-dht/pull/975)) + - fix: don't copy message to OnRequestHook ([libp2p/go-libp2p-kad-dht#1012](https://github.com/libp2p/go-libp2p-kad-dht/pull/1012)) + - chore: remove boxo/util deps ([libp2p/go-libp2p-kad-dht#1013](https://github.com/libp2p/go-libp2p-kad-dht/pull/1013)) + - feat: add request callback config option ([libp2p/go-libp2p-kad-dht#1011](https://github.com/libp2p/go-libp2p-kad-dht/pull/1011)) +- github.com/libp2p/go-libp2p-kbucket (v0.6.4 -> v0.6.5): + - upgrading deps (#137) ([libp2p/go-libp2p-kbucket#137](https://github.com/libp2p/go-libp2p-kbucket/pull/137)) +- github.com/libp2p/go-libp2p-pubsub (v0.12.0 -> v0.13.0): + - Release v0.13.0 (#593) ([libp2p/go-libp2p-pubsub#593](https://github.com/libp2p/go-libp2p-pubsub/pull/593)) + - Allow cancelling IWANT using IDONTWANT (#591) ([libp2p/go-libp2p-pubsub#591](https://github.com/libp2p/go-libp2p-pubsub/pull/591)) + - Improve IDONTWANT Flood Protection (#590) ([libp2p/go-libp2p-pubsub#590](https://github.com/libp2p/go-libp2p-pubsub/pull/590)) + - Fix the Router's Ability to Prune the Mesh Periodically (#589) ([libp2p/go-libp2p-pubsub#589](https://github.com/libp2p/go-libp2p-pubsub/pull/589)) + - Add Function to Enable Application Layer to Send Direct Control Messages (#562) ([libp2p/go-libp2p-pubsub#562](https://github.com/libp2p/go-libp2p-pubsub/pull/562)) + - Do not format expensive debug messages in non-debug levels in doDropRPC (#580) 
([libp2p/go-libp2p-pubsub#580](https://github.com/libp2p/go-libp2p-pubsub/pull/580)) +- github.com/libp2p/go-libp2p-record (v0.2.0 -> v0.3.1): + - fix: missing protobuf package (#64) ([libp2p/go-libp2p-record#64](https://github.com/libp2p/go-libp2p-record/pull/64)) + - release: v0.3.0 (#63) ([libp2p/go-libp2p-record#63](https://github.com/libp2p/go-libp2p-record/pull/63)) + - fix: protobuf namespace conflicts (#62) ([libp2p/go-libp2p-record#62](https://github.com/libp2p/go-libp2p-record/pull/62)) + - Remove gogo protobuf (#60) ([libp2p/go-libp2p-record#60](https://github.com/libp2p/go-libp2p-record/pull/60)) +- github.com/libp2p/go-libp2p-routing-helpers (v0.7.4 -> v0.7.5): + - new version ([libp2p/go-libp2p-routing-helpers#90](https://github.com/libp2p/go-libp2p-routing-helpers/pull/90)) + - Consolidate multi-error packages by choosing one ([libp2p/go-libp2p-routing-helpers#88](https://github.com/libp2p/go-libp2p-routing-helpers/pull/88)) + - update dependencies ([libp2p/go-libp2p-routing-helpers#89](https://github.com/libp2p/go-libp2p-routing-helpers/pull/89)) +- github.com/multiformats/go-multiaddr (v0.14.0 -> v0.15.0): + - chore: release v0.15.0 (#266) ([multiformats/go-multiaddr#266](https://github.com/multiformats/go-multiaddr/pull/266)) + - refactor: Backwards compatible Encapsulate/Decapsulate/Join/NewComponent (#272) ([multiformats/go-multiaddr#272](https://github.com/multiformats/go-multiaddr/pull/272)) + - refactor: keep same api as v0.14.0 for SplitFirst/SplitLast (#271) ([multiformats/go-multiaddr#271](https://github.com/multiformats/go-multiaddr/pull/271)) + - refactor: Follows up on #261 (#264) ([multiformats/go-multiaddr#264](https://github.com/multiformats/go-multiaddr/pull/264)) + - refactor!: make the API harder to misuse (#261) ([multiformats/go-multiaddr#261](https://github.com/multiformats/go-multiaddr/pull/261)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hector Sanjuan | 100 | +4777/-1495 | 200 | +| Marco Munizaga | 22 | +3482/-1632 | 122 | +| Andrew Gillis | 69 | +1628/-1509 | 191 | +| sukun | 13 | +1240/-288 | 67 | +| Simon Menke | 7 | +766/-97 | 16 | +| Guillaume Michel | 33 | +438/-383 | 62 | +| Marcin Rataj | 24 | +494/-266 | 47 | +| Sergey Gorbunov | 4 | +384/-103 | 20 | +| AvyChanna | 1 | +294/-193 | 9 | +| gammazero | 22 | +208/-217 | 28 | +| Dennis Trautwein | 3 | +425/-0 | 8 | +| web3-bot | 18 | +193/-184 | 46 | +| Steven Allen | 8 | +204/-82 | 13 | +| Marten Seemann | 5 | +215/-63 | 11 | +| Daniel Norman | 2 | +225/-0 | 6 | +| Abhinav Prakash | 1 | +190/-2 | 4 | +| guillaumemichel | 3 | +93/-56 | 15 | +| youyyytrok | 1 | +84/-63 | 29 | +| Nishant Das | 2 | +111/-1 | 4 | +| Pop Chunhapanya | 1 | +109/-0 | 2 | +| Michael Muré | 7 | +78/-29 | 15 | +| Jorropo | 4 | +53/-20 | 7 | +| Ryan Skidmore | 1 | +62/-0 | 2 | +| GITSRC | 1 | +44/-0 | 3 | +| Russell Dempsey | 1 | +22/-17 | 10 | +| Adin Schmahmann | 2 | +29/-8 | 3 | +| Gabriel Cruz | 1 | +13/-13 | 1 | +| Wlynxg | 3 | +12/-9 | 3 | +| Khaled Yakdan | 1 | +11/-10 | 1 | +| Yahya Hassanzadeh, Ph.D. | 1 | +17/-0 | 1 | +| Can ZHANG | 2 | +15/-2 | 3 | +| Pavel Zbitskiy | 1 | +13/-1 | 2 | +| Yuttakhan B. 
| 1 | +6/-6 | 6 | +| Hlib Kanunnikov | 2 | +9/-2 | 4 | +| Petar Maymounkov | 1 | +7/-2 | 1 | +| Prithvi Shahi | 2 | +8/-0 | 2 | +| Piotr Galar | 1 | +4/-4 | 2 | +| Michael Vorburger | 1 | +6/-0 | 1 | +| Gus Eggert | 2 | +6/-0 | 2 | +| Raúl Kripalani | 1 | +4/-0 | 1 | +| linchizhen | 1 | +1/-1 | 1 | +| achingbrain | 1 | +1/-1 | 1 | +| Rod Vagg | 1 | +1/-1 | 1 | +| Ian Davis | 1 | +1/-1 | 1 | +| Fabio Bozzo | 1 | +1/-1 | 1 | + +## v0.34.1 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [📦️ Important dependency updates](#-important-dependency-updates) + +### Overview + +### 🔦 Highlights + +#### 📦️ Important dependency updates + +- update `go-libp2p` to [v0.41.1](https://github.com/libp2p/go-libp2p/releases/tag/v0.41.1) + - high impact fix from [go-libp2p#3221](https://github.com/libp2p/go-libp2p/pull/3221) improves [hole punching](https://github.com/libp2p/specs/blob/master/relay/DCUtR.md) success rate +- update `quic-go` to [v0.50.1](https://github.com/quic-go/quic-go/releases/tag/v0.50.1) diff --git a/docs/changelogs/v0.35.md b/docs/changelogs/v0.35.md new file mode 100644 index 000000000..1f955182c --- /dev/null +++ b/docs/changelogs/v0.35.md @@ -0,0 +1,413 @@ +# Kubo changelog v0.35 + + + +This release was brought to you by the [Shipyard](http://ipshipyard.com/) team. 
+ +- [v0.35.0](#v0350) + +## v0.35.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [Opt-in HTTP Retrieval client](#opt-in-http-retrieval-client) + - [Dedicated `Reprovider.Strategy` for MFS](#dedicated-reproviderstrategy-for-mfs) + - [Experimental support for MFS as a FUSE mount point](#experimental-support-for-mfs-as-a-fuse-mount-point) + - [Grid view in WebUI](#grid-view-in-webui) + - [Enhanced DAG-Shaping Controls](#enhanced-dag-shaping-controls) + - [New DAG-Shaping `ipfs add` Options](#new-dag-shaping-ipfs-add-options) + - [Persistent DAG-Shaping `Import.*` Configuration](#persistent-dag-shaping-import-configuration) + - [Updated DAG-Shaping `Import` Profiles](#updated-dag-shaping-import-profiles) + - [`Datastore` Metrics Now Opt-In](#datastore-metrics-now-opt-in) + - [Improved performance of data onboarding](#improved-performance-of-data-onboarding) + - [Fast `ipfs add` in online mode](#fast-ipfs-add-in-online-mode) + - [Optimized, dedicated queue for providing fresh CIDs](#optimized-dedicated-queue-for-providing-fresh-cids) + - [Deprecated `ipfs stats provider`](#deprecated-ipfs-stats-provider) + - [New `Bitswap` configuration options](#new-bitswap-configuration-options) + - [New `Routing` configuration options](#new-routing-configuration-options) + - [New Pebble database format config](#new-pebble-database-format-config) + - [New environment variables](#new-environment-variables) + - [Improved Log Output Setting](#improved-log-output-setting) + - [New Repo Lock Optional Wait](#new-repo-lock-optional-wait) + - [📦️ Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +This release brings significant UX and performance improvements to data onboarding, provisioning, and retrieval systems.
+ +New configuration options let you customize the shape of UnixFS DAGs generated during the data import, control the scope of DAGs announced on the Amino DHT, select which delegated routing endpoints are queried, and choose whether to enable HTTP retrieval alongside Bitswap over Libp2p. + +Continue reading for more details. + + +### 🔦 Highlights + +#### Opt-in HTTP Retrieval client + +This release adds experimental support for retrieving blocks directly over HTTPS (HTTP/2), complementing the existing Bitswap over Libp2p. + +The opt-in client enables Kubo to use [delegated routing](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters) results with `/tls/http` multiaddrs, connecting to HTTPS servers that support [Trustless HTTP Gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway)'s Block Responses (`?format=raw`, `application/vnd.ipld.raw`). Fetching blocks via HTTPS (HTTP/2) simplifies infrastructure and reduces costs for storage providers by leveraging HTTP caching and CDNs. + +To enable this feature for testing and feedback, set: + +```console +$ ipfs config --json HTTPRetrieval.Enabled true +``` + +See [`HTTPRetrieval`](https://github.com/ipfs/kubo/blob/master/docs/config.md#httpretrieval) for more details. + +#### Dedicated `Reprovider.Strategy` for MFS + +The [Mutable File System (MFS)](https://docs.ipfs.tech/concepts/glossary/#mfs) in Kubo is a UnixFS filesystem managed with [`ipfs files`](https://docs.ipfs.tech/reference/kubo/cli/#ipfs-files) commands. It supports familiar file operations like cp and mv within a folder-tree structure, automatically updating a MerkleDAG and a "root CID" that reflects the current MFS state. Files in MFS are protected from garbage collection, offering a simpler alternative to `ipfs pin`. This makes it a popular choice for tools like [IPFS Desktop](https://docs.ipfs.tech/install/ipfs-desktop/) and the [WebUI](https://github.com/ipfs/ipfs-webui/#readme). 
+ +Previously, the `pinned` reprovider strategy required manual pin management: each dataset update meant pinning the new version and unpinning the old one. Now, new strategies—`mfs` and `pinned+mfs`—let users limit announcements to data explicitly placed in MFS. This simplifies updating datasets and announcing only the latest version to the Amino DHT. + +Users relying on the `pinned` strategy can switch to `pinned+mfs` and use MFS alone to manage updates and announcements, eliminating the need for manual pinning and unpinning. We hope this makes it easier to publish just the data that matters to you. + +See [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy) for more details. + +#### Experimental support for MFS as a FUSE mount point + +The MFS root (filesystem behind the `ipfs files` API) is now available as a read/write FUSE mount point at `Mounts.MFS`. This filesystem is mounted in the same way as `Mounts.IPFS` and `Mounts.IPNS` when running `ipfs mount` or `ipfs daemon --mount`. + +Note that the operations supported by the MFS FUSE mountpoint are limited, since MFS doesn't store file attributes. + +See [`Mounts`](https://github.com/ipfs/kubo/blob/master/docs/config.md#mounts) and [`docs/fuse.md`](https://github.com/ipfs/kubo/blob/master/docs/fuse.md) for more details. + +#### Grid view in WebUI + +The WebUI, accessible at http://127.0.0.1:5001/webui/, now includes support for the grid view on the _Files_ screen: + +> ![image](https://github.com/user-attachments/assets/80dcf0d0-8103-426f-ae91-416fb25d32b6) + +#### Enhanced DAG-Shaping Controls + +This release advances CIDv1 support by introducing fine-grained control over UnixFS DAG shaping during data ingestion with the `ipfs add` command. 
+ +Wider DAG trees (more links per node, higher fanout, larger thresholds) are beneficial for large files and directories with many files, reducing tree depth and lookup latency in high-latency networks, but they increase node size, straining memory and CPU on resource-constrained devices. Narrower trees (lower link count, lower fanout, smaller thresholds) are preferable for smaller directories, frequent updates, or low-power clients, minimizing overhead and ensuring compatibility, though they may increase traversal steps for very large datasets. + +Kubo now allows users to act on these tradeoffs and customize the width of the DAG created by `ipfs add` command. + +##### New DAG-Shaping `ipfs add` Options + +Three new options allow you to override default settings for specific import operations: + +- `--max-file-links`: Sets the maximum number of child links for a single file chunk. +- `--max-directory-links`: Defines the maximum number of child entries in a "basic" (single-chunk) directory. + - Note: Directories exceeding this limit or the `Import.UnixFSHAMTDirectorySizeThreshold` are converted to HAMT-based (sharded across multiple blocks) structures. +- `--max-hamt-fanout`: Specifies the maximum number of child nodes for HAMT internal structures. 
+ +##### Persistent DAG-Shaping `Import.*` Configuration + +You can set default values for these options using the following configuration settings: +- [`Import.UnixFSFileMaxLinks`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfsfilemaxlinks) +- [`Import.UnixFSDirectoryMaxLinks`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfsdirectorymaxlinks) +- [`Import.UnixFSHAMTDirectoryMaxFanout`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfshamtdirectorymaxfanout) +- [`Import.UnixFSHAMTDirectorySizeThreshold`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfshamtdirectorysizethreshold) + +##### Updated DAG-Shaping `Import` Profiles + +The release updated configuration [profiles](https://github.com/ipfs/kubo/blob/master/docs/config.md#profiles) to incorporate these new `Import.*` settings: +- Updated Profile: `test-cid-v1` now includes current defaults as explicit `Import.UnixFSFileMaxLinks=174`, `Import.UnixFSDirectoryMaxLinks=0`, `Import.UnixFSHAMTDirectoryMaxFanout=256` and `Import.UnixFSHAMTDirectorySizeThreshold=256KiB` +- New Profile: `test-cid-v1-wide` adopts experimental directory DAG-shaping defaults, increasing the maximum file DAG width from 174 to 1024, HAMT fanout from 256 to 1024, and raising the HAMT directory sharding threshold from 256KiB to 1MiB, aligning with 1MiB file chunks. + - Feedback: Try it out and share your thoughts at [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499). + +> [!TIP] +> Apply one of CIDv1 test [profiles](https://github.com/ipfs/kubo/blob/master/docs/config.md#profiles) with `ipfs config profile apply test-cid-v1[-wide]`. + +#### `Datastore` Metrics Now Opt-In + +To reduce overhead in the default configuration, datastore metrics are no longer enabled by default when initializing a Kubo repository with `ipfs init`. 
+Metrics prefixed with `_datastore` (e.g., `flatfs_datastore_...`, `leveldb_datastore_...`) are not exposed unless explicitly enabled. For a complete list of affected default metrics, refer to [`prometheus_metrics_added_by_measure_profile`](https://github.com/ipfs/kubo/blob/master/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile).
+
+Convenience opt-in [profiles](https://github.com/ipfs/kubo/blob/master/docs/config.md#profiles) can be enabled at initialization time with `ipfs init --profile`: `flatfs-measure`, `pebbleds-measure`, `badgerds-measure`
+
+It is also possible to manually add the `measure` wrapper. See examples in the [`Datastore.Spec`](https://github.com/ipfs/kubo/blob/master/docs/config.md#datastorespec) documentation.
+
+#### Improved performance of data onboarding
+
+This Kubo release significantly improves both the speed of ingesting data via `ipfs add` and announcing newly produced CIDs to Amino DHT.
+
+##### Fast `ipfs add` in online mode
+
+Adding a large directory of data when `ipfs daemon` was running in online mode took a long time. A significant amount of this time was spent writing to and reading from the persisted provider queue. Due to this, many users had to shut down the daemon and perform data import in offline mode. This release fixes this known limitation, significantly improving the speed of `ipfs add`.
+
+> [!IMPORTANT]
+> Performing `ipfs add` of a 10GiB file would take about 30 minutes.
+> Now it takes close to 30 seconds.
+ +Kubo v0.34: + +```console +$ time kubo/cmd/ipfs/ipfs add -r /tmp/testfiles-100M > /dev/null + 100.00 MiB / 100.00 MiB [=====================================================================] 100.00% +real 0m6.464s + +$ time kubo/cmd/ipfs/ipfs add -r /tmp/testfiles-1G > /dev/null + 1000.00 MiB / 1000.00 MiB [===================================================================] 100.00% +real 1m10.542s + +$ time kubo/cmd/ipfs/ipfs add -r /tmp/testfiles-10G > /dev/null + 10.00 GiB / 10.00 GiB [=======================================================================] 100.00% +real 24m5.744s +``` + +Kubo v0.35: + +```console +$ time kubo/cmd/ipfs/ipfs add -r /tmp/testfiles-100M > /dev/null + 100.00 MiB / 100.00 MiB [=====================================================================] 100.00% +real 0m0.326s + +$ time kubo/cmd/ipfs/ipfs add -r /tmp/testfiles-1G > /dev/null + 1.00 GiB / 1.00 GiB [=========================================================================] 100.00% +real 0m2.819s + +$ time kubo/cmd/ipfs/ipfs add -r /tmp/testfiles-10G > /dev/null + 10.00 GiB / 10.00 GiB [=======================================================================] 100.00% +real 0m28.405s +``` + +##### Optimized, dedicated queue for providing fresh CIDs + +From `kubo` [`v0.33.0`](https://github.com/ipfs/kubo/releases/tag/v0.33.0), +Bitswap stopped advertising newly added and received blocks to the DHT. Since +then `boxo/provider` is responsible for the first time provide and the recurring reprovide logic. Prior +to `v0.35.0`, provides and reprovides were handled together in batches, leading +to delays in initial advertisements (provides). + +Provides and Reprovides now have separate queues, allowing for immediate +provide of new CIDs and optimised batching of reprovides. 
+
+###### New `Provider` configuration options
+
+This change introduces new configuration options:
+
+- [`Provider.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providerenabled) is a global flag for disabling both [Provider](https://github.com/ipfs/kubo/blob/master/docs/config.md#provider) and [Reprovider](https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider) systems (announcing new/old CIDs to amino DHT).
+- [`Provider.WorkerCount`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providerworkercount) for limiting the number of concurrent provide operations, allows for fine-tuning the trade-off between announcement speed and system load when announcing new CIDs.
+- Removed `Experimental.StrategicProviding`. Superseded by `Provider.Enabled`, `Reprovider.Interval` and [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy).
+
+> [!TIP]
+> Users who need to provide large volumes of content immediately should consider setting `Routing.AcceleratedDHTClient` to `true`. If that is not enough, consider adjusting `Provider.WorkerCount` to a higher value.
+
+###### Deprecated `ipfs stats provider`
+
+Since the `ipfs stats provider` command was displaying statistics for both
+provides and reprovides, this command isn't relevant anymore after separating
+the two queues.
+
+The successor command is `ipfs stats reprovide`, showing the same statistics,
+but for reprovides only.
+
+> [!NOTE]
+> `ipfs stats provider` still works, but is marked as deprecated and will be removed in a future release. Be mindful that the command provides only statistics about reprovides (similar to `ipfs stats reprovide`) and not the new provide queue (this will be fixed as a part of a wider refactor planned for a future release).
+
+#### New `Bitswap` configuration options
+
+- [`Bitswap.Libp2pEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#bitswaplibp2penabled) determines whether Kubo will use Bitswap over libp2p (both client and server).
+- [`Bitswap.ServerEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#bitswapserverenabled) controls whether Kubo functions as a Bitswap server to host and respond to block requests.
+- [`Internal.Bitswap.ProviderSearchMaxResults`](https://github.com/ipfs/kubo/blob/master/docs/config.md#internalbitswapprovidersearchmaxresults) for adjusting the maximum number of providers the bitswap client should aim at before it stops searching for new ones.
+
+#### New `Routing` configuration options
+
+- [`Routing.IgnoreProviders`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingignoreproviders) allows ignoring specific peer IDs when returned by the content routing system as providers of content.
+  - Simplifies testing `HTTPRetrieval.Enabled` in setups where Bitswap over Libp2p and HTTP retrieval are served under different PeerIDs.
+- [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters) allows customizing HTTP routers used by Kubo when `Routing.Type` is set to `auto` or `autoclient`.
+  - Users are now able to adjust the default routing system and directly query custom routers for increased resiliency or when the dataset is too big and CIDs are not announced on Amino DHT.
+
+> [!TIP]
+>
+> For example, to use Pinata's routing endpoint in addition to IPNI at `cid.contact`:
+>
+> ```console
+> $ ipfs config --json Routing.DelegatedRouters '["https://cid.contact","https://indexer.pinata.cloud"]'
+> ```
+
+#### New Pebble database format config
+
+This Kubo release provides node operators with more control over [Pebble's `FormatMajorVersion`](https://github.com/cockroachdb/pebble/tree/master?tab=readme-ov-file#format-major-versions).
This allows testing a new Kubo release without automatically migrating Pebble datastores, keeping the ability to switch back to older Kubo.
+
+When IPFS is initialized to use the pebbleds datastore (opt-in via `ipfs init --profile=pebbleds`), the latest pebble database format is configured in the pebble datastore config as `"formatMajorVersion"`. Setting this in the datastore config prevents automatically upgrading to the latest available version when Kubo is upgraded. If a later version becomes available, the Kubo daemon prints a startup message to indicate this. The user can then update the config to use the latest format when they are certain a downgrade will not be necessary.
+
+Without the `"formatMajorVersion"` in the pebble datastore config, the database format is automatically upgraded to the latest version. If this happens, then it is possible a downgrade back to the previous version of Kubo will not work if the new format is not compatible with the pebble datastore in the previous version of Kubo.
+
+When installing a new version of Kubo when `"formatMajorVersion"` is configured, automatic repository migration (`ipfs daemon` with `--migrate=true`) does not upgrade this to the latest available version. This is done because a user may have reasons not to upgrade the pebble database format, and may want to be able to downgrade Kubo if something else is not working in the new version. If the configured pebble database format in the old Kubo is not supported in the new Kubo, then the configured version must be updated and the old Kubo run, before installing the new Kubo.
+
+See other caveats and configuration options at [`kubo/docs/datastores.md#pebbleds`](https://github.com/ipfs/kubo/blob/master/docs/datastores.md#pebbleds)
+
+#### New environment variables
+
+The [`environment-variables.md`](https://github.com/ipfs/kubo/blob/master/docs/environment-variables.md) was extended with two new features:
+
+##### Improved Log Output Setting
+
+When stderr and/or stdout options are configured or specified by the `GOLOG_OUTPUT` environment variable, log only to the output(s) specified. For example:
+
+- `GOLOG_OUTPUT="stderr"` logs only to stderr
+- `GOLOG_OUTPUT="stdout"` logs only to stdout
+- `GOLOG_OUTPUT="stderr+stdout"` logs to both stderr and stdout
+
+##### New Repo Lock Optional Wait
+
+The environment variable `IPFS_WAIT_REPO_LOCK` specifies the amount of time to wait for the repo lock. Set the value of this variable to a string that can be [parsed](https://pkg.go.dev/time@go1.24.3#ParseDuration) as a golang `time.Duration`. For example:
+```
+IPFS_WAIT_REPO_LOCK="15s"
+```
+
+If the lock cannot be acquired because someone else has the lock, and `IPFS_WAIT_REPO_LOCK` is set to a valid value, then acquiring the lock is retried every second until the lock is acquired or the specified wait time has elapsed.
+
+#### 📦️ Important dependency updates
+
+- update `boxo` to [v0.30.0](https://github.com/ipfs/boxo/releases/tag/v0.30.0)
+- update `ipfs-webui` to [v4.7.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.7.0)
+- update `go-ds-pebble` to [v0.5.0](https://github.com/ipfs/go-ds-pebble/releases/tag/v0.5.0)
+  - update `pebble` to [v2.0.3](https://github.com/cockroachdb/pebble/releases/tag/v2.0.3)
+- update `go-libp2p-pubsub` to [v0.13.1](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.13.1)
+- update `go-libp2p-kad-dht` to [v0.33.1](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.33.1) (incl.
[v0.33.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.33.0), [v0.32.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.32.0), [v0.31.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.31.0)) +- update `go-log` to [v2.6.0](https://github.com/ipfs/go-log/releases/tag/v2.6.0) +- update `p2p-forge/client` to [v0.5.1](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.5.1) + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore(version): 0.35.0 + - fix: go-libp2p-kad-dht v0.33.1 (#10814) ([ipfs/kubo#10814](https://github.com/ipfs/kubo/pull/10814)) + - fix: p2p-forge v0.5.1 ignoring /p2p-circuit (#10813) ([ipfs/kubo#10813](https://github.com/ipfs/kubo/pull/10813)) + - chore(version): 0.35.0-rc2 + - fix(fuse): ipns error handling and friendly errors (#10807) ([ipfs/kubo#10807](https://github.com/ipfs/kubo/pull/10807)) + - fix(config): wire up `Provider.Enabled` flag (#10804) ([ipfs/kubo#10804](https://github.com/ipfs/kubo/pull/10804)) + - docs(changelog): go-libp2p-kad-dht + - chore(version): 0.35.0-rc1 + - feat: IPFS_WAIT_REPO_LOCK (#10797) ([ipfs/kubo#10797](https://github.com/ipfs/kubo/pull/10797)) + - logging: upgrade to go-log/v2 v2.6.0 (#10798) ([ipfs/kubo#10798](https://github.com/ipfs/kubo/pull/10798)) + - chore: ensure /mfs is present in docker + - feat(fuse): Expose MFS as FUSE mount point (#10781) ([ipfs/kubo#10781](https://github.com/ipfs/kubo/pull/10781)) + - feat: opt-in http retrieval client (#10772) ([ipfs/kubo#10772](https://github.com/ipfs/kubo/pull/10772)) + - Update go-libp2p-pubsub to v0.13.1 (#10795) ([ipfs/kubo#10795](https://github.com/ipfs/kubo/pull/10795)) + - feat(config): ability to disable Bitswap fully or just server (#10782) ([ipfs/kubo#10782](https://github.com/ipfs/kubo/pull/10782)) + - refactor: make datastore metrics opt-in (#10788) ([ipfs/kubo#10788](https://github.com/ipfs/kubo/pull/10788)) + - feat(pebble): support pinning `FormatMajorVersion` (#10789) ([ipfs/kubo#10789](https://github.com/ipfs/kubo/pull/10789)) + - feat: `Provider.WorkerCount` and `stats reprovide` (#10779) ([ipfs/kubo#10779](https://github.com/ipfs/kubo/pull/10779)) + - Upgrade to Boxo v0.30.0 (#10794) ([ipfs/kubo#10794](https://github.com/ipfs/kubo/pull/10794)) + - docs: use latest fuse package (#10791) ([ipfs/kubo#10791](https://github.com/ipfs/kubo/pull/10791)) + - remove duplicate words (#10790) 
([ipfs/kubo#10790](https://github.com/ipfs/kubo/pull/10790)) + - feat(config): `ipfs add` and `Import` options for controlling UnixFS DAG Width (#10774) ([ipfs/kubo#10774](https://github.com/ipfs/kubo/pull/10774)) + - feat(config): expose ProviderSearchMaxResults (#10773) ([ipfs/kubo#10773](https://github.com/ipfs/kubo/pull/10773)) + - feat: ipfs-webui v4.7.0 (#10780) ([ipfs/kubo#10780](https://github.com/ipfs/kubo/pull/10780)) + - feat: partial DAG provides with Reprovider.Strategy=mfs|pinned+mfs (#10754) ([ipfs/kubo#10754](https://github.com/ipfs/kubo/pull/10754)) + - chore: update url + - docs: known issues with file/urlstores (#10768) ([ipfs/kubo#10768](https://github.com/ipfs/kubo/pull/10768)) + - fix: Add IPFS & IPNS path details to error (re. #10762) (#10770) ([ipfs/kubo#10770](https://github.com/ipfs/kubo/pull/10770)) + - docs: Fix typo in v0.34 changelog (#10771) ([ipfs/kubo#10771](https://github.com/ipfs/kubo/pull/10771)) + - Support WithIgnoreProviders() in provider query manager ([ipfs/kubo#10765](https://github.com/ipfs/kubo/pull/10765)) + - Merge release v0.34.1 ([ipfs/kubo#10766](https://github.com/ipfs/kubo/pull/10766)) + - fix: reprovides warning (#10761) ([ipfs/kubo#10761](https://github.com/ipfs/kubo/pull/10761)) + - Merge release v0.34.0 ([ipfs/kubo#10759](https://github.com/ipfs/kubo/pull/10759)) + - feat: ipfs-webui v4.6 (#10756) ([ipfs/kubo#10756](https://github.com/ipfs/kubo/pull/10756)) + - docs(readme): update min. 
requirements + cleanup (#10750) ([ipfs/kubo#10750](https://github.com/ipfs/kubo/pull/10750)) + - Upgrade to Boxo v0.29.1 (#10755) ([ipfs/kubo#10755](https://github.com/ipfs/kubo/pull/10755)) + - Nonfunctional (#10753) ([ipfs/kubo#10753](https://github.com/ipfs/kubo/pull/10753)) + - provider: buffer pin providers ([ipfs/kubo#10746](https://github.com/ipfs/kubo/pull/10746)) + - chore: 0.35.0-dev +- github.com/ipfs/boxo (v0.29.1 -> v0.30.0): + - Release v0.30.0 ([ipfs/boxo#915](https://github.com/ipfs/boxo/pull/915)) + - feat(bitswap): add option to disable Bitswap server (#911) ([ipfs/boxo#911](https://github.com/ipfs/boxo/pull/911)) + - provider: dedicated provide queue (#907) ([ipfs/boxo#907](https://github.com/ipfs/boxo/pull/907)) + - provider: deduplicate cids in queue (#910) ([ipfs/boxo#910](https://github.com/ipfs/boxo/pull/910)) + - feat(unixfs/mfs): support MaxLinks and MaxHAMTFanout (#906) ([ipfs/boxo#906](https://github.com/ipfs/boxo/pull/906)) + - feat(ipld/unixfs): DagModifier: allow specifying MaxLinks per file (#898) ([ipfs/boxo#898](https://github.com/ipfs/boxo/pull/898)) + - feat: NewDAGProvider to walk partial DAGs in offline mode (#905) ([ipfs/boxo#905](https://github.com/ipfs/boxo/pull/905)) + - gateway: check for UseSubdomains with IP addresses (#903) ([ipfs/boxo#903](https://github.com/ipfs/boxo/pull/903)) + - feat(gateway): add cid copy button to directory listings (#899) ([ipfs/boxo#899](https://github.com/ipfs/boxo/pull/899)) + - Improve performance of data onboarding (#888) ([ipfs/boxo#888](https://github.com/ipfs/boxo/pull/888)) + - bitswap: add requestsInFlight metric ([ipfs/boxo#904](https://github.com/ipfs/boxo/pull/904)) + - provider: simplify reprovide (#890) ([ipfs/boxo#890](https://github.com/ipfs/boxo/pull/890)) + - Upgrade to go-libp2p v0.41.1 ([ipfs/boxo#896](https://github.com/ipfs/boxo/pull/896)) + - Update RELEASE.md ([ipfs/boxo#892](https://github.com/ipfs/boxo/pull/892)) + - changelog: document bsnet import path change 
([ipfs/boxo#891](https://github.com/ipfs/boxo/pull/891)) + - fix(gateway): preserve query parameters on _redirects ([ipfs/boxo#886](https://github.com/ipfs/boxo/pull/886)) + - bitswap/httpnet: Add WithDenylist option ([ipfs/boxo#877](https://github.com/ipfs/boxo/pull/877)) +- github.com/ipfs/go-block-format (v0.2.0 -> v0.2.1): + - Update version (#60) ([ipfs/go-block-format#60](https://github.com/ipfs/go-block-format/pull/60)) + - Update go-ipfs-util to use boxo (#52) ([ipfs/go-block-format#52](https://github.com/ipfs/go-block-format/pull/52)) +- github.com/ipfs/go-ds-pebble (v0.4.4 -> v0.5.0): + - new version (#53) ([ipfs/go-ds-pebble#53](https://github.com/ipfs/go-ds-pebble/pull/53)) + - Upgrade to pebble v2.0.3 (#45) ([ipfs/go-ds-pebble#45](https://github.com/ipfs/go-ds-pebble/pull/45)) +- github.com/ipfs/go-fs-lock (v0.0.7 -> v0.1.1): + - new version (#48) ([ipfs/go-fs-lock#48](https://github.com/ipfs/go-fs-lock/pull/48)) + - Return original error when WaitLock times out (#47) ([ipfs/go-fs-lock#47](https://github.com/ipfs/go-fs-lock/pull/47)) + - new version (#45) ([ipfs/go-fs-lock#45](https://github.com/ipfs/go-fs-lock/pull/45)) + - Add WaitLock function (#44) ([ipfs/go-fs-lock#44](https://github.com/ipfs/go-fs-lock/pull/44)) + - sync: update CI config files ([ipfs/go-fs-lock#30](https://github.com/ipfs/go-fs-lock/pull/30)) + - sync: update CI config files (#27) ([ipfs/go-fs-lock#27](https://github.com/ipfs/go-fs-lock/pull/27)) + - sync: update CI config files ([ipfs/go-fs-lock#25](https://github.com/ipfs/go-fs-lock/pull/25)) +- github.com/ipfs/go-log/v2 (v2.5.1 -> v2.6.0): + - new version (#155) ([ipfs/go-log#155](https://github.com/ipfs/go-log/pull/155)) + - feat: only log to stderr or to stdout or both if configured (#154) ([ipfs/go-log#154](https://github.com/ipfs/go-log/pull/154)) + - ci: uci/copy-templates ([ipfs/go-log#145](https://github.com/ipfs/go-log/pull/145)) + - sync: update CI config files (#137) 
([ipfs/go-log#137](https://github.com/ipfs/go-log/pull/137)) +- github.com/libp2p/go-libp2p-kad-dht (v0.30.2 -> v0.33.1): + - chore: release v0.33.1 (#1088) ([libp2p/go-libp2p-kad-dht#1088](https://github.com/libp2p/go-libp2p-kad-dht/pull/1088)) + - fix(fullrt): mutex cleanup (#1087) ([libp2p/go-libp2p-kad-dht#1087](https://github.com/libp2p/go-libp2p-kad-dht/pull/1087)) + - fix: use correct mutex for reading keyToPeerMap (#1086) ([libp2p/go-libp2p-kad-dht#1086](https://github.com/libp2p/go-libp2p-kad-dht/pull/1086)) + - fix: fullrt kMapLk unlock (#1085) ([libp2p/go-libp2p-kad-dht#1085](https://github.com/libp2p/go-libp2p-kad-dht/pull/1085)) + - chore: release v0.33.0 (#1083) ([libp2p/go-libp2p-kad-dht#1083](https://github.com/libp2p/go-libp2p-kad-dht/pull/1083)) + - fix/updates to use context passed in New function for context cancellation (#1081) ([libp2p/go-libp2p-kad-dht#1081](https://github.com/libp2p/go-libp2p-kad-dht/pull/1081)) + - chore: release v0.31.1 (#1079) ([libp2p/go-libp2p-kad-dht#1079](https://github.com/libp2p/go-libp2p-kad-dht/pull/1079)) + - fix: netsize warning (#1077) ([libp2p/go-libp2p-kad-dht#1077](https://github.com/libp2p/go-libp2p-kad-dht/pull/1077)) + - fix: use correct message type attribute in metrics (#1076) ([libp2p/go-libp2p-kad-dht#1076](https://github.com/libp2p/go-libp2p-kad-dht/pull/1076)) + - chore: bump go-log to v2 (#1074) ([libp2p/go-libp2p-kad-dht#1074](https://github.com/libp2p/go-libp2p-kad-dht/pull/1074)) + - release v0.31.0 (#1072) ([libp2p/go-libp2p-kad-dht#1072](https://github.com/libp2p/go-libp2p-kad-dht/pull/1072)) + - query: ip diversity filter (#1070) ([libp2p/go-libp2p-kad-dht#1070](https://github.com/libp2p/go-libp2p-kad-dht/pull/1070)) + - tests: fix flaky TestProvidesExpire (#1069) ([libp2p/go-libp2p-kad-dht#1069](https://github.com/libp2p/go-libp2p-kad-dht/pull/1069)) + - refactor: replace fmt.Errorf with errors.New when not formatting is required (#1067) 
([libp2p/go-libp2p-kad-dht#1067](https://github.com/libp2p/go-libp2p-kad-dht/pull/1067)) + - fix: error on no valid provs (#1065) ([libp2p/go-libp2p-kad-dht#1065](https://github.com/libp2p/go-libp2p-kad-dht/pull/1065)) + - cleanup: remove deprecated opt package (#1064) ([libp2p/go-libp2p-kad-dht#1064](https://github.com/libp2p/go-libp2p-kad-dht/pull/1064)) + - cleanup: fullrt ([libp2p/go-libp2p-kad-dht#1062](https://github.com/libp2p/go-libp2p-kad-dht/pull/1062)) + - fix: remove peerstore no-op (#1063) ([libp2p/go-libp2p-kad-dht#1063](https://github.com/libp2p/go-libp2p-kad-dht/pull/1063)) + - tests: flaky TestSearchValue (dual) (#1060) ([libp2p/go-libp2p-kad-dht#1060](https://github.com/libp2p/go-libp2p-kad-dht/pull/1060)) +- github.com/libp2p/go-libp2p-kbucket (v0.6.5 -> v0.7.0): + - chore: release v0.7.0 (#143) ([libp2p/go-libp2p-kbucket#143](https://github.com/libp2p/go-libp2p-kbucket/pull/143)) + - peerdiversity: export IPGroupKey (#141) ([libp2p/go-libp2p-kbucket#141](https://github.com/libp2p/go-libp2p-kbucket/pull/141)) + - fix: flaky TestUsefulNewPeer (#140) ([libp2p/go-libp2p-kbucket#140](https://github.com/libp2p/go-libp2p-kbucket/pull/140)) + - fix: flaky TestTableFindMultipleBuckets (#139) ([libp2p/go-libp2p-kbucket#139](https://github.com/libp2p/go-libp2p-kbucket/pull/139)) +- github.com/libp2p/go-libp2p-pubsub (v0.13.0 -> v0.13.1): + - feat: WithValidatorData publishing option (#603) ([libp2p/go-libp2p-pubsub#603](https://github.com/libp2p/go-libp2p-pubsub/pull/603)) + - feat: avoid repeated checksum calculations (#599) ([libp2p/go-libp2p-pubsub#599](https://github.com/libp2p/go-libp2p-pubsub/pull/599)) +- github.com/libp2p/go-yamux/v4 (v4.0.1 -> v4.0.2): + - Release v4.0.2 (#124) ([libp2p/go-yamux#124](https://github.com/libp2p/go-yamux/pull/124)) + - fix: remove noisy logs (#116) ([libp2p/go-yamux#116](https://github.com/libp2p/go-yamux/pull/116)) + - check deadline before sending a message (#114) 
([libp2p/go-yamux#114](https://github.com/libp2p/go-yamux/pull/114)) + - only check KeepAliveInterval if keep-alive are enabled (#113) ([libp2p/go-yamux#113](https://github.com/libp2p/go-yamux/pull/113)) + - ci: uci/copy-templates ([libp2p/go-yamux#109](https://github.com/libp2p/go-yamux/pull/109)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hector Sanjuan | 16 | +2662/-590 | 71 | +| Guillaume Michel | 27 | +1339/-714 | 69 | +| Andrew Gillis | 22 | +1056/-377 | 54 | +| Sergey Gorbunov | 1 | +962/-42 | 26 | +| Marcin Rataj | 19 | +714/-133 | 47 | +| IGP | 2 | +419/-35 | 11 | +| GITSRC | 1 | +90/-1 | 3 | +| guillaumemichel | 1 | +21/-43 | 1 | +| blockchainluffy | 1 | +27/-26 | 8 | +| web3-bot | 9 | +21/-22 | 13 | +| VersaliX | 1 | +31/-2 | 4 | +| gammazero | 5 | +18/-5 | 5 | +| Hlib Kanunnikov | 1 | +14/-4 | 1 | +| diogo464 | 1 | +6/-7 | 1 | +| Asutorufa | 2 | +7/-1 | 2 | +| Russell Dempsey | 1 | +6/-1 | 1 | +| Steven Allen | 1 | +1/-5 | 1 | +| Michael Vorburger | 2 | +3/-3 | 2 | +| Aayush Rajasekaran | 1 | +2/-2 | 1 | +| sukun | 1 | +1/-1 | 1 | diff --git a/docs/changelogs/v0.36.md b/docs/changelogs/v0.36.md new file mode 100644 index 000000000..2a5234477 --- /dev/null +++ b/docs/changelogs/v0.36.md @@ -0,0 +1,329 @@ +# Kubo changelog v0.36 + + + +This release was brought to you by the [Interplanetary Shipyard](https://ipshipyard.com/) team. 
+ +- [v0.36.0](#v0360) + +## v0.36.0 + +[](https://github.com/user-attachments/assets/0d830631-7b92-48ca-8ce9-b537e1479dfb) + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [HTTP Retrieval Client Now Enabled by Default](#http-retrieval-client-now-enabled-by-default) + - [Bitswap Broadcast Reduction](#bitswap-broadcast-reduction) + - [Update go-log to v2](#update-go-log-to-v2) + - [Kubo now uses AutoNATv2 as a client](#kubo-now-uses-autonatv2-as-a-client) + - [Smarter AutoTLS registration](#smarter-autotls-registration) + - [Overwrite option for files cp command](#overwrite-option-for-files-cp-command) + - [Gateway now supports negative HTTP Range requests](#gateway-now-supports-negative-http-range-requests) + - [Option for `filestore` command to remove bad blocks](#option-for-filestore-command-to-remove-bad-blocks) + - [`ConnMgr.SilencePeriod` configuration setting exposed](#connmgrsilenceperiod-configuration-setting-exposed) + - [Fix handling of EDITOR env var](#fix-handling-of-editor-env-var) + - [📦️ Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +#### HTTP Retrieval Client Now Enabled by Default + +This release promotes the HTTP Retrieval client from an experimental feature to a standard feature that is enabled by default. When possible, Kubo will retrieve blocks over plain HTTPS (HTTP/2) without any extra user configuration. + +See [`HTTPRetrieval`](https://github.com/ipfs/kubo/blob/master/docs/config.md#httpretrieval) for more details. + +#### Bitswap Broadcast Reduction + +The Bitswap client now supports broadcast reduction logic, which is enabled by default. This feature significantly reduces the number of broadcast messages sent to peers, resulting in lower bandwidth usage during load spikes. + +The overall logic works by sending to non-local peers only if those peers have previously replied that they have data blocks. 
To minimize impact on existing workloads, by default, broadcasts are still always sent to peers on the local network, or the ones defined in `Peering.Peers`. + +At Shipyard, we conducted A/B testing on our internal Kubo staging gateway with organic CID requests to `ipfs.io`. While these results may not exactly match your specific workload, the benefits proved significant enough to make this feature default. Here are the key findings: + +- **Dramatic Resource Usage Reduction:** Internal testing demonstrated a reduction in Bitswap broadcast messages by 80-98% and network bandwidth savings of 50-95%, with the greatest improvements occurring during high traffic and peer spikes. These efficiency gains lower operational costs of running Kubo under high load and improve the IPFS Mainnet (which is >80% Kubo-based) by reducing ambient traffic for all connected peers. +- **Improved Memory Stability:** Memory stays stable even during major CID request spikes that increase peer count, preventing the out-of-memory (OOM) issues found in earlier Kubo versions. +- **Data Retrieval Performance Remains Strong:** Our tests suggest that Kubo gateway hosts with broadcast reduction enabled achieve similar or better HTTP 200 success rates compared to version 0.35, while maintaining equivalent or higher want-have responses and unique blocks received. + +For more information about our A/B tests, see [kubo#10825](https://github.com/ipfs/kubo/pull/10825). + +To revert to the previous behavior for your own A/B testing, set `Internal.Bitswap.BroadcastControl.Enable` to `false` and monitor relevant metrics (`ipfs_bitswap_bcast_skips_total`, `ipfs_bitswap_haves_received`, `ipfs_bitswap_unique_blocks_received`, `ipfs_bitswap_wanthaves_broadcast`, HTTP 200 success rate). + +For a description of the configuration items, see the documentation of [`Internal.Bitswap.BroadcastControl`](https://github.com/ipfs/kubo/blob/master/docs/config.md#internalbitswapbroadcastcontrol). 
+ +#### Update go-log to v2 + +go-log v2 has been out for quite a while now and it's time to deprecate v1. + +- Replace all use of `go-log` with `go-log/v2` +- Makes `/api/v0/log/tail` useful over HTTP +- Fixes `ipfs log tail` +- Removes support for `ContextWithLoggable` as this is not needed for tracing-like functionality + +#### Kubo now uses AutoNATv2 as a client + +This Kubo release starts utilizing [AutoNATv2](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) client functionality. go-libp2p v0.42 supports and depends on both AutoNATv1 and v2, and Autorelay feature continues to use v1. go-libp2p v0.43+ will discontinue internal use of AutoNATv1. We will maintain support for both v1 and v2 until then, though v1 will gradually be deprecated and ultimately removed. + +##### Smarter AutoTLS registration + +This update to libp2p and [AutoTLS](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls) incorporates AutoNATv2 changes. It aims to reduce false-positive scenarios where AutoTLS certificate registration occurred before a publicly dialable multiaddr was available. This should result in fewer error logs during node start, especially when IPv6 and/or IPv4 NATs with UPnP/PCP/NAT-PMP are at play. + +#### Overwrite option for files cp command + +The `ipfs files cp` command has a `--force` option to allow it to overwrite existing files. Attempting to overwrite an existing directory results in an error. + +#### Gateway now supports negative HTTP Range requests + +The latest update to `boxo/gateway` adds support for negative HTTP Range requests, achieving [gateway-conformance@v0.8](https://github.com/ipfs/gateway-conformance/releases/tag/v0.8.0) compatibility. +This provides greater interoperability with generic HTTP-based tools. For example, [WebRecorder](https://webrecorder.net/archivewebpage/)'s https://replayweb.page/ can now directly load website snapshots from Kubo-backed URLs. 
+ +#### Option for `filestore` command to remove bad blocks + +The [experimental `filestore`](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore) command has a new option, `--remove-bad-blocks`, to verify objects in the filestore and remove those that fail verification. + +#### `ConnMgr.SilencePeriod` configuration setting exposed + +This connection manager option controls how often connections are swept and potentially terminated. See the [ConnMgr documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#swarmconnmgrsilenceperiod). + +#### Fix handling of EDITOR env var + +The `ipfs config edit` command did not correctly handle the `EDITOR` environment variable when its value contains flags and arguments, i.e. `EDITOR=emacs -nw`. The command was treating the entire value of `$EDITOR` as the name of the editor command. This has been fixed to parse the value of `$EDITOR` into separate args, respecting shell quoting. + +#### 📦️ Important dependency updates + +- update `go-libp2p` to [v0.42.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.42.0) +- update `go-libp2p-kad-dht` to [v0.33.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.33.0) +- update `boxo` to [v0.33.0](https://github.com/ipfs/boxo/releases/tag/v0.33.0) (incl. [v0.32.0](https://github.com/ipfs/boxo/releases/tag/v0.32.0)) +- update `gateway-conformance` to [v0.8](https://github.com/ipfs/gateway-conformance/releases/tag/v0.8.0) +- update `p2p-forge/client` to [v0.6.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.6.0) +- update `github.com/cockroachdb/pebble/v2` to [v2.0.6](https://github.com/cockroachdb/pebble/releases/tag/v2.0.6) for Go 1.25 support + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: 0.36.0 + - chore: update links in markdown + - chore: 0.36.0-rc2 + - feat(httpnet): gather metrics for allowlist + - chore: changelog + - test: TestEditorParsing + - fix: handling of EDITOR env var (#10855) ([ipfs/kubo#10855](https://github.com/ipfs/kubo/pull/10855)) + - refactor: use slices.Sort where appropriate (#10858) ([ipfs/kubo#10858](https://github.com/ipfs/kubo/pull/10858)) + - Upgrade to Boxo v0.33.0 (#10857) ([ipfs/kubo#10857](https://github.com/ipfs/kubo/pull/10857)) + - chore: Upgrade github.com/cockroachdb/pebble/v2 to v2.0.6 for Go 1.25 support (#10850) ([ipfs/kubo#10850](https://github.com/ipfs/kubo/pull/10850)) + - core:constructor: add a log line about http retrieval + - chore: p2p-forge v0.6.0 + go-libp2p 0.42.0 (#10840) ([ipfs/kubo#10840](https://github.com/ipfs/kubo/pull/10840)) + - docs: fix minor typos (#10849) ([ipfs/kubo#10849](https://github.com/ipfs/kubo/pull/10849)) + - Replace use of go-car v1 with go-car/v2 (#10845) ([ipfs/kubo#10845](https://github.com/ipfs/kubo/pull/10845)) + - chore: v0.36.0-rc1 + - chore: deduplicate 0.36 changelog + - feat(config): connmgr: expose silence period (#10827) ([ipfs/kubo#10827](https://github.com/ipfs/kubo/pull/10827)) + - bitswap/client: configurable broadcast reduction (#10825) ([ipfs/kubo#10825](https://github.com/ipfs/kubo/pull/10825)) + - Upgrade to Boxo v0.32.0 (#10839) ([ipfs/kubo#10839](https://github.com/ipfs/kubo/pull/10839)) + - feat: HTTP retrieval enabled by default (#10836) ([ipfs/kubo#10836](https://github.com/ipfs/kubo/pull/10836)) + - feat: AutoTLS with AutoNATv2 client (#10835) ([ipfs/kubo#10835](https://github.com/ipfs/kubo/pull/10835)) + - commands: add `--force` option to `files cp` command (#10823) ([ipfs/kubo#10823](https://github.com/ipfs/kubo/pull/10823)) + - docs/env variables: Document LIBP2P_SWARM_FD_LIMIT ([ipfs/kubo#10828](https://github.com/ipfs/kubo/pull/10828)) + - test: fix "invert" commands in sharness tests (#9652) 
([ipfs/kubo#9652](https://github.com/ipfs/kubo/pull/9652)) + - Ivan386/filestore fix (#7474) ([ipfs/kubo#7474](https://github.com/ipfs/kubo/pull/7474)) + - wrap user-facing mfs.Lookup error (#10821) ([ipfs/kubo#10821](https://github.com/ipfs/kubo/pull/10821)) + - Update fuse docs with FreeBSD specifics (#10820) ([ipfs/kubo#10820](https://github.com/ipfs/kubo/pull/10820)) + - Minor wording fixes in docs (#10822) ([ipfs/kubo#10822](https://github.com/ipfs/kubo/pull/10822)) + - fix(gateway): gateway-conformance v0.8 (#10818) ([ipfs/kubo#10818](https://github.com/ipfs/kubo/pull/10818)) + - Upgrade to Boxo v0.31.0 (#10819) ([ipfs/kubo#10819](https://github.com/ipfs/kubo/pull/10819)) + - Merge release v0.35.0 ([ipfs/kubo#10815](https://github.com/ipfs/kubo/pull/10815)) + - fix: go-libp2p-kad-dht v0.33.1 (#10814) ([ipfs/kubo#10814](https://github.com/ipfs/kubo/pull/10814)) + - fix: p2p-forge v0.5.1 ignoring /p2p-circuit (#10813) ([ipfs/kubo#10813](https://github.com/ipfs/kubo/pull/10813)) + - Upgrade go-libp2p-kad-dht to v0.33.0 (#10811) ([ipfs/kubo#10811](https://github.com/ipfs/kubo/pull/10811)) + - chore: use go-log/v2 (#10801) ([ipfs/kubo#10801](https://github.com/ipfs/kubo/pull/10801)) + - fix(fuse): ipns error handling and friendly errors (#10807) ([ipfs/kubo#10807](https://github.com/ipfs/kubo/pull/10807)) + - fix(config): wire up `Provider.Enabled` flag (#10804) ([ipfs/kubo#10804](https://github.com/ipfs/kubo/pull/10804)) + - chore: bump version to 0.36.0-dev +- github.com/ipfs/boxo (v0.30.0 -> v0.33.0): + - Release v0.33.0 ([ipfs/boxo#974](https://github.com/ipfs/boxo/pull/974)) + - [skip changelog] fix sending empty want from #968 (#975) ([ipfs/boxo#975](https://github.com/ipfs/boxo/pull/975)) + - minor typo fixes (#972) ([ipfs/boxo#972](https://github.com/ipfs/boxo/pull/972)) + - fix: normalize delegated /routing/v1 urls (#971) ([ipfs/boxo#971](https://github.com/ipfs/boxo/pull/971)) + - bitswap/client: Set DontHaveTimeout MinTimeout to 50ms (#965) 
([ipfs/boxo#965](https://github.com/ipfs/boxo/pull/965)) + - remove unused code (#967) ([ipfs/boxo#967](https://github.com/ipfs/boxo/pull/967)) + - Fix sending extra wants (#968) ([ipfs/boxo#968](https://github.com/ipfs/boxo/pull/968)) + - Handle Bitswap messages without `Wantlist` (#961) ([ipfs/boxo#961](https://github.com/ipfs/boxo/pull/961)) + - bitswap/httpnet: limit metric cardinality ([ipfs/boxo#957](https://github.com/ipfs/boxo/pull/957)) + - bitswap/httpnet: Sanitize allow/denylist inputs ([ipfs/boxo#964](https://github.com/ipfs/boxo/pull/964)) + - Bitswap: Set DontHaveTimeout/MinTimeout to 200ms. ([ipfs/boxo#959](https://github.com/ipfs/boxo/pull/959)) + - upgrade go-libp2p to v0.42.0 (#960) ([ipfs/boxo#960](https://github.com/ipfs/boxo/pull/960)) + - refactor: use the built-in max/min to simplify the code [skip changelog] (#941) ([ipfs/boxo#941](https://github.com/ipfs/boxo/pull/941)) + - bitswap/httpnet: adjust error logging (#958) ([ipfs/boxo#958](https://github.com/ipfs/boxo/pull/958)) + - docs: reprovider metrics name in changelog (#953) ([ipfs/boxo#953](https://github.com/ipfs/boxo/pull/953)) + - Release v0.32.0 (#952) ([ipfs/boxo#952](https://github.com/ipfs/boxo/pull/952)) + - Remove redundant loop over published blocks (#950) ([ipfs/boxo#950](https://github.com/ipfs/boxo/pull/950)) + - Fix links in README.md (#948) ([ipfs/boxo#948](https://github.com/ipfs/boxo/pull/948)) + - chore(provider): meaningful info level log (#940) ([ipfs/boxo#940](https://github.com/ipfs/boxo/pull/940)) + - feat(provider): reprovide metrics (#944) ([ipfs/boxo#944](https://github.com/ipfs/boxo/pull/944)) + - ci: set up golangci lint in boxo (#943) ([ipfs/boxo#943](https://github.com/ipfs/boxo/pull/943)) + - Do not return error from notify blocks when bitswap shutdown (#947) ([ipfs/boxo#947](https://github.com/ipfs/boxo/pull/947)) + - bitswap/client: broadcast reduction and metrics (#937) ([ipfs/boxo#937](https://github.com/ipfs/boxo/pull/937)) + - fix: typo in HAMT error 
message ([ipfs/boxo#945](https://github.com/ipfs/boxo/pull/945)) + - bitswap/httpnet: expose the errors on connect when connection impossible ([ipfs/boxo#939](https://github.com/ipfs/boxo/pull/939)) + - fix(unixfs): int check (#936) ([ipfs/boxo#936](https://github.com/ipfs/boxo/pull/936)) + - Remove WithPeerLedger option and PeerLedger interface (#938) ([ipfs/boxo#938](https://github.com/ipfs/boxo/pull/938)) + - fix(gateway): support suffix range requests (#922) ([ipfs/boxo#922](https://github.com/ipfs/boxo/pull/922)) + - Release v0.31.0 ([ipfs/boxo#934](https://github.com/ipfs/boxo/pull/934)) + - Revert "Remove an unused timestamp from traceability.Block" (#931) ([ipfs/boxo#931](https://github.com/ipfs/boxo/pull/931)) + - update changelog (#930) ([ipfs/boxo#930](https://github.com/ipfs/boxo/pull/930)) + - Deprecate WithPeerLedger option for bitswap server (#929) ([ipfs/boxo#929](https://github.com/ipfs/boxo/pull/929)) + - refactor: use a more efficient querying method (#921) ([ipfs/boxo#921](https://github.com/ipfs/boxo/pull/921)) + - Use go-car/v2 for reading CAR files in gateway backend (#927) ([ipfs/boxo#927](https://github.com/ipfs/boxo/pull/927)) + - Upgrade go-libp2p-kad-dht v0.33.1 (#924) ([ipfs/boxo#924](https://github.com/ipfs/boxo/pull/924)) + - bitswap/httpnet: Disconnect peers after client errors ([ipfs/boxo#919](https://github.com/ipfs/boxo/pull/919)) + - Remove an unused timestamp from traceability.Block (#923) ([ipfs/boxo#923](https://github.com/ipfs/boxo/pull/923)) + - fix(bitswap/httpnet): idempotent Stop() (#920) ([ipfs/boxo#920](https://github.com/ipfs/boxo/pull/920)) + - Update dependencies (#916) ([ipfs/boxo#916](https://github.com/ipfs/boxo/pull/916)) +- github.com/ipfs/go-block-format (v0.2.1 -> v0.2.2): + - new version (#62) ([ipfs/go-block-format#62](https://github.com/ipfs/go-block-format/pull/62)) + - Use value receivers for `BasicBlock` methods (#61) ([ipfs/go-block-format#61](https://github.com/ipfs/go-block-format/pull/61)) +- 
github.com/ipfs/go-ds-badger4 (v0.1.5 -> v0.1.8): + - new version (#7) ([ipfs/go-ds-badger4#7](https://github.com/ipfs/go-ds-badger4/pull/7)) + - update version (#5) ([ipfs/go-ds-badger4#5](https://github.com/ipfs/go-ds-badger4/pull/5)) + - update dependencies (#4) ([ipfs/go-ds-badger4#4](https://github.com/ipfs/go-ds-badger4/pull/4)) + - new version ([ipfs/go-ds-badger4#3](https://github.com/ipfs/go-ds-badger4/pull/3)) + - use go-datastore without goprocess ([ipfs/go-ds-badger4#2](https://github.com/ipfs/go-ds-badger4/pull/2)) +- github.com/ipfs/go-ds-pebble (v0.5.0 -> v0.5.1): + - new version (#55) ([ipfs/go-ds-pebble#55](https://github.com/ipfs/go-ds-pebble/pull/55)) +- github.com/ipfs/go-ipfs-cmds (v0.14.1 -> v0.15.0): + - new version (#287) ([ipfs/go-ipfs-cmds#287](https://github.com/ipfs/go-ipfs-cmds/pull/287)) + - minor document updates (#286) ([ipfs/go-ipfs-cmds#286](https://github.com/ipfs/go-ipfs-cmds/pull/286)) + - Update go log v2 (#285) ([ipfs/go-ipfs-cmds#285](https://github.com/ipfs/go-ipfs-cmds/pull/285)) + - ci: uci/update-go (#281) ([ipfs/go-ipfs-cmds#281](https://github.com/ipfs/go-ipfs-cmds/pull/281)) +- github.com/ipfs/go-ipld-format (v0.6.0 -> v0.6.2): + - new version (#96) ([ipfs/go-ipld-format#96](https://github.com/ipfs/go-ipld-format/pull/96)) + - bump version (#94) ([ipfs/go-ipld-format#94](https://github.com/ipfs/go-ipld-format/pull/94)) +- github.com/ipfs/go-ipld-legacy (v0.2.1 -> v0.2.2): + - new version ([ipfs/go-ipld-legacy#25](https://github.com/ipfs/go-ipld-legacy/pull/25)) +- github.com/ipfs/go-test (v0.2.1 -> v0.2.2): + - new version (#25) ([ipfs/go-test#25](https://github.com/ipfs/go-test/pull/25)) + - Update README.md (#24) ([ipfs/go-test#24](https://github.com/ipfs/go-test/pull/24)) +- github.com/ipfs/go-unixfsnode (v1.10.0 -> v1.10.1): + - new version ([ipfs/go-unixfsnode#84](https://github.com/ipfs/go-unixfsnode/pull/84)) +- github.com/ipld/go-car/v2 (v2.14.2 -> v2.14.3): + - bump version 
([ipld/go-car#579](https://github.com/ipld/go-car/pull/579)) + - chore: update to boxo merkledag package + - feat: car debug handles the zero length block ([ipld/go-car#569](https://github.com/ipld/go-car/pull/569)) + - chore(deps): bump github.com/rogpeppe/go-internal from 1.13.1 to 1.14.1 in /cmd ([ipld/go-car#566](https://github.com/ipld/go-car/pull/566)) + - Add a concatenation cli utility ([ipld/go-car#565](https://github.com/ipld/go-car/pull/565)) +- github.com/ipld/go-codec-dagpb (v1.6.0 -> v1.7.0): + - chore: v1.7.0 bump +- github.com/libp2p/go-flow-metrics (v0.2.0 -> v0.3.0): + - chore: release v0.3.0 ([libp2p/go-flow-metrics#38](https://github.com/libp2p/go-flow-metrics/pull/38)) + - go-clock migration ([libp2p/go-flow-metrics#36](https://github.com/libp2p/go-flow-metrics/pull/36)) +- github.com/libp2p/go-libp2p (v0.41.1 -> v0.42.0): + - Release v0.42.0 (#3318) ([libp2p/go-libp2p#3318](https://github.com/libp2p/go-libp2p/pull/3318)) + - mocknet: notify listeners on listen (#3310) ([libp2p/go-libp2p#3310](https://github.com/libp2p/go-libp2p/pull/3310)) + - autonatv2: add metrics (#3308) ([libp2p/go-libp2p#3308](https://github.com/libp2p/go-libp2p/pull/3308)) + - chore: fix errors reported by golangci-lint ([libp2p/go-libp2p#3295](https://github.com/libp2p/go-libp2p/pull/3295)) + - autonatv2: add Unknown addrs to event (#3305) ([libp2p/go-libp2p#3305](https://github.com/libp2p/go-libp2p/pull/3305)) + - transport: rate limit new connections (#3283) ([libp2p/go-libp2p#3283](https://github.com/libp2p/go-libp2p/pull/3283)) + - basichost: use autonatv2 to verify reachability (#3231) ([libp2p/go-libp2p#3231](https://github.com/libp2p/go-libp2p/pull/3231)) + - chore: Revert "go-clock migration" (#3303) ([libp2p/go-libp2p#3303](https://github.com/libp2p/go-libp2p/pull/3303)) + - tcp: ensure tcpGatedMaListener wrapping happens always (#3275) ([libp2p/go-libp2p#3275](https://github.com/libp2p/go-libp2p/pull/3275)) + - go-clock migration 
([libp2p/go-libp2p#3293](https://github.com/libp2p/go-libp2p/pull/3293)) + - swarm_test: support more transports for GenSwarm (#3130) ([libp2p/go-libp2p#3130](https://github.com/libp2p/go-libp2p/pull/3130)) + - eventbus: change slow consumer event from error to warn (#3286) ([libp2p/go-libp2p#3286](https://github.com/libp2p/go-libp2p/pull/3286)) + - quicreuse: add some documentation for the package (#3279) ([libp2p/go-libp2p#3279](https://github.com/libp2p/go-libp2p/pull/3279)) + - identify: rate limit id push protocol (#3266) ([libp2p/go-libp2p#3266](https://github.com/libp2p/go-libp2p/pull/3266)) + - fix(pstoreds): add missing log for failed GC record unmarshalling in `purgeStore()` (#3273) ([libp2p/go-libp2p#3273](https://github.com/libp2p/go-libp2p/pull/3273)) + - nat: improve port mapping failure logging (#3261) ([libp2p/go-libp2p#3261](https://github.com/libp2p/go-libp2p/pull/3261)) + - ci: add golangci-lint for linting (#3269) ([libp2p/go-libp2p#3269](https://github.com/libp2p/go-libp2p/pull/3269)) + - build(test_analysis): use `modernc.org/sqlite` directly (#3227) ([libp2p/go-libp2p#3227](https://github.com/libp2p/go-libp2p/pull/3227)) + - chore(certificate): update test vectors (#3242) ([libp2p/go-libp2p#3242](https://github.com/libp2p/go-libp2p/pull/3242)) + - rcmgr: use netip.Prefix as map key instead of string (#3264) ([libp2p/go-libp2p#3264](https://github.com/libp2p/go-libp2p/pull/3264)) + - webrtc: support receiving 256kB messages (#3255) ([libp2p/go-libp2p#3255](https://github.com/libp2p/go-libp2p/pull/3255)) + - peerstore: remove leveldb tests (#3260) ([libp2p/go-libp2p#3260](https://github.com/libp2p/go-libp2p/pull/3260)) + - identify: reduce timeout to 5 seconds (#3259) ([libp2p/go-libp2p#3259](https://github.com/libp2p/go-libp2p/pull/3259)) + - fix(relay): fix data-race in relayFinder (#3258) ([libp2p/go-libp2p#3258](https://github.com/libp2p/go-libp2p/pull/3258)) + - chore: update p2p-forge to v0.5.0 for autotls example (#3257) 
([libp2p/go-libp2p#3257](https://github.com/libp2p/go-libp2p/pull/3257)) + - peerstore: remove unused badger tests (#3252) ([libp2p/go-libp2p#3252](https://github.com/libp2p/go-libp2p/pull/3252)) + - chore: using t.TempDir() instead of os.MkdirTemp (#3222) ([libp2p/go-libp2p#3222](https://github.com/libp2p/go-libp2p/pull/3222)) + - chore(examples): p2p-forge/client v0.4.0 (#3211) ([libp2p/go-libp2p#3211](https://github.com/libp2p/go-libp2p/pull/3211)) + - transport: add GatedMaListener type (#3186) ([libp2p/go-libp2p#3186](https://github.com/libp2p/go-libp2p/pull/3186)) + - autonatv2: explicitly handle dns addrs (#3249) ([libp2p/go-libp2p#3249](https://github.com/libp2p/go-libp2p/pull/3249)) + - autonatv2: fix server dial data request policy (#3247) ([libp2p/go-libp2p#3247](https://github.com/libp2p/go-libp2p/pull/3247)) + - webtransport: wrap underlying transport error on stream resets (#3237) ([libp2p/go-libp2p#3237](https://github.com/libp2p/go-libp2p/pull/3237)) + - connmgr: remove WithEmergencyTrim (#3217) ([libp2p/go-libp2p#3217](https://github.com/libp2p/go-libp2p/pull/3217)) + - connmgr: fix transport association bug (#3221) ([libp2p/go-libp2p#3221](https://github.com/libp2p/go-libp2p/pull/3221)) + - webrtc: fix memory leak with udpmux.muxedConnection context (#3243) ([libp2p/go-libp2p#3243](https://github.com/libp2p/go-libp2p/pull/3243)) + - fix(libp2phttp): bound NewStream timeout (#3225) ([libp2p/go-libp2p#3225](https://github.com/libp2p/go-libp2p/pull/3225)) + - conngater: fix incorrect err return value (#3219) ([libp2p/go-libp2p#3219](https://github.com/libp2p/go-libp2p/pull/3219)) + - addrsmanager: extract out addressing logic from basichost (#3075) ([libp2p/go-libp2p#3075](https://github.com/libp2p/go-libp2p/pull/3075)) +- github.com/libp2p/go-socket-activation (v0.1.0 -> v0.1.1): + - new version (#35) ([libp2p/go-socket-activation#35](https://github.com/libp2p/go-socket-activation/pull/35)) + - Upgrade to go-log/v2 v2.6.0 (#33) 
([libp2p/go-socket-activation#33](https://github.com/libp2p/go-socket-activation/pull/33)) + - sync: update CI config files (#20) ([libp2p/go-socket-activation#20](https://github.com/libp2p/go-socket-activation/pull/20)) + - sync: update CI config files (#18) ([libp2p/go-socket-activation#18](https://github.com/libp2p/go-socket-activation/pull/18)) + - sync: update CI config files (#17) ([libp2p/go-socket-activation#17](https://github.com/libp2p/go-socket-activation/pull/17)) +- github.com/libp2p/go-yamux/v5 (v5.0.0 -> v5.0.1): + - Release v5.0.1 + - fix: deadlock on close (#130) ([libp2p/go-yamux#130](https://github.com/libp2p/go-yamux/pull/130)) +- github.com/multiformats/go-multiaddr (v0.15.0 -> v0.16.0): + - Release v0.16.0 (#279) ([multiformats/go-multiaddr#279](https://github.com/multiformats/go-multiaddr/pull/279)) + - Rename CaptureStringVal to CaptureString (#278) ([multiformats/go-multiaddr#278](https://github.com/multiformats/go-multiaddr/pull/278)) + - Megular Expressions (#263) ([multiformats/go-multiaddr#263](https://github.com/multiformats/go-multiaddr/pull/263)) +- github.com/multiformats/go-multicodec (v0.9.0 -> v0.9.2): + - v0.9.2 bump + - chore: update submodules and go generate + - chore: v0.9.1 bump + - chore: update submodules and go generate + - ci: uci/update-go (#97) ([multiformats/go-multicodec#97](https://github.com/multiformats/go-multicodec/pull/97)) + - chore: update submodules and go generate + - chore: update submodules and go generate + - chore: update submodules and go generate + - chore: update submodules and go generate +- github.com/multiformats/go-multistream (v0.6.0 -> v0.6.1): + - Release v0.6.1 ([multiformats/go-multistream#121](https://github.com/multiformats/go-multistream/pull/121)) + - refactor(lazyClientConn): Use synctest friendly once func ([multiformats/go-multistream#120](https://github.com/multiformats/go-multistream/pull/120)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| sukun | 25 | +7274/-1586 | 140 | +| galargh | 13 | +1714/-1680 | 115 | +| rvagg | 2 | +1383/-960 | 6 | +| Andrew Gillis | 46 | +1226/-564 | 140 | +| Marco Munizaga | 6 | +1643/-36 | 24 | +| Hector Sanjuan | 20 | +624/-202 | 40 | +| Marcin Rataj | 24 | +583/-175 | 49 | +| Dennis Trautwein | 1 | +134/-14 | 4 | +| Piotr Galar | 1 | +73/-71 | 23 | +| Guillaume Michel | 4 | +58/-44 | 23 | +| Ivan | 1 | +90/-9 | 3 | +| Will Scott | 1 | +97/-0 | 2 | +| gammazero | 11 | +47/-30 | 13 | +| guillaumemichel | 3 | +40/-35 | 21 | +| Adin Schmahmann | 1 | +58/-17 | 8 | +| Laurent Senta | 1 | +26/-24 | 4 | +| pullmerge | 1 | +20/-16 | 5 | +| vladopajic | 1 | +20/-14 | 1 | +| Probot | 1 | +18/-4 | 1 | +| Dmitry Markin | 1 | +13/-9 | 2 | +| overallteach | 1 | +4/-12 | 3 | +| web3-bot | 5 | +9/-6 | 7 | +| Pavel Zbitskiy | 1 | +14/-1 | 1 | +| Rod Vagg | 5 | +7/-7 | 5 | +| argentpapa | 1 | +3/-10 | 1 | +| GarmashAlex | 1 | +8/-3 | 1 | +| huochexizhan | 1 | +3/-3 | 1 | +| VolodymyrBg | 1 | +2/-3 | 1 | +| levisyin | 1 | +2/-2 | 2 | +| b00f | 1 | +3/-0 | 1 | +| achingbrain | 1 | +1/-1 | 1 | +| Ocenka | 1 | +1/-1 | 1 | +| Dreamacro | 1 | +1/-1 | 1 | +| Štefan Baebler | 1 | +1/-0 | 1 | diff --git a/docs/changelogs/v0.37.md b/docs/changelogs/v0.37.md new file mode 100644 index 000000000..595076131 --- /dev/null +++ b/docs/changelogs/v0.37.md @@ -0,0 +1,438 @@ +# Kubo changelog v0.37 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. 
+ +- [v0.37.0](#v0370) + +## v0.37.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [🚀 Repository migration from v16 to v17 with embedded tooling](#-repository-migration-from-v16-to-v17-with-embedded-tooling) + - [🚦 Gateway concurrent request limits and retrieval timeouts](#-gateway-concurrent-request-limits-and-retrieval-timeouts) + - [🔧 AutoConf: Complete control over network defaults](#-autoconf-complete-control-over-network-defaults) + - [🗑️ Clear provide queue when reprovide strategy changes](#-clear-provide-queue-when-reprovide-strategy-changes) + - [🪵 Revamped `ipfs log level` command](#-revamped-ipfs-log-level-command) + - [📌 Named pins in `ipfs add` command](#-named-pins-in-ipfs-add-command) + - [📝 New IPNS publishing options](#-new-ipns-publishing-options) + - [🔢 Custom sequence numbers in `ipfs name publish`](#-custom-sequence-numbers-in-ipfs-name-publish) + - [⚙️ `Reprovider.Strategy` is now consistently respected](#-reproviderstrategy-is-now-consistently-respected) + - [⚙️ `Reprovider.Strategy=all`: improved memory efficiency](#-reproviderstrategyall-improved-memory-efficiency) + - [🧹 Removed unnecessary dependencies](#-removed-unnecessary-dependencies) + - [🔍 Improved `ipfs cid`](#-improved-ipfs-cid) + - [⚠️ Deprecated `ipfs stats reprovide`](#-deprecated-ipfs-stats-reprovide) + - [🔄 AutoRelay now uses all connected peers for relay discovery](#-autorelay-now-uses-all-connected-peers-for-relay-discovery) + - [📊 Anonymous telemetry for better feature prioritization](#-anonymous-telemetry-for-better-feature-prioritization) +- [📦️ Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +Kubo 0.37.0 introduces embedded repository migrations, gateway resource protection, complete AutoConf control, improved reprovider strategies, and anonymous telemetry for better feature prioritization. 
This release significantly improves memory efficiency, network configuration flexibility, and operational reliability while maintaining full backward compatibility. + +### 🔦 Highlights + +#### 🚀 Repository migration from v16 to v17 with embedded tooling + +This release migrates the Kubo repository from version 16 to version 17. Migrations are now built directly into the binary - completing in milliseconds without internet access or external downloads. + +`ipfs daemon --migrate` performs migrations automatically. Manual migration: `ipfs repo migrate --to=17` (or `--to=16 --allow-downgrade` for compatibility). Embedded migrations apply to v17+; older versions still require external tools. + +**Legacy migration deprecation**: Support for legacy migrations that download binaries from the internet will be removed in a future version. Only embedded migrations for the last 3 releases will be supported. Users with very old repositories should update in stages rather than skipping multiple versions. + +#### 🚦 Gateway concurrent request limits and retrieval timeouts + +New configurable limits protect gateway resources during high load: + +- **[`Gateway.RetrievalTimeout`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayretrievaltimeout)** (default: 30s): Maximum duration for content retrieval. Returns 504 Gateway Timeout when exceeded - applies to both initial retrieval (time to first byte) and between subsequent writes. +- **[`Gateway.MaxConcurrentRequests`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxconcurrentrequests)** (default: 4096): Limits concurrent HTTP requests. Returns 429 Too Many Requests when exceeded. Protects nodes from traffic spikes and resource exhaustion, especially useful behind reverse proxies without rate-limiting. 
+ +New Prometheus metrics for monitoring: + +- `ipfs_http_gw_concurrent_requests`: Current requests being processed +- `ipfs_http_gw_responses_total`: HTTP responses by status code +- `ipfs_http_gw_retrieval_timeouts_total`: Timeouts by status code and truncation status + +Tuning tips: + +- Monitor metrics to understand gateway behavior and adjust based on observations +- Watch `ipfs_http_gw_concurrent_requests` for saturation +- Track `ipfs_http_gw_retrieval_timeouts_total` vs success rates to identify timeout patterns indicating routing or storage provider issues + +#### 🔧 AutoConf: Complete control over network defaults + +Configuration fields now support `["auto"]` placeholders that resolve to network defaults from [`AutoConf.URL`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autoconfurl). These defaults can be inspected, replaced with custom values, or disabled entirely. Previously, empty configuration fields like `Routing.DelegatedRouters: []` would use hardcoded defaults - this system makes those defaults explicit through `"auto"` values. When upgrading to Kubo 0.37, custom configurations remain unchanged. 
+ +New `--expand-auto` flag shows resolved values for any config field: + +```bash +ipfs config show --expand-auto # View all resolved endpoints +ipfs config Bootstrap --expand-auto # Check specific values +ipfs config Routing.DelegatedRouters --expand-auto +ipfs config DNS.Resolvers --expand-auto +``` + +Configuration can be managed via: +- Replace `"auto"` with custom endpoints or set `[]` to disable features +- Switch modes with `--profile=autoconf-on|autoconf-off` +- Configure via `AutoConf.Enabled` and custom manifests via `AutoConf.URL` + +```bash +# Enable automatic configuration +ipfs config profiles apply autoconf-on + +# Or manually set specific fields +ipfs config Bootstrap '["auto"]' +ipfs config --json DNS.Resolvers '{".": ["https://dns.example.com/dns-query"], "eth.": ["auto"]}' +``` + +Organizations can host custom AutoConf manifests for private networks. See [AutoConf documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#autoconf) and format spec at https://conf.ipfs-mainnet.org/ + +#### 🗑️ Clear provide queue when reprovide strategy changes + +Changing [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy) and restarting Kubo now automatically clears the provide queue. Only content matching the new strategy will be announced. + +Manual queue clearing is also available: + +- `ipfs provide clear` - clear all queued content announcements + +> [!NOTE] +> Upgrading to Kubo 0.37 will automatically clear any preexisting provide queue. The next time `Reprovider.Interval` hits, `Reprovider.Strategy` will be executed on a clean slate, ensuring consistent behavior with your current configuration. + +#### 🪵 Revamped `ipfs log level` command + +The `ipfs log level` command has been completely revamped to support both getting and setting log levels with a unified interface. 
+ +**New: Getting log levels** + +- `ipfs log level` - Shows default level only +- `ipfs log level all` - Shows log level for every subsystem, including default level +- `ipfs log level foo` - Shows log level for a specific subsystem only +- Kubo RPC API: `POST /api/v0/log/level?arg=` + +**Enhanced: Setting log levels** + +- `ipfs log level foo debug` - Sets "foo" subsystem to "debug" level +- `ipfs log level all info` - Sets all subsystems to "info" level (convenient, no escaping) +- `ipfs log level '*' info` - Equivalent to above but requires shell escaping +- `ipfs log level foo default` - Sets "foo" subsystem to current default level + +The command now provides full visibility into your current logging configuration while maintaining full backward compatibility. Both `all` and `*` work for specifying all subsystems, with `all` being more convenient since it doesn't require shell escaping. + +#### 🧷 Named pins in `ipfs add` command + +Added `--pin-name` flag to `ipfs add` for assigning names to pins. + +```console +$ ipfs add --pin-name=testname cat.jpg +added bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi cat.jpg + +$ ipfs pin ls --names +bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi recursive testname +``` + +#### 📝 New IPNS publishing options + +Added support for controlling IPNS record publishing strategies with new command flags and configuration. + +**New command flags:** +```bash +# Publish without network connectivity (local datastore only) +ipfs name publish --allow-offline /ipfs/QmHash + +# Publish without DHT connectivity (uses local datastore and HTTP delegated publishers) +ipfs name publish --allow-delegated /ipfs/QmHash +``` + +**Delegated publishers configuration:** + +[`Ipns.DelegatedPublishers`](https://github.com/ipfs/kubo/blob/master/docs/config.md#ipnsdelegatedpublishers) configures HTTP endpoints for IPNS publishing. Supports `"auto"` for network defaults or custom HTTP endpoints. 
The `--allow-delegated` flag enables publishing through these endpoints without requiring DHT connectivity, useful for nodes behind restrictive networks or during testing. + +#### 🔢 Custom sequence numbers in `ipfs name publish` + +Added `--sequence` flag to `ipfs name publish` for setting custom sequence numbers in IPNS records. This enables advanced use cases like manually coordinating updates across multiple nodes. See `ipfs name publish --help` for details. + +#### ⚙️ `Reprovider.Strategy` is now consistently respected + +Prior to this version, files added, blocks received etc. were "provided" to the network (announced on the DHT) regardless of the ["reproviding strategy" setting](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy). For example: + +- Strategy set to "pinned" + `ipfs add --pin=false` → file was provided regardless +- Strategy set to "roots" + `ipfs pin add` → all blocks (not only the root) were provided + +Only the periodic "reproviding" action (runs every 22h by default) respected the strategy. + +This was inefficient as content that should not be provided was getting provided once. Now all operations respect `Reprovider.Strategy`. If set to "roots", no blocks other than pin roots will be provided regardless of what is fetched, added etc. + +> [!NOTE] +> **Behavior change:** The `--offline` flag no longer affects providing behavior. Both `ipfs add` and `ipfs --offline add` now provide blocks according to the reproviding strategy when run against an online daemon (previously `--offline add` did not provide). Since `ipfs add` has been nearly as fast as offline mode [since v0.35](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.35.md#fast-ipfs-add-in-online-mode), `--offline` is rarely needed. To run truly offline operations, use `ipfs --offline daemon`. + +#### ⚙️ `Reprovider.Strategy=all`: improved memory efficiency + +The memory cost of `Reprovider.Strategy=all` no longer grows with the number of pins. 
The strategy now processes blocks directly from the datastore in undefined order, eliminating the memory pressure tied to the number of pins. + +As part of this improvement, the `flat` reprovider strategy has been renamed to `all` (the default). This cleanup removes the workaround introduced in v0.28 for pin root prioritization. With the introduction of more granular strategies like [`pinned+mfs`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy), we can now optimize the default `all` strategy for lower memory usage without compromising users who need pin root prioritization ([rationale](https://github.com/ipfs/kubo/pull/10928#issuecomment-3211040182)). + +> [!NOTE] +> **Migration guidance:** If you experience undesired announcement delays of root CIDs with the new `all` strategy, switch to `pinned+mfs` for root prioritization. + +#### 🧹 Removed unnecessary dependencies + +Kubo has been cleaned up by removing unnecessary dependencies and packages: + +- Removed `thirdparty/assert` (replaced by `github.com/stretchr/testify/require`) +- Removed `thirdparty/dir` (replaced by `misc/fsutil`) +- Removed `thirdparty/notifier` (unused) +- Removed `goprocess` dependency (replaced with native Go `context` patterns) + +These changes reduce the dependency footprint while improving code maintainability and following Go best practices. + +#### 🔍 Improved `ipfs cid` + +Certain `ipfs cid` commands can now be run without a daemon or repository, and return correct exit code 1 on error, making it easier to perform CID conversion in scripts and CI/CD pipelines. + +While at it, we also fixed unicode support in `ipfs cid bases --prefix` to correctly show `base256emoji` 🚀 :-) + +#### ⚠️ Deprecated `ipfs stats reprovide` + +The `ipfs stats reprovide` command has moved to `ipfs provide stat`. This was done to organize provider commands in one location. 
+ +> [!NOTE] +> `ipfs stats reprovide` still works, but is marked as deprecated and will be removed in a future release. + +#### 🔄 AutoRelay now uses all connected peers for relay discovery + +AutoRelay's relay discovery now includes all connected peers as potential relay candidates, not just peers discovered through the DHT. This allows peers connected via HTTP routing and manual `ipfs swarm connect` commands to serve as relays, improving connectivity for nodes using non-DHT routing configurations. + +#### 📊 Anonymous telemetry for better feature prioritization + +Per a suggestion from the IPFS Foundation, Kubo now sends optional anonymized telemetry information to Shipyard [maintainers](https://github.com/ipshipyard/roadmaps/issues/20). + +**Privacy first**: The telemetry system collects only anonymous data - no personally identifiable information, file paths, or content data. A random UUID is generated on first run for anonymous identification. Users are notified before any data is sent and have time to opt-out. + +**Why**: We want to better understand Kubo usage across the ecosystem so we can better direct funding and work efforts. For example, we have little insights into how many nodes are NAT'ed and rely on AutoNAT for reachability. Some of the information can be inferred by crawling the network or logging `/identify` details in the bootstrappers, but users have no way of opting out from that, so we believe it is more transparent to concentrate this functionality in one place. + +**What**: Currently, we send the following anonymous metrics: + +
Click to see telemetry metrics example + +``` + "uuid": "", + "agent_version": "kubo/0.37.0-dev", + "private_network": false, + "bootstrappers_custom": false, + "repo_size_bucket": 1073741824, + "uptime_bucket": 86400000000000, + "reprovider_strategy": "pinned", + "routing_type": "auto", + "routing_accelerated_dht_client": false, + "routing_delegated_count": 0, + "autonat_service_mode": "enabled", + "autonat_reachability": "", + "autoconf": true, + "autoconf_custom": false, + "swarm_enable_hole_punching": true, + "swarm_circuit_addresses": false, + "swarm_ipv4_public_addresses": true, + "swarm_ipv6_public_addresses": true, + "auto_tls_auto_wss": true, + "auto_tls_domain_suffix_custom": false, + "discovery_mdns_enabled": true, + "platform_os": "linux", + "platform_arch": "amd64", + "platform_containerized": false, + "platform_vm": false +``` + +
+ +The exact data sent for your node can be inspected by setting `GOLOG_LOG_LEVEL="telemetry=debug"`. Users will see an informative message the first time they launch a telemetry-enabled daemon, with time to opt-out before any data is collected. Telemetry data is sent every 24h, with the first collection starting 15 minutes after daemon launch. + +**User control**: You can opt-out at any time: + +- Set environment variable `IPFS_TELEMETRY=off` before starting the daemon +- Or run `ipfs config Plugins.Plugins.telemetry.Config.Mode off` and restart the daemon + +The telemetry plugin code lives in `plugin/plugins/telemetry`. + +Learn more: [`/kubo/docs/telemetry.md`](https://github.com/ipfs/kubo/blob/master/docs/telemetry.md) + +### 📦️ Important dependency updates + +- update `boxo` to [v0.34.0](https://github.com/ipfs/boxo/releases/tag/v0.34.0) (incl. [v0.33.1](https://github.com/ipfs/boxo/releases/tag/v0.33.1)) +- update `go-libp2p` to [v0.43.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.43.0) +- update `go-libp2p-kad-dht` to [v0.34.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.34.0) +- update `go-libp2p-pubsub` to [v0.14.2](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.2) (incl. [v0.14.1](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.1), [v0.14.0](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.0)) +- update `ipfs-webui` to [v4.8.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.8.0) +- update to [Go 1.25](https://go.dev/doc/go1.25) + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: set version to v0.37.0 + - feat(ci): docker linting (#10927) ([ipfs/kubo#10927](https://github.com/ipfs/kubo/pull/10927)) + - fix: disable telemetry in test profile (#10931) ([ipfs/kubo#10931](https://github.com/ipfs/kubo/pull/10931)) + - fix: harness tests random panic (#10933) ([ipfs/kubo#10933](https://github.com/ipfs/kubo/pull/10933)) + - chore: v0.37.0-rc1 + - feat: Reprovider.Strategy: rename "flat" to "all" (#10928) ([ipfs/kubo#10928](https://github.com/ipfs/kubo/pull/10928)) + - docs: improve `ipfs add --help` (#10926) ([ipfs/kubo#10926](https://github.com/ipfs/kubo/pull/10926)) + - feat: optimize docker builds (#10925) ([ipfs/kubo#10925](https://github.com/ipfs/kubo/pull/10925)) + - feat(config): AutoConf with "auto" placeholders (#10883) ([ipfs/kubo#10883](https://github.com/ipfs/kubo/pull/10883)) + - fix(ci): make NewRandPort thread-safe (#10921) ([ipfs/kubo#10921](https://github.com/ipfs/kubo/pull/10921)) + - fix: resolve TestAddMultipleGCLive race condition (#10916) ([ipfs/kubo#10916](https://github.com/ipfs/kubo/pull/10916)) + - feat: telemetry plugin (#10866) ([ipfs/kubo#10866](https://github.com/ipfs/kubo/pull/10866)) + - fix typos in docs and comments (#10920) ([ipfs/kubo#10920](https://github.com/ipfs/kubo/pull/10920)) + - Upgrade to Boxo v0.34.0 (#10917) ([ipfs/kubo#10917](https://github.com/ipfs/kubo/pull/10917)) + - test: fix flaky repo verify (#10743) ([ipfs/kubo#10743](https://github.com/ipfs/kubo/pull/10743)) + - feat(config): `Gateway.RetrievalTimeout|MaxConcurrentRequests` (#10905) ([ipfs/kubo#10905](https://github.com/ipfs/kubo/pull/10905)) + - chore: replace random test utils with equivalents in go-test/random (#10915) ([ipfs/kubo#10915](https://github.com/ipfs/kubo/pull/10915)) + - feat: require go1.25 for building kubo (#10913) ([ipfs/kubo#10913](https://github.com/ipfs/kubo/pull/10913)) + - feat(ci): reusable spellcheck from unified CI (#10873) 
([ipfs/kubo#10873](https://github.com/ipfs/kubo/pull/10873)) + - fix(ci): docker build (#10914) ([ipfs/kubo#10914](https://github.com/ipfs/kubo/pull/10914)) + - Replace `uber-go/multierr` with `errors.Join` (#10912) ([ipfs/kubo#10912](https://github.com/ipfs/kubo/pull/10912)) + - feat(ipns): support passing custom sequence number during publishing (#10851) ([ipfs/kubo#10851](https://github.com/ipfs/kubo/pull/10851)) + - fix(relay): feed connected peers to AutoRelay discovery (#10901) ([ipfs/kubo#10901](https://github.com/ipfs/kubo/pull/10901)) + - fix(sharness): no blocking on unclean FUSE unmount (#10906) ([ipfs/kubo#10906](https://github.com/ipfs/kubo/pull/10906)) + - feat: add query functionality to log level command (#10885) ([ipfs/kubo#10885](https://github.com/ipfs/kubo/pull/10885)) + - fix(ci): switch to debian:bookworm-slim + - Fix failing FUSE test (#10904) ([ipfs/kubo#10904](https://github.com/ipfs/kubo/pull/10904)) + - fix(cmd): exit 1 on error (#10903) ([ipfs/kubo#10903](https://github.com/ipfs/kubo/pull/10903)) + - feat: go-libp2p v0.43.0 (#10892) ([ipfs/kubo#10892](https://github.com/ipfs/kubo/pull/10892)) + - fix: `ipfs cid` without repo (#10897) ([ipfs/kubo#10897](https://github.com/ipfs/kubo/pull/10897)) + - client/rpc: re-enable tests on windows. (#10895) ([ipfs/kubo#10895](https://github.com/ipfs/kubo/pull/10895)) + - fix: Provide according to Reprovider.Strategy (#10886) ([ipfs/kubo#10886](https://github.com/ipfs/kubo/pull/10886)) + - feat: ipfs-webui v4.8.0 (#10902) ([ipfs/kubo#10902](https://github.com/ipfs/kubo/pull/10902)) + - refactor: move `ipfs stat provide/reprovide` to `ipfs provide stat` (#10896) ([ipfs/kubo#10896](https://github.com/ipfs/kubo/pull/10896)) + - Bitswap: use a single ConnectEventManager. 
([ipfs/kubo#10889](https://github.com/ipfs/kubo/pull/10889)) + - feat(add): add support for naming pinned CIDs (#10877) ([ipfs/kubo#10877](https://github.com/ipfs/kubo/pull/10877)) + - refactor: remove goprocess (#10872) ([ipfs/kubo#10872](https://github.com/ipfs/kubo/pull/10872)) + - feat(daemon): accelerated client startup note (#10859) ([ipfs/kubo#10859](https://github.com/ipfs/kubo/pull/10859)) + - docs:added GOLOG_LOG_LEVEL to debug-guide for logging more info (#10894) ([ipfs/kubo#10894](https://github.com/ipfs/kubo/pull/10894)) + - core: Add a ContentDiscovery field ([ipfs/kubo#10890](https://github.com/ipfs/kubo/pull/10890)) + - chore: update go-libp2p and p2p-forge (#10887) ([ipfs/kubo#10887](https://github.com/ipfs/kubo/pull/10887)) + - Upgrade to Boxo v0.33.1 (#10888) ([ipfs/kubo#10888](https://github.com/ipfs/kubo/pull/10888)) + - remove unneeded thirdparty packages (#10871) ([ipfs/kubo#10871](https://github.com/ipfs/kubo/pull/10871)) + - provider: clear provide queue when reprovide strategy changes (#10863) ([ipfs/kubo#10863](https://github.com/ipfs/kubo/pull/10863)) + - chore: merge release v0.36.0 ([ipfs/kubo#10868](https://github.com/ipfs/kubo/pull/10868)) + - docs: release checklist fixes from 0.36 (#10861) ([ipfs/kubo#10861](https://github.com/ipfs/kubo/pull/10861)) + - docs(config): add network exposure considerations (#10856) ([ipfs/kubo#10856](https://github.com/ipfs/kubo/pull/10856)) + - fix: handling of EDITOR env var (#10855) ([ipfs/kubo#10855](https://github.com/ipfs/kubo/pull/10855)) + - refactor: use slices.Sort where appropriate (#10858) ([ipfs/kubo#10858](https://github.com/ipfs/kubo/pull/10858)) + - Upgrade to Boxo v0.33.0 (#10857) ([ipfs/kubo#10857](https://github.com/ipfs/kubo/pull/10857)) + - chore: Upgrade github.com/cockroachdb/pebble/v2 to v2.0.6 for Go 1.25 support (#10850) ([ipfs/kubo#10850](https://github.com/ipfs/kubo/pull/10850)) + - core:constructor: add a log line about http retrieval 
([ipfs/kubo#10852](https://github.com/ipfs/kubo/pull/10852)) + - chore: p2p-forge v0.6.0 + go-libp2p 0.42.0 (#10840) ([ipfs/kubo#10840](https://github.com/ipfs/kubo/pull/10840)) + - docs: fix minor typos (#10849) ([ipfs/kubo#10849](https://github.com/ipfs/kubo/pull/10849)) + - Replace use of go-car v1 with go-car/v2 (#10845) ([ipfs/kubo#10845](https://github.com/ipfs/kubo/pull/10845)) + - chore: 0.37.0-dev +- github.com/ipfs/boxo (v0.33.0 -> v0.34.0): + - Release v0.34.0 ([ipfs/boxo#1003](https://github.com/ipfs/boxo/pull/1003)) + - blockstore: remove HashOnRead ([ipfs/boxo#1001](https://github.com/ipfs/boxo/pull/1001)) + - Update go-log to v2.8.1 ([ipfs/boxo#998](https://github.com/ipfs/boxo/pull/998)) + - feat: autoconf client library (#997) ([ipfs/boxo#997](https://github.com/ipfs/boxo/pull/997)) + - feat(gateway): concurrency and retrieval timeout limits (#994) ([ipfs/boxo#994](https://github.com/ipfs/boxo/pull/994)) + - update dependencies ([ipfs/boxo#999](https://github.com/ipfs/boxo/pull/999)) + - fix: cidqueue gc must iterate all elements in queue ([ipfs/boxo#1000](https://github.com/ipfs/boxo/pull/1000)) + - Replace `uber-go/multierr` with `errors.Join` ([ipfs/boxo#996](https://github.com/ipfs/boxo/pull/996)) + - feat(namesys/IPNSPublisher): expose ability to set Sequence (#962) ([ipfs/boxo#962](https://github.com/ipfs/boxo/pull/962)) + - upgrade to go-libp2p v0.43.0 ([ipfs/boxo#993](https://github.com/ipfs/boxo/pull/993)) + - Remove providing Exchange. Call Provide() from relevant places. ([ipfs/boxo#976](https://github.com/ipfs/boxo/pull/976)) + - reprovider: s/initial/initial ([ipfs/boxo#992](https://github.com/ipfs/boxo/pull/992)) + - Release v0.33.1 ([ipfs/boxo#991](https://github.com/ipfs/boxo/pull/991)) + - fix(bootstrap): filter-out peers behind relays (#987) ([ipfs/boxo#987](https://github.com/ipfs/boxo/pull/987)) + - Bitswap: fix double-worker in connectEventManager. Logging improvements. 
([ipfs/boxo#986](https://github.com/ipfs/boxo/pull/986)) + - upgrade to go-libp2p v0.42.1 (#988) ([ipfs/boxo#988](https://github.com/ipfs/boxo/pull/988)) + - bitswap/httpnet: fix sudden stop of http retrieval requests (#984) ([ipfs/boxo#984](https://github.com/ipfs/boxo/pull/984)) + - bitswap/client: disable use of traceability block by default (#956) ([ipfs/boxo#956](https://github.com/ipfs/boxo/pull/956)) + - test(gateway): fix race in TestCarBackendTar (#985) ([ipfs/boxo#985](https://github.com/ipfs/boxo/pull/985)) + - Shutdown the sessionWantSender changes queue when session is shutdown (#983) ([ipfs/boxo#983](https://github.com/ipfs/boxo/pull/983)) + - bitswap/httpnet: start pinging before signaling Connected ([ipfs/boxo#982](https://github.com/ipfs/boxo/pull/982)) + - Queue all changes in order using non-blocking async queue ([ipfs/boxo#981](https://github.com/ipfs/boxo/pull/981)) + - bitswap/httpnet: fix peers silently stopping from doing http requests ([ipfs/boxo#980](https://github.com/ipfs/boxo/pull/980)) + - provider: clear provide queue (#978) ([ipfs/boxo#978](https://github.com/ipfs/boxo/pull/978)) + - update dependencies ([ipfs/boxo#977](https://github.com/ipfs/boxo/pull/977)) +- github.com/ipfs/go-datastore (v0.8.2 -> v0.8.3): + - new version (#245) ([ipfs/go-datastore#245](https://github.com/ipfs/go-datastore/pull/245)) + - sort using slices.Sort (#243) ([ipfs/go-datastore#243](https://github.com/ipfs/go-datastore/pull/243)) + - Replace `uber-go/multierr` with `errors.Join` (#242) ([ipfs/go-datastore#242](https://github.com/ipfs/go-datastore/pull/242)) + - replace gopkg.in/check.v1 with github.com/stretchr/testify (#241) ([ipfs/go-datastore#241](https://github.com/ipfs/go-datastore/pull/241)) +- github.com/ipfs/go-ipld-cbor (v0.2.0 -> v0.2.1): + - new version ([ipfs/go-ipld-cbor#111](https://github.com/ipfs/go-ipld-cbor/pull/111)) + - update dependencies ([ipfs/go-ipld-cbor#110](https://github.com/ipfs/go-ipld-cbor/pull/110)) +- 
github.com/ipfs/go-log/v2 (v2.6.0 -> v2.8.1): + - new version (#171) ([ipfs/go-log#171](https://github.com/ipfs/go-log/pull/171)) + - feat: add LevelEnabled function to check if log level enabled (#170) ([ipfs/go-log#170](https://github.com/ipfs/go-log/pull/170)) + - Replace `uber-go/multierr` with `errors.Join` (#168) ([ipfs/go-log#168](https://github.com/ipfs/go-log/pull/168)) + - new version (#167) ([ipfs/go-log#167](https://github.com/ipfs/go-log/pull/167)) + - Test using testify package (#166) ([ipfs/go-log#166](https://github.com/ipfs/go-log/pull/166)) + - Revise the loglevel API to be more golang idiomatic (#165) ([ipfs/go-log#165](https://github.com/ipfs/go-log/pull/165)) + - new version (#164) ([ipfs/go-log#164](https://github.com/ipfs/go-log/pull/164)) + - feat: add GetLogLevel and GetAllLogLevels (#160) ([ipfs/go-log#160](https://github.com/ipfs/go-log/pull/160)) +- github.com/ipfs/go-test (v0.2.2 -> v0.2.3): + - new version (#30) ([ipfs/go-test#30](https://github.com/ipfs/go-test/pull/30)) + - fix: multihash random generation (#28) ([ipfs/go-test#28](https://github.com/ipfs/go-test/pull/28)) + - Add RandomName function to generate random filename (#26) ([ipfs/go-test#26](https://github.com/ipfs/go-test/pull/26)) +- github.com/libp2p/go-libp2p (v0.42.0 -> v0.43.0): + - Release v0.43 (#3353) ([libp2p/go-libp2p#3353](https://github.com/libp2p/go-libp2p/pull/3353)) + - basichost: fix deadlock with addrs_manager (#3348) ([libp2p/go-libp2p#3348](https://github.com/libp2p/go-libp2p/pull/3348)) + - basichost: fix Addrs docstring (#3341) ([libp2p/go-libp2p#3341](https://github.com/libp2p/go-libp2p/pull/3341)) + - quic: upgrade quic-go to v0.53 (#3323) ([libp2p/go-libp2p#3323](https://github.com/libp2p/go-libp2p/pull/3323)) +- github.com/libp2p/go-libp2p-kad-dht (v0.33.1 -> v0.34.0): + - chore: release v0.34.0 (#1130) ([libp2p/go-libp2p-kad-dht#1130](https://github.com/libp2p/go-libp2p-kad-dht/pull/1130)) + - make crawler protocol messenger configurable (#1128) 
([libp2p/go-libp2p-kad-dht#1128](https://github.com/libp2p/go-libp2p-kad-dht/pull/1128)) + - fix: move non-error log to warning level (#1119) ([libp2p/go-libp2p-kad-dht#1119](https://github.com/libp2p/go-libp2p-kad-dht/pull/1119)) + - migrate providers package (#1094) ([libp2p/go-libp2p-kad-dht#1094](https://github.com/libp2p/go-libp2p-kad-dht/pull/1094)) +- github.com/libp2p/go-libp2p-pubsub (v0.13.1 -> v0.14.2): + - Release v0.14.2 (#629) ([libp2p/go-libp2p-pubsub#629](https://github.com/libp2p/go-libp2p-pubsub/pull/629)) + - Fix test races and enable race tests in CI (#626) ([libp2p/go-libp2p-pubsub#626](https://github.com/libp2p/go-libp2p-pubsub/pull/626)) + - Fix race when calling Preprocess and msg ID generator(#627) ([libp2p/go-libp2p-pubsub#627](https://github.com/libp2p/go-libp2p-pubsub/pull/627)) + - Release v0.14.1 (#623) ([libp2p/go-libp2p-pubsub#623](https://github.com/libp2p/go-libp2p-pubsub/pull/623)) + - fix(BatchPublishing): Make topic.AddToBatch threadsafe (#622) ([libp2p/go-libp2p-pubsub#622](https://github.com/libp2p/go-libp2p-pubsub/pull/622)) + - Release v0.14.0 (#614) ([libp2p/go-libp2p-pubsub#614](https://github.com/libp2p/go-libp2p-pubsub/pull/614)) + - refactor: 10x faster RPC splitting (#615) ([libp2p/go-libp2p-pubsub#615](https://github.com/libp2p/go-libp2p-pubsub/pull/615)) + - test: Fix flaky TestMessageBatchPublish (#616) ([libp2p/go-libp2p-pubsub#616](https://github.com/libp2p/go-libp2p-pubsub/pull/616)) + - Send IDONTWANT before first publish (#612) ([libp2p/go-libp2p-pubsub#612](https://github.com/libp2p/go-libp2p-pubsub/pull/612)) + - feat(gossipsub): Add MessageBatch (#607) ([libp2p/go-libp2p-pubsub#607](https://github.com/libp2p/go-libp2p-pubsub/pull/607)) + - fix(IDONTWANT)!: Do not IDONTWANT your sender (#609) ([libp2p/go-libp2p-pubsub#609](https://github.com/libp2p/go-libp2p-pubsub/pull/609)) +- github.com/multiformats/go-multiaddr (v0.16.0 -> v0.16.1): + - Release v0.16.1 (#281) 
([multiformats/go-multiaddr#281](https://github.com/multiformats/go-multiaddr/pull/281)) + - reduce allocations in Bytes() and manet methods (#280) ([multiformats/go-multiaddr#280](https://github.com/multiformats/go-multiaddr/pull/280)) +- github.com/whyrusleeping/cbor-gen (v0.1.2 -> v0.3.1): + - fix: capture field count early for "optional" length check (#112) ([whyrusleeping/cbor-gen#112](https://github.com/whyrusleeping/cbor-gen/pull/112)) + - doc: basic cbor-gen documentation (#110) ([whyrusleeping/cbor-gen#110](https://github.com/whyrusleeping/cbor-gen/pull/110)) + - feat: add support for optional fields at the end of tuple structs (#109) ([whyrusleeping/cbor-gen#109](https://github.com/whyrusleeping/cbor-gen/pull/109)) + - Regenerate test files ([whyrusleeping/cbor-gen#107](https://github.com/whyrusleeping/cbor-gen/pull/107)) + - improve allocations in map serialization ([whyrusleeping/cbor-gen#105](https://github.com/whyrusleeping/cbor-gen/pull/105)) + - fixed array in struct instead of heap slice ([whyrusleeping/cbor-gen#104](https://github.com/whyrusleeping/cbor-gen/pull/104)) + - optionally sort type names in generated code file ([whyrusleeping/cbor-gen#102](https://github.com/whyrusleeping/cbor-gen/pull/102)) + - fix handling of an []*string field ([whyrusleeping/cbor-gen#101](https://github.com/whyrusleeping/cbor-gen/pull/101)) + - fix: reject negative big integers ([whyrusleeping/cbor-gen#100](https://github.com/whyrusleeping/cbor-gen/pull/100)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Marcin Rataj | 26 | +16033/-755 | 176 | +| Andrew Gillis | 35 | +2656/-1911 | 142 | +| Hector Sanjuan | 30 | +2638/-760 | 114 | +| Marco Munizaga | 11 | +1244/-362 | 41 | +| Russell Dempsey | 2 | +1031/-33 | 7 | +| Guillaume Michel | 4 | +899/-65 | 15 | +| whyrusleeping | 4 | +448/-177 | 15 | +| sukun | 9 | +312/-191 | 31 | +| gammazero | 23 | +239/-216 | 45 | +| Brian Olson | 5 | +343/-16 | 11 | +| Steven Allen | 3 | +294/-7 | 9 | +| Sergey Gorbunov | 2 | +247/-11 | 9 | +| Kapil Sareen | 1 | +86/-13 | 10 | +| Masih H. Derkani | 1 | +72/-24 | 1 | +| Piotr Galar | 1 | +40/-55 | 23 | +| Rod Vagg | 1 | +13/-11 | 3 | +| Ankita Sahu | 1 | +2/-0 | 1 | +| Štefan Baebler | 1 | +1/-0 | 1 | diff --git a/docs/changelogs/v0.38.md b/docs/changelogs/v0.38.md new file mode 100644 index 000000000..f76667239 --- /dev/null +++ b/docs/changelogs/v0.38.md @@ -0,0 +1,400 @@ +# Kubo changelog v0.38 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. 

 + +- [v0.38.0](#v0380) +- [v0.38.1](#v0381) +- [v0.38.2](#v0382) + +## v0.38.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [🚀 Repository migration: simplified provide configuration](#-repository-migration-simplified-provide-configuration) + - [🧹 Experimental Sweeping DHT Provider](#-experimental-sweeping-dht-provider) + - [📊 Exposed DHT metrics](#-exposed-dht-metrics) + - [🚨 Improved gateway error pages with diagnostic tools](#-improved-gateway-error-pages-with-diagnostic-tools) + - [🎨 Updated WebUI](#-updated-webui) + - [📌 Pin name improvements](#-pin-name-improvements) + - [🛠️ Identity CID size enforcement and `ipfs files write` fixes](#️-identity-cid-size-enforcement-and-ipfs-files-write-fixes) + - [📤 Provide Filestore and Urlstore blocks on write](#-provide-filestore-and-urlstore-blocks-on-write) + - [🚦 MFS operation limit for --flush=false](#-mfs-operation-limit-for---flushfalse) +- [📦️ Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +Kubo 0.38.0 simplifies content announcement configuration, introduces an experimental sweeping DHT provider for efficient large-scale operations, and includes various performance improvements. + +### 🔦 Highlights + +#### 🚀 Repository migration: simplified provide configuration + +This release migrates the repository from version 17 to version 18, simplifying how you configure content announcements. + +The old `Provider` and `Reprovider` sections are now combined into a single [`Provide`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provide) section. Your existing settings are automatically migrated - no manual changes needed. + +**Migration happens automatically** when you run `ipfs daemon --migrate`. For manual migration: `ipfs repo migrate --to=18`. + +Read more about the new system below.
+ +#### 🧹 Experimental Sweeping DHT Provider + +A new experimental DHT provider is available as an alternative to both the default provider and the resource-intensive [accelerated DHT client](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient). Enable it via [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled). + +**How it works:** Instead of providing keys one-by-one, the sweep provider systematically explores DHT keyspace regions in batches. + +> +> +> +> Reprovide Cycle Comparison +> +> +> The diagram shows how sweep mode avoids the hourly traffic spikes of Accelerated DHT while maintaining similar effectiveness. By grouping CIDs into keyspace regions and processing them in batches, sweep mode reduces memory overhead and creates predictable network patterns. + +**Benefits for large-scale operations:** Handles hundreds of thousands of CIDs with reduced memory and network connections, spreads operations evenly to eliminate resource spikes, maintains state across restarts through persistent keystore, and provides better metrics visibility. + +**Monitoring and debugging:** Legacy mode (`SweepEnabled=false`) tracks `provider_reprovider_provide_count` and `provider_reprovider_reprovide_count`, while sweep mode (`SweepEnabled=true`) tracks `total_provide_count_total`. Enable debug logging with `GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug` to see detailed logs from either system. + +> [!IMPORTANT] +> The metric `total_provide_count_total` was renamed to `provider_provides_total` in Kubo v0.39 to follow OpenTelemetry naming conventions. If you have dashboards or alerts monitoring this metric, update them accordingly. + +> [!NOTE] +> This feature is experimental and opt-in. In the future, it will become the default and replace the legacy system. Some commands like `ipfs stats provide` and `ipfs routing provide` are not yet available with sweep mode. 
Run `ipfs provide --help` for alternatives. + +For configuration details, see [`Provide.DHT`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedht). For metrics documentation, see [Provide metrics](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide). + +#### 📊 Exposed DHT metrics + +Kubo now exposes DHT metrics from [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/), including `total_provide_count_total` for sweep provider operations and RPC metrics prefixed with `rpc_inbound_` and `rpc_outbound_` for DHT message traffic. See [Kubo metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md) for details. + +> [!IMPORTANT] +> The metric `total_provide_count_total` was renamed to `provider_provides_total` in Kubo v0.39 to follow OpenTelemetry naming conventions. If you have dashboards or alerts monitoring this metric, update them accordingly. + +#### 🚨 Improved gateway error pages with diagnostic tools + +Gateway error pages now provide more actionable information during content retrieval failures. When a 504 Gateway Timeout occurs, users see detailed retrieval state information including which phase failed and a sample of providers that were attempted: + +> ![Improved gateway error page showing retrieval diagnostics](https://github.com/user-attachments/assets/18432c74-a5e0-4bbf-9815-7c780779dc98) +> +> - **[`Gateway.DiagnosticServiceURL`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaydiagnosticserviceurl)** (default: `https://check.ipfs.network`): Configures the diagnostic service URL. When set, 504 errors show a "Check CID retrievability" button that links to this service with `?cid=` for external diagnostics. Set to empty string to disable. 
+> - **Enhanced error details**: Timeout errors now display the retrieval phase where failure occurred (e.g., "connecting to providers", "fetching data") and up to 3 peer IDs that were attempted but couldn't deliver the content, making it easier to diagnose network or provider issues. +> - **Retry button on all error pages**: Every gateway error page now includes a retry button for quick page refresh without manual URL re-entry. + +#### 🎨 Updated WebUI + +The Web UI has been updated to [v4.9](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.0) with a new **Diagnostics** screen for troubleshooting and system monitoring. Access it at `http://127.0.0.1:5001/webui` when running your local IPFS node. + +| Diagnostics: Logs | Files: Check Retrieval | Diagnostics: Retrieval Results | +|:---:|:---:|:---:| +| ![Diagnostics logs](https://github.com/user-attachments/assets/a1560fd2-6f4e-4e4f-9506-85ecb10f96e5) | ![Retrieval check interface](https://github.com/user-attachments/assets/6efa8bf1-705e-4256-8c66-282455daf789) | ![Retrieval check results](https://github.com/user-attachments/assets/970f2de3-94a3-4d48-b0a4-46832f73c2e9) | +| Debug issues in real-time by adjusting [log level](https://github.com/ipfs/kubo/blob/master/docs/environment-variables.md#golog_log_level) without restart (global or per-subsystem like bitswap) | Check if content is available to other peers directly from Files screen | Find out why content won't load or who is providing it to the network | + +| Peers: Agent Versions | Files: Custom Sorting | +|:---:|:---:| +| ![Peers with Agent Version](https://github.com/user-attachments/assets/4bf95e72-193a-415d-9428-dd222795107a) | ![File sorting options](https://github.com/user-attachments/assets/fd7a1807-c487-4393-ab60-a16ae087e6cd) | +| Know what software peers run | Find files faster with new sorting | + +Additional improvements include a close button in the file viewer, better error handling, and fixed navigation highlighting. 
+ +#### 📌 Pin name improvements + +`ipfs pin ls --names` now correctly returns pin names for specific CIDs ([#10649](https://github.com/ipfs/kubo/issues/10649), [boxo#1035](https://github.com/ipfs/boxo/pull/1035)), RPC no longer incorrectly returns names from other pins ([#10966](https://github.com/ipfs/kubo/pull/10966)), and pin names are now limited to 255 bytes for better cross-platform compatibility ([#10981](https://github.com/ipfs/kubo/pull/10981)). + +#### 🛠️ Identity CID size enforcement and `ipfs files write` fixes + +**Identity CID size limits are now enforced** + +This release enforces a maximum of 128 bytes for identity CIDs ([IPIP-512](https://github.com/ipfs/specs/pull/512)) - attempting to exceed this limit will return a clear error message. + +Identity CIDs use [multihash `0x00`](https://github.com/multiformats/multicodec/blob/master/table.csv#L2) to embed data directly in the CID without hashing. This experimental optimization was designed for tiny data where a CID reference would be larger than the data itself, but without size limits it was easy to misuse and could turn into an anti-pattern that wastes resources and enables abuse. + +- `ipfs add --inline-limit` and `--hash=identity` now enforce the 128-byte maximum (error when exceeded) +- `ipfs files write` prevents creation of oversized identity CIDs + +**Multiple `ipfs files write` bugs have been fixed** + +This release resolves several long-standing MFS issues: raw nodes now preserve their codec instead of being forced to dag-pb, append operations on raw nodes work correctly by converting to UnixFS when needed, and identity CIDs properly inherit the full CID prefix from parent directories. 
+ +#### 📤 Provide Filestore and Urlstore blocks on write + +Improvements to the providing system in the last release (provide blocks according to the configured [Strategy](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy)) left out [Filestore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore) and [Urlstore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-urlstore) blocks when the "all" strategy was used. They would only be reprovided but not provided on write. This is now fixed, and both Filestore blocks (local file references) and Urlstore blocks (HTTP/HTTPS URL references) will be provided correctly shortly after initial add. + +#### 🚦 MFS operation limit for --flush=false + +The new [`Internal.MFSNoFlushLimit`](https://github.com/ipfs/kubo/blob/master/docs/config.md#internalmfsnoflushlimit) configuration option prevents unbounded memory growth when using `--flush=false` with `ipfs files` commands. After performing the configured number of operations without flushing (default: 256), further operations will fail with a clear error message instructing users to flush manually. + +### 📦️ Important dependency updates + +- update `boxo` to [v0.35.0](https://github.com/ipfs/boxo/releases/tag/v0.35.0) +- update `go-libp2p-kad-dht` to [v0.35.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.35.0) +- update `ipfs-webui` to [v4.9.1](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.1) (incl. [v4.9.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.0)) + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: v0.38.0 + - chore: bump go-libp2p-kad-dht to v0.35.0 (#11002) ([ipfs/kubo#11002](https://github.com/ipfs/kubo/pull/11002)) + - docs: add sweeping provide worker count recommendation (#11001) ([ipfs/kubo#11001](https://github.com/ipfs/kubo/pull/11001)) + - Upgrade to Boxo v0.35.0 (#10999) ([ipfs/kubo#10999](https://github.com/ipfs/kubo/pull/10999)) + - chore: 0.38.0-rc2 + - chore: update boxo and kad-dht dependencies (#10995) ([ipfs/kubo#10995](https://github.com/ipfs/kubo/pull/10995)) + - fix: update webui to v4.9.1 (#10994) ([ipfs/kubo#10994](https://github.com/ipfs/kubo/pull/10994)) + - fix: provider merge conflicts (#10989) ([ipfs/kubo#10989](https://github.com/ipfs/kubo/pull/10989)) + - fix(mfs): add soft limit for `--flush=false` (#10985) ([ipfs/kubo#10985](https://github.com/ipfs/kubo/pull/10985)) + - fix: provide Filestore nodes (#10990) ([ipfs/kubo#10990](https://github.com/ipfs/kubo/pull/10990)) + - feat: limit pin names to 255 bytes (#10981) ([ipfs/kubo#10981](https://github.com/ipfs/kubo/pull/10981)) + - fix: SweepingProvider slow start (#10980) ([ipfs/kubo#10980](https://github.com/ipfs/kubo/pull/10980)) + - chore: release v0.38.0-rc1 + - fix: SweepingProvider shouldn't error when missing DHT (#10975) ([ipfs/kubo#10975](https://github.com/ipfs/kubo/pull/10975)) + - fix: allow custom http provide when libp2p node is offline (#10974) ([ipfs/kubo#10974](https://github.com/ipfs/kubo/pull/10974)) + - docs(provide): validation and reprovide cycle visualization (#10977) ([ipfs/kubo#10977](https://github.com/ipfs/kubo/pull/10977)) + - refactor(ci): optimize build workflows (#10973) ([ipfs/kubo#10973](https://github.com/ipfs/kubo/pull/10973)) + - fix(cmds): cleanup unicode identify strings (#9465) ([ipfs/kubo#9465](https://github.com/ipfs/kubo/pull/9465)) + - feat: ipfs-webui v4.9.0 with retrieval diagnostics (#10969) ([ipfs/kubo#10969](https://github.com/ipfs/kubo/pull/10969)) + - fix(mfs): unbound cache 
growth with `flush=false` (#10971) ([ipfs/kubo#10971](https://github.com/ipfs/kubo/pull/10971)) + - fix: `ipfs pin ls --names` (#10970) ([ipfs/kubo#10970](https://github.com/ipfs/kubo/pull/10970)) + - refactor(config): migration 17-to-18 to unify Provider/Reprovider into Provide.DHT (#10951) ([ipfs/kubo#10951](https://github.com/ipfs/kubo/pull/10951)) + - feat: opt-in new Sweep provide system (#10834) ([ipfs/kubo#10834](https://github.com/ipfs/kubo/pull/10834)) + - rpc: retrieve pin names when Detailed option provided (#10966) ([ipfs/kubo#10966](https://github.com/ipfs/kubo/pull/10966)) + - fix: enforce identity CID size limits (#10949) ([ipfs/kubo#10949](https://github.com/ipfs/kubo/pull/10949)) + - docs: kubo logo sources (#10964) ([ipfs/kubo#10964](https://github.com/ipfs/kubo/pull/10964)) + - feat(config): validate Import config at daemon startup (#10957) ([ipfs/kubo#10957](https://github.com/ipfs/kubo/pull/10957)) + - fix(telemetry): improve vm/container detection (#10944) ([ipfs/kubo#10944](https://github.com/ipfs/kubo/pull/10944)) + - feat(gateway): improved error page with retrieval state details (#10950) ([ipfs/kubo#10950](https://github.com/ipfs/kubo/pull/10950)) + - close files opened during migration (#10956) ([ipfs/kubo#10956](https://github.com/ipfs/kubo/pull/10956)) + - fix ctrl-c prompt during run migrations prompt (#10947) ([ipfs/kubo#10947](https://github.com/ipfs/kubo/pull/10947)) + - repo: use config api to get node root path (#10934) ([ipfs/kubo#10934](https://github.com/ipfs/kubo/pull/10934)) + - docs: simplify release process (#10870) ([ipfs/kubo#10870](https://github.com/ipfs/kubo/pull/10870)) + - Merge release v0.37.0 ([ipfs/kubo#10943](https://github.com/ipfs/kubo/pull/10943)) + - feat(ci): docker linting (#10927) ([ipfs/kubo#10927](https://github.com/ipfs/kubo/pull/10927)) + - fix: disable telemetry in test profile (#10931) ([ipfs/kubo#10931](https://github.com/ipfs/kubo/pull/10931)) + - fix: harness tests random panic (#10933) 
([ipfs/kubo#10933](https://github.com/ipfs/kubo/pull/10933)) + - chore: 0.38.0-dev +- github.com/ipfs/boxo (v0.34.0 -> v0.35.0): + - Release v0.35.0 ([ipfs/boxo#1046](https://github.com/ipfs/boxo/pull/1046)) + - feat(gateway): add `MaxRangeRequestFileSize` protection (#1043) ([ipfs/boxo#1043](https://github.com/ipfs/boxo/pull/1043)) + - revert: remove MFS auto-flush mechanism (#1041) ([ipfs/boxo#1041](https://github.com/ipfs/boxo/pull/1041)) + - Filestore: add Provider option to provide filestore blocks. (#1042) ([ipfs/boxo#1042](https://github.com/ipfs/boxo/pull/1042)) + - fix(pinner): restore indirect pin detection and add context cancellation (#1039) ([ipfs/boxo#1039](https://github.com/ipfs/boxo/pull/1039)) + - fix(mfs): limit cache growth by default (#1037) ([ipfs/boxo#1037](https://github.com/ipfs/boxo/pull/1037)) + - update dependencies (#1038) ([ipfs/boxo#1038](https://github.com/ipfs/boxo/pull/1038)) + - feat(pinner): add `CheckIfPinnedWithType` for efficient checks with names (#1035) ([ipfs/boxo#1035](https://github.com/ipfs/boxo/pull/1035)) + - fix(routing/http): don't cancel batch prematurely (#1036) ([ipfs/boxo#1036](https://github.com/ipfs/boxo/pull/1036)) + - refactor: use the new Reprovide Sweep interface (#995) ([ipfs/boxo#995](https://github.com/ipfs/boxo/pull/995)) + - Update go-dsqueue to latest (#1034) ([ipfs/boxo#1034](https://github.com/ipfs/boxo/pull/1034)) + - feat(routing/http): return 200 for empty results per IPIP-513 (#1032) ([ipfs/boxo#1032](https://github.com/ipfs/boxo/pull/1032)) + - replace provider queue with go-dsqueue (#1033) ([ipfs/boxo#1033](https://github.com/ipfs/boxo/pull/1033)) + - refactor: use slices package to simplify slice manipulation (#1031) ([ipfs/boxo#1031](https://github.com/ipfs/boxo/pull/1031)) + - bitswap/network: fix read/write data race in bitswap network test (#1030) ([ipfs/boxo#1030](https://github.com/ipfs/boxo/pull/1030)) + - fix(verifcid): enforce size limit for identity CIDs (#1018) 
([ipfs/boxo#1018](https://github.com/ipfs/boxo/pull/1018)) + - docs: boxo logo source files (#1028) ([ipfs/boxo#1028](https://github.com/ipfs/boxo/pull/1028)) + - feat(gateway): enhance 504 timeout errors with diagnostic UX (#1023) ([ipfs/boxo#1023](https://github.com/ipfs/boxo/pull/1023)) + - Use `time.Duration` for rebroadcast delay (#1027) ([ipfs/boxo#1027](https://github.com/ipfs/boxo/pull/1027)) + - refactor(bitswap/client/internal): close session with Close method instead of context (#1011) ([ipfs/boxo#1011](https://github.com/ipfs/boxo/pull/1011)) + - fix: use %q for logging routing keys with binary data (#1025) ([ipfs/boxo#1025](https://github.com/ipfs/boxo/pull/1025)) + - rename `retrieval.RetrievalState` to `retrieval.State` (#1026) ([ipfs/boxo#1026](https://github.com/ipfs/boxo/pull/1026)) + - feat(gateway): add retrieval state tracking for timeout diagnostics (#1015) ([ipfs/boxo#1015](https://github.com/ipfs/boxo/pull/1015)) + - Nonfunctional changes (#1017) ([ipfs/boxo#1017](https://github.com/ipfs/boxo/pull/1017)) + - fix: flaky TestCancelOverridesPendingWants (#1016) ([ipfs/boxo#1016](https://github.com/ipfs/boxo/pull/1016)) + - bitswap/client: GetBlocks cancels session when finished (#1007) ([ipfs/boxo#1007](https://github.com/ipfs/boxo/pull/1007)) + - Remove unused context ([ipfs/boxo#1006](https://github.com/ipfs/boxo/pull/1006)) +- github.com/ipfs/go-block-format (v0.2.2 -> v0.2.3): + - new version (#66) ([ipfs/go-block-format#66](https://github.com/ipfs/go-block-format/pull/66)) + - Replace CI badge and add GoDoc link in README (#65) ([ipfs/go-block-format#65](https://github.com/ipfs/go-block-format/pull/65)) +- github.com/ipfs/go-datastore (v0.8.3 -> v0.9.0): + - new version (#255) ([ipfs/go-datastore#255](https://github.com/ipfs/go-datastore/pull/255)) + - feat(keytransform): support transaction feature (#239) ([ipfs/go-datastore#239](https://github.com/ipfs/go-datastore/pull/239)) + - feat: context datastore (#238) 
([ipfs/go-datastore#238](https://github.com/ipfs/go-datastore/pull/238)) + - new version (#254) ([ipfs/go-datastore#254](https://github.com/ipfs/go-datastore/pull/254)) + - fix comment (#253) ([ipfs/go-datastore#253](https://github.com/ipfs/go-datastore/pull/253)) + - feat: query iterator (#244) ([ipfs/go-datastore#244](https://github.com/ipfs/go-datastore/pull/244)) + - Update readme links (#246) ([ipfs/go-datastore#246](https://github.com/ipfs/go-datastore/pull/246)) +- github.com/ipfs/go-ipld-format (v0.6.2 -> v0.6.3): + - new version (#100) ([ipfs/go-ipld-format#100](https://github.com/ipfs/go-ipld-format/pull/100)) + - avoid unnecessary slice allocation (#99) ([ipfs/go-ipld-format#99](https://github.com/ipfs/go-ipld-format/pull/99)) +- github.com/ipfs/go-unixfsnode (v1.10.1 -> v1.10.2): + - new version ([ipfs/go-unixfsnode#88](https://github.com/ipfs/go-unixfsnode/pull/88)) +- github.com/ipld/go-car/v2 (v2.14.3 -> v2.15.0): + - v2.15.0 bump (#606) ([ipld/go-car#606](https://github.com/ipld/go-car/pull/606)) + - feat: add NextReader to BlockReader (#603) ([ipld/go-car#603](https://github.com/ipld/go-car/pull/603)) + - Remove `@masih` form CODEOWNERS ([ipld/go-car#605](https://github.com/ipld/go-car/pull/605)) +- github.com/libp2p/go-libp2p-kad-dht (v0.34.0 -> v0.35.0): + - chore: release v0.35.0 (#1162) ([libp2p/go-libp2p-kad-dht#1162](https://github.com/libp2p/go-libp2p-kad-dht/pull/1162)) + - refactor: adjust FIND_NODE response exceptions (#1158) ([libp2p/go-libp2p-kad-dht#1158](https://github.com/libp2p/go-libp2p-kad-dht/pull/1158)) + - refactor: remove provider status command (#1157) ([libp2p/go-libp2p-kad-dht#1157](https://github.com/libp2p/go-libp2p-kad-dht/pull/1157)) + - refactor(provider): closestPeerToPrefix coverage trie (#1156) ([libp2p/go-libp2p-kad-dht#1156](https://github.com/libp2p/go-libp2p-kad-dht/pull/1156)) + - fix: don't empty mapdatastore keystore on close (#1155) 
([libp2p/go-libp2p-kad-dht#1155](https://github.com/libp2p/go-libp2p-kad-dht/pull/1155)) + - provider: default options (#1153) ([libp2p/go-libp2p-kad-dht#1153](https://github.com/libp2p/go-libp2p-kad-dht/pull/1153)) + - fix(keystore): use new batch after commit (#1154) ([libp2p/go-libp2p-kad-dht#1154](https://github.com/libp2p/go-libp2p-kad-dht/pull/1154)) + - provider: more minor fixes (#1152) ([libp2p/go-libp2p-kad-dht#1152](https://github.com/libp2p/go-libp2p-kad-dht/pull/1152)) + - rename KeyStore -> Keystore (#1151) ([libp2p/go-libp2p-kad-dht#1151](https://github.com/libp2p/go-libp2p-kad-dht/pull/1151)) + - provider: minor fixes (#1150) ([libp2p/go-libp2p-kad-dht#1150](https://github.com/libp2p/go-libp2p-kad-dht/pull/1150)) + - buffered provider (#1149) ([libp2p/go-libp2p-kad-dht#1149](https://github.com/libp2p/go-libp2p-kad-dht/pull/1149)) + - keystore: remove mutex (#1147) ([libp2p/go-libp2p-kad-dht#1147](https://github.com/libp2p/go-libp2p-kad-dht/pull/1147)) + - provider: ResettableKeyStore (#1146) ([libp2p/go-libp2p-kad-dht#1146](https://github.com/libp2p/go-libp2p-kad-dht/pull/1146)) + - keystore: revamp (#1142) ([libp2p/go-libp2p-kad-dht#1142](https://github.com/libp2p/go-libp2p-kad-dht/pull/1142)) + - provider: use synctest for testing time (#1136) ([libp2p/go-libp2p-kad-dht#1136](https://github.com/libp2p/go-libp2p-kad-dht/pull/1136)) + - provider: connectivity state machine (#1135) ([libp2p/go-libp2p-kad-dht#1135](https://github.com/libp2p/go-libp2p-kad-dht/pull/1135)) + - provider: minor fixes (#1133) ([libp2p/go-libp2p-kad-dht#1133](https://github.com/libp2p/go-libp2p-kad-dht/pull/1133)) + - dual: provider (#1132) ([libp2p/go-libp2p-kad-dht#1132](https://github.com/libp2p/go-libp2p-kad-dht/pull/1132)) + - provider: refresh schedule (#1131) ([libp2p/go-libp2p-kad-dht#1131](https://github.com/libp2p/go-libp2p-kad-dht/pull/1131)) + - provider: integration tests (#1127) 
([libp2p/go-libp2p-kad-dht#1127](https://github.com/libp2p/go-libp2p-kad-dht/pull/1127)) + - provider: daemon (#1126) ([libp2p/go-libp2p-kad-dht#1126](https://github.com/libp2p/go-libp2p-kad-dht/pull/1126)) + - provide: handle reprovide (#1125) ([libp2p/go-libp2p-kad-dht#1125](https://github.com/libp2p/go-libp2p-kad-dht/pull/1125)) + - provider: options (#1124) ([libp2p/go-libp2p-kad-dht#1124](https://github.com/libp2p/go-libp2p-kad-dht/pull/1124)) + - provider: catchup pending work (#1123) ([libp2p/go-libp2p-kad-dht#1123](https://github.com/libp2p/go-libp2p-kad-dht/pull/1123)) + - provider: batch reprovide (#1122) ([libp2p/go-libp2p-kad-dht#1122](https://github.com/libp2p/go-libp2p-kad-dht/pull/1122)) + - provider: batch provide (#1121) ([libp2p/go-libp2p-kad-dht#1121](https://github.com/libp2p/go-libp2p-kad-dht/pull/1121)) + - provider: swarm exploration (#1120) ([libp2p/go-libp2p-kad-dht#1120](https://github.com/libp2p/go-libp2p-kad-dht/pull/1120)) + - provider: handleProvide (#1118) ([libp2p/go-libp2p-kad-dht#1118](https://github.com/libp2p/go-libp2p-kad-dht/pull/1118)) + - provider: schedule (#1117) ([libp2p/go-libp2p-kad-dht#1117](https://github.com/libp2p/go-libp2p-kad-dht/pull/1117)) + - provider: schedule prefix length (#1116) ([libp2p/go-libp2p-kad-dht#1116](https://github.com/libp2p/go-libp2p-kad-dht/pull/1116)) + - provider: ProvideStatus interface (#1110) ([libp2p/go-libp2p-kad-dht#1110](https://github.com/libp2p/go-libp2p-kad-dht/pull/1110)) + - provider: network operations (#1115) ([libp2p/go-libp2p-kad-dht#1115](https://github.com/libp2p/go-libp2p-kad-dht/pull/1115)) + - provider: adding provide and reprovide queue (#1114) ([libp2p/go-libp2p-kad-dht#1114](https://github.com/libp2p/go-libp2p-kad-dht/pull/1114)) + - provider: trie allocation helper (#1108) ([libp2p/go-libp2p-kad-dht#1108](https://github.com/libp2p/go-libp2p-kad-dht/pull/1108)) + - add missing ShortestCoveredPrefix 
([libp2p/go-libp2p-kad-dht@d0b110d](https://github.com/libp2p/go-libp2p-kad-dht/commit/d0b110d)) + - provider: keyspace helpers ([libp2p/go-libp2p-kad-dht@af3ce09](https://github.com/libp2p/go-libp2p-kad-dht/commit/af3ce09)) + - provider: helpers package rename (#1111) ([libp2p/go-libp2p-kad-dht#1111](https://github.com/libp2p/go-libp2p-kad-dht/pull/1111)) + - provider: trie region helpers (#1109) ([libp2p/go-libp2p-kad-dht#1109](https://github.com/libp2p/go-libp2p-kad-dht/pull/1109)) + - provider: PruneSubtrie helper (#1107) ([libp2p/go-libp2p-kad-dht#1107](https://github.com/libp2p/go-libp2p-kad-dht/pull/1107)) + - provider: NextNonEmptyLeaf trie helper (#1106) ([libp2p/go-libp2p-kad-dht#1106](https://github.com/libp2p/go-libp2p-kad-dht/pull/1106)) + - provider: find subtrie helper (#1105) ([libp2p/go-libp2p-kad-dht#1105](https://github.com/libp2p/go-libp2p-kad-dht/pull/1105)) + - provider: helpers trie find prefix (#1104) ([libp2p/go-libp2p-kad-dht#1104](https://github.com/libp2p/go-libp2p-kad-dht/pull/1104)) + - provider: trie items listing helpers (#1103) ([libp2p/go-libp2p-kad-dht#1103](https://github.com/libp2p/go-libp2p-kad-dht/pull/1103)) + - provider: add ShortestCoveredPrefix helper (#1102) ([libp2p/go-libp2p-kad-dht#1102](https://github.com/libp2p/go-libp2p-kad-dht/pull/1102)) + - provider: key helpers (#1101) ([libp2p/go-libp2p-kad-dht#1101](https://github.com/libp2p/go-libp2p-kad-dht/pull/1101)) + - provider: Connectivity Checker (#1099) ([libp2p/go-libp2p-kad-dht#1099](https://github.com/libp2p/go-libp2p-kad-dht/pull/1099)) + - provider: SweepingProvider interface (#1098) ([libp2p/go-libp2p-kad-dht#1098](https://github.com/libp2p/go-libp2p-kad-dht/pull/1098)) + - provider: keystore (#1096) ([libp2p/go-libp2p-kad-dht#1096](https://github.com/libp2p/go-libp2p-kad-dht/pull/1096)) + - provider initial commit ([libp2p/go-libp2p-kad-dht@70d21a8](https://github.com/libp2p/go-libp2p-kad-dht/commit/70d21a8)) + - test GCP result order (#1097) 
([libp2p/go-libp2p-kad-dht#1097](https://github.com/libp2p/go-libp2p-kad-dht/pull/1097)) + - refactor: apply suggestions in records (#1113) ([libp2p/go-libp2p-kad-dht#1113](https://github.com/libp2p/go-libp2p-kad-dht/pull/1113)) +- github.com/libp2p/go-libp2p-kbucket (v0.7.0 -> v0.8.0): + - chore: release v0.8.0 (#147) ([libp2p/go-libp2p-kbucket#147](https://github.com/libp2p/go-libp2p-kbucket/pull/147)) + - feat: generic find PeerID with CPL (#145) ([libp2p/go-libp2p-kbucket#145](https://github.com/libp2p/go-libp2p-kbucket/pull/145)) +- github.com/multiformats/go-varint (v0.0.7 -> v0.1.0): + - v0.1.0 bump (#29) ([multiformats/go-varint#29](https://github.com/multiformats/go-varint/pull/29)) + - chore: optimise UvarintSize (#28) ([multiformats/go-varint#28](https://github.com/multiformats/go-varint/pull/28)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Guillaume Michel | 62 | +15401/-5657 | 209 | +| Marcin Rataj | 33 | +9540/-1734 | 215 | +| Andrew Gillis | 29 | +771/-1093 | 70 | +| Hlib Kanunnikov | 2 | +350/-0 | 5 | +| Rod Vagg | 3 | +260/-9 | 4 | +| Hector Sanjuan | 4 | +188/-33 | 11 | +| Jakub Sztandera | 1 | +67/-15 | 3 | +| Masih H. Derkani | 1 | +1/-2 | 2 | +| Dominic Della Valle | 1 | +2/-1 | 1 | + +## v0.38.1 + +Fixes migration panic on Windows when upgrading from v0.37 to v0.38 ("panic: error can't be dealt with transactionally: Access is denied"). + +Updates go-ds-pebble to v0.5.3 (pebble v2.1.0). + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: v0.38.1 + - fix: migrations for Windows (#11010) ([ipfs/kubo#11010](https://github.com/ipfs/kubo/pull/11010)) + - Upgrade go-ds-pebble to v0.5.3 (#11011) ([ipfs/kubo#11011](https://github.com/ipfs/kubo/pull/11011)) + - upgrade go-ds-pebble to v0.5.2 (#11000) ([ipfs/kubo#11000](https://github.com/ipfs/kubo/pull/11000)) +- github.com/ipfs/go-ds-pebble (v0.5.1 -> v0.5.3): + - new version (#62) ([ipfs/go-ds-pebble#62](https://github.com/ipfs/go-ds-pebble/pull/62)) + - fix panic when batch is reused after commit (#61) ([ipfs/go-ds-pebble#61](https://github.com/ipfs/go-ds-pebble/pull/61)) + - new version (#60) ([ipfs/go-ds-pebble#60](https://github.com/ipfs/go-ds-pebble/pull/60)) + - Upgrade to pebble v2.1.0 (#59) ([ipfs/go-ds-pebble#59](https://github.com/ipfs/go-ds-pebble/pull/59)) + - update readme (#57) ([ipfs/go-ds-pebble#57](https://github.com/ipfs/go-ds-pebble/pull/57)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Marcin Rataj | 2 | +613/-267 | 15 | +| Andrew Gillis | 6 | +148/-22 | 8 | + +## v0.38.2 + +- Updates [boxo v0.35.1](https://github.com/ipfs/boxo/releases/tag/v0.35.1) with bitswap and HTTP retrieval fixes: + - Fixed bitswap trace context not being passed to sessions, restoring observability for monitoring tools + - Kubo now fetches from HTTP gateways that return errors in legacy IPLD format, improving compatibility with older providers + - Better handling of rate-limited HTTP endpoints and clearer timeout error messages +- Updates [go-libp2p-kad-dht v0.35.1](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.35.1) with memory optimizations for nodes using `Provide.DHT.SweepEnabled=true` +- Updates [quic-go v0.55.0](https://github.com/quic-go/quic-go/releases/tag/v0.55.0) to fix memory pooling where stream frames weren't returned to the pool on cancellation + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: boxo and kad-dht updates + - fix: update quic-go to v0.55.0 +- github.com/ipfs/boxo (v0.35.0 -> v0.35.1): + - Release v0.35.1 ([ipfs/boxo#1063](https://github.com/ipfs/boxo/pull/1063)) + - bitswap/httpnet: improve "Connect"/testCid check (#1057) ([ipfs/boxo#1057](https://github.com/ipfs/boxo/pull/1057)) + - fix: revert go-libp2p to v0.43.0 (#1061) ([ipfs/boxo#1061](https://github.com/ipfs/boxo/pull/1061)) + - bitswap/client: propagate trace state when calling `GetBlocks` ([ipfs/boxo#1060](https://github.com/ipfs/boxo/pull/1060)) + - fix(tracing): use context to pass trace and retrieval state to session ([ipfs/boxo#1059](https://github.com/ipfs/boxo/pull/1059)) + - bitswap: link traces ([ipfs/boxo#1053](https://github.com/ipfs/boxo/pull/1053)) + - fix(gateway): deduplicate peer IDs in retrieval diagnostics (#1058) ([ipfs/boxo#1058](https://github.com/ipfs/boxo/pull/1058)) + - update go-dsqueue to v0.1.0 ([ipfs/boxo#1049](https://github.com/ipfs/boxo/pull/1049)) + - Update go-libp2p to v0.44 ([ipfs/boxo#1048](https://github.com/ipfs/boxo/pull/1048)) +- github.com/ipfs/go-dsqueue (v0.0.5 -> v0.1.0): + - new version (#24) ([ipfs/go-dsqueue#24](https://github.com/ipfs/go-dsqueue/pull/24)) + - Do not reuse datastore Batch (#23) ([ipfs/go-dsqueue#23](https://github.com/ipfs/go-dsqueue/pull/23)) +- github.com/ipfs/go-log/v2 (v2.8.1 -> v2.8.2): + - new version (#175) ([ipfs/go-log#175](https://github.com/ipfs/go-log/pull/175)) + - fix: revert removal of LevelFromString to avoid breaking change (#174) ([ipfs/go-log#174](https://github.com/ipfs/go-log/pull/174)) +- github.com/ipld/go-car/v2 (v2.15.0 -> v2.16.0): + - v2.16.0 bump (#625) ([ipld/go-car#625](https://github.com/ipld/go-car/pull/625)) +- github.com/ipld/go-ipld-prime/storage/bsadapter (v0.0.0-20230102063945-1a409dc236dd -> v0.0.0-20250821084354-a425e60cd714): +- github.com/libp2p/go-libp2p-kad-dht (v0.35.0 -> v0.35.1): + - chore: release v0.35.1 (#1165) 
([libp2p/go-libp2p-kad-dht#1165](https://github.com/libp2p/go-libp2p-kad-dht/pull/1165)) + - feat(provider): use Trie.AddMany (#1164) ([libp2p/go-libp2p-kad-dht#1164](https://github.com/libp2p/go-libp2p-kad-dht/pull/1164)) + - fix(provider): memory usage (#1163) ([libp2p/go-libp2p-kad-dht#1163](https://github.com/libp2p/go-libp2p-kad-dht/pull/1163)) +- github.com/libp2p/go-netroute (v0.2.2 -> v0.3.0): + - release v0.3.0 + - remove google/gopacket dependency + - Query routes via routesocket ([libp2p/go-netroute#57](https://github.com/libp2p/go-netroute/pull/57)) + - ci: uci/update-go (#52) ([libp2p/go-netroute#52](https://github.com/libp2p/go-netroute/pull/52)) +- github.com/multiformats/go-multicodec (v0.9.2 -> v0.10.0): + - chore: v0.10.0 bump + - chore: update submodules and go generate + - chore(deps): update stringer to v0.38.0 + - ci: uci/update-go ([multiformats/go-multicodec#104](https://github.com/multiformats/go-multicodec/pull/104)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| rvagg | 1 | +537/-481 | 3 | +| Carlos Hernandez | 9 | +556/-218 | 11 | +| Guillaume Michel | 3 | +139/-105 | 6 | +| gammazero | 8 | +101/-97 | 14 | +| Hector Sanjuan | 1 | +87/-28 | 5 | +| Marcin Rataj | 4 | +57/-9 | 7 | +| Marco Munizaga | 2 | +42/-14 | 7 | +| Dennis Trautwein | 2 | +19/-7 | 7 | +| Andrew Gillis | 3 | +3/-19 | 3 | +| Rod Vagg | 4 | +12/-3 | 4 | +| web3-bot | 1 | +2/-1 | 1 | +| galargh | 1 | +1/-1 | 1 | diff --git a/docs/changelogs/v0.39.md b/docs/changelogs/v0.39.md new file mode 100644 index 000000000..19fdb5208 --- /dev/null +++ b/docs/changelogs/v0.39.md @@ -0,0 +1,364 @@ +# Kubo changelog v0.39 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. + +- [v0.39.0](#v0390) + +## v0.39.0 + +[](https://github.com/user-attachments/assets/427702e8-b6b8-4ac2-8425-18069626c321) + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [🎯 DHT Sweep provider is now the default](#-dht-sweep-provider-is-now-the-default) + - [⚡ Fast root CID providing for immediate content discovery](#-fast-root-cid-providing-for-immediate-content-discovery) + - [⏯️ Provider state persists across restarts](#️-provider-state-persists-across-restarts) + - [📊 Detailed statistics with `ipfs provide stat`](#-detailed-statistics-with-ipfs-provide-stat) + - [🔔 Slow reprovide warnings](#-slow-reprovide-warnings) + - [📊 Metric rename: `provider_provides_total`](#-metric-rename-provider_provides_total) + - [🔧 Automatic UPnP recovery after router restarts](#-automatic-upnp-recovery-after-router-restarts) + - [🪦 Deprecated `go-ipfs` name no longer published](#-deprecated-go-ipfs-name-no-longer-published) + - [🚦 Gateway range request limits for CDN compatibility](#-gateway-range-request-limits-for-cdn-compatibility) + - [🖥️ RISC-V support with prebuilt binaries](#️-risc-v-support-with-prebuilt-binaries) +- [📦️ 
Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +Kubo 0.39 makes self-hosting practical on consumer hardware and home networks. The DHT sweep provider (now default) announces your content to the network without traffic spikes that overwhelm residential connections. Automatic UPnP recovery means your node stays reachable after router restarts without manual intervention. + +New content becomes findable immediately after `ipfs add`. The provider system persists state across restarts, alerts you when falling behind, and exposes detailed stats for monitoring. This release also finalizes the deprecation of the legacy `go-ipfs` name. + +### 🔦 Highlights + +#### 🎯 DHT Sweep provider is now the default + +The Amino DHT Sweep provider system, introduced as experimental in v0.38, is now enabled by default (`Provide.DHT.SweepEnabled=true`). + +**What this means:** All nodes now benefit from efficient keyspace-sweeping content announcements that reduce memory overhead and create predictable network patterns, especially for nodes providing large content collections. + +**Migration:** The transition is automatic on upgrade. Your existing configuration is preserved: + +- If you explicitly set `Provide.DHT.SweepEnabled=false` in v0.38, you'll continue using the legacy provider +- If you were using the default settings, you'll automatically get the sweep provider +- To opt out and return to legacy behavior: `ipfs config --json Provide.DHT.SweepEnabled false` +- Providers with medium to large datasets may need to adjust defaults; see [Capacity Planning](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md#capacity-planning) +- When `Routing.AcceleratedDHTClient` is enabled, full sweep efficiency may not be available yet; consider disabling the accelerated client as sweep is sufficient for most workloads. 
See [caveat 4](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient). + +**New features available with sweep mode:** + +- Detailed statistics via `ipfs provide stat` ([see below](#-detailed-statistics-with-ipfs-provide-stat)) +- Automatic resume after restarts with persistent state ([see below](#️-provider-state-persists-across-restarts)) +- Proactive alerts when reproviding falls behind ([see below](#-slow-reprovide-warnings)) +- Better metrics for monitoring (`provider_provides_total`) ([see below](#-metric-rename-provider_provides_total)) +- Fast optimistic provide of new root CIDs ([see below](#-fast-root-cid-providing-for-immediate-content-discovery)) + +For background on the sweep provider design and motivations, see [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled) and Shipyard's blogpost [Provide Sweep: Solving the DHT Provide Bottleneck](https://ipshipyard.com/blog/2025-dht-provide-sweep/). + +#### ⚡ Fast root CID providing for immediate content discovery + +When you add content to IPFS, the sweep provider queues it for efficient DHT provides over time. While this is resource-efficient, other peers won't find your content immediately after `ipfs add` or `ipfs dag import` completes. + +To make sharing faster, `ipfs add` and `ipfs dag import` now do an immediate provide of root CIDs to the DHT in addition to the regular queue (controlled by the new `--fast-provide-root` flag, enabled by default). This complements the sweep provider system: fast-provide handles the urgent case (root CIDs that users share and reference), while the sweep provider efficiently provides all blocks according to `Provide.Strategy` over time. + +This closes the gap between command completion and content shareability: root CIDs typically become discoverable on the network in under a second (compared to 30+ seconds previously). 
The feature uses optimistic DHT operations, which are significantly faster with the sweep provider (now enabled by default). + +By default, this immediate provide runs in the background without blocking the command. For use cases requiring guaranteed discoverability before the command returns (e.g., sharing a link immediately), use `--fast-provide-wait` to block until the provide completes. + +**Simple examples:** + +```bash +ipfs add file.txt # Root provided immediately, blocks queued for sweep provider +ipfs add file.txt --fast-provide-wait # Wait for root provide to complete +ipfs dag import file.car # Same for CAR imports +``` + +**Configuration:** Set defaults via `Import.FastProvideRoot` (default: `true`) and `Import.FastProvideWait` (default: `false`). See `ipfs add --help` and `ipfs dag import --help` for more details and examples. + +Fast root CID provide is automatically skipped when DHT routing is unavailable (e.g., `Routing.Type=none` or delegated-only configurations). + +#### ⏯️ Provider state persists across restarts + +The Sweep provider now persists the reprovide cycle state and automatically resumes where it left off after a restart. This brings several improvements: + +- **Persistent progress**: The provider saves its position in the reprovide cycle to the datastore. On restart, it continues from where it stopped instead of starting from scratch. +- **Catch-up reproviding**: If the node was offline for an extended period, all CIDs that haven't been reprovided within the configured reprovide interval are immediately queued for reproviding when the node starts up. This ensures content availability is maintained even after downtime. +- **Persistent provide queue**: The provide queue is persisted to the datastore on shutdown. When the node restarts, queued CIDs are restored and provided as expected, preventing loss of pending provide operations. 
+- **Resume control**: The resume behavior is controlled via [`Provide.DHT.ResumeEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtresumeenabled) (default: `true`). Set to `false` if you don't want to keep the persisted provider state from a previous run. + +This feature improves reliability for nodes that experience intermittent connectivity or restarts. + +#### 📊 Detailed statistics with `ipfs provide stat` + +The Sweep provider system now exposes detailed statistics through `ipfs provide stat`, helping you monitor provider health and troubleshoot issues. + +Run `ipfs provide stat` for a quick summary, or use `--all` to see complete metrics including connectivity status, queue sizes, reprovide schedules, network statistics, operation rates, and worker utilization. For real-time monitoring, use `watch ipfs provide stat --all --compact` to observe changes in a 2-column layout. Individual sections can be displayed with flags like `--network`, `--operations`, or `--workers`. + +For Dual DHT configurations, use `--lan` to view LAN DHT statistics instead of the default WAN DHT stats. + +For more information, run `ipfs provide stat --help` or see the [Provide Stats documentation](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md), including [Capacity Planning](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md#capacity-planning). + +> [!NOTE] +> Legacy provider (when `Provide.DHT.SweepEnabled=false`) shows basic statistics without flag support. + +#### 🔔 Slow reprovide warnings + +Kubo now monitors DHT reprovide operations when `Provide.DHT.SweepEnabled=true` +and alerts you if your node is falling behind on reprovides. 
+ +When the reprovide queue consistently grows and all periodic workers are busy, +a warning displays with: + +- Queue size and worker utilization details +- Recommended solutions: increase `Provide.DHT.MaxWorkers` or `Provide.DHT.DedicatedPeriodicWorkers` +- Command to monitor real-time progress: `watch ipfs provide stat --all --compact` + +The alert polls every 15 minutes (to avoid alert fatigue while catching +persistent issues) and only triggers after sustained growth across multiple +intervals. The legacy provider is unaffected by this change. + +#### 📊 Metric rename: `provider_provides_total` + +The Amino DHT Sweep provider metric has been renamed from `total_provide_count_total` to `provider_provides_total` to follow OpenTelemetry naming conventions and maintain consistency with other kad-dht metrics (which use dot notation like `rpc.inbound.messages`, `rpc.outbound.requests`, etc.). + +**Migration:** If you have Prometheus queries, dashboards, or alerts monitoring the old `total_provide_count_total` metric, update them to use `provider_provides_total` instead. This affects all nodes using sweep mode, which is now the default in v0.39 (previously opt-in experimental in v0.38). + +#### 🔧 Automatic UPnP recovery after router restarts + +Kubo now automatically recovers UPnP port mappings when routers restart or +become temporarily unavailable, fixing a critical connectivity issue that +affected self-hosted nodes behind NAT. + +**Previous behavior:** When a UPnP-enabled router restarted, Kubo would lose +its port mapping and fail to re-establish it automatically. Nodes would become +unreachable to the network until the daemon was manually restarted, forcing +reliance on relay connections which degraded performance. + +**New behavior:** The upgraded go-libp2p (v0.44.0) includes [Shipyard's fix](https://github.com/libp2p/go-libp2p/pull/3367) +for self-healing NAT mappings that automatically rediscover and re-establish +port forwarding after router events. 
Nodes now maintain public connectivity +without manual intervention. + +> [!NOTE] +> If your node runs behind a router and you haven't manually configured port +> forwarding, make sure [`Swarm.DisableNatPortMap=false`](https://github.com/ipfs/kubo/blob/master/docs/config.md#swarmdisablenatportmap) +> so UPnP can automatically handle port mapping (this is the default). + +This significantly improves reliability for desktop and self-hosted IPFS nodes +using UPnP for NAT traversal. + +#### 🪦 Deprecated `go-ipfs` name no longer published + +The `go-ipfs` name was deprecated in 2022 and renamed to `kubo`. Starting with this release, the legacy Docker image name has been replaced with a stub that displays an error message directing users to switch to `ipfs/kubo`. + +**Docker images:** The `ipfs/go-ipfs` image tags now contain only a stub script that exits with an error, instructing users to update their Docker configurations to use [`ipfs/kubo`](https://hub.docker.com/r/ipfs/kubo) instead. This ensures users are aware of the deprecation while allowing existing automation to fail explicitly rather than silently using outdated images. + +**Distribution binaries:** Download Kubo from <https://dist.ipfs.tech> or <https://github.com/ipfs/kubo/releases>. The legacy `go-ipfs` distribution path should no longer be used. + +All users should migrate to the `kubo` name in their scripts and configurations. + +#### 🚦 Gateway range request limits for CDN compatibility + +The new [`Gateway.MaxRangeRequestFileSize`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrangerequestfilesize) configuration protects against CDN range request limitations that cause bandwidth overcharges on deserialized responses. Some CDNs convert range requests over large files into full file downloads, causing clients requesting small byte ranges to unknowingly download entire multi-gigabyte files. + +This only impacts deserialized responses. Clients using verifiable block requests (`application/vnd.ipld.raw`) are not affected.
See the [configuration documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrangerequestfilesize) for details. + +#### 🖥️ RISC-V support with prebuilt binaries + +Kubo provides official `linux-riscv64` prebuilt binaries, bringing IPFS to [RISC-V](https://en.wikipedia.org/wiki/RISC-V) open hardware. + +As RISC-V single-board computers and embedded systems become more accessible, the distributed web is now supported on open hardware architectures - a natural pairing of open technologies. + +Download from <https://dist.ipfs.tech> or <https://github.com/ipfs/kubo/releases> and look for the `linux-riscv64` archive. + +### 📦️ Important dependency updates + +- update `go-libp2p` to [v0.45.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.45.0) (incl. [v0.44.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.44.0)) with self-healing UPnP port mappings and go-log/slog interop fixes +- update `quic-go` to [v0.55.0](https://github.com/quic-go/quic-go/releases/tag/v0.55.0) +- update `go-log` to [v2.9.0](https://github.com/ipfs/go-log/releases/tag/v2.9.0) with slog integration for go-libp2p +- update `go-ds-pebble` to [v0.5.7](https://github.com/ipfs/go-ds-pebble/releases/tag/v0.5.7) (includes pebble [v2.1.2](https://github.com/cockroachdb/pebble/releases/tag/v2.1.2)) +- update `boxo` to [v0.35.2](https://github.com/ipfs/boxo/releases/tag/v0.35.2) (includes boxo [v0.35.1](https://github.com/ipfs/boxo/releases/tag/v0.35.1)) +- update `ipfs-webui` to [v4.10.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.10.0) +- update `go-libp2p-kad-dht` to [v0.36.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.36.0) + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - docs: mkreleaselog for 0.39 + - chore: version 0.39.0 + - bin/mkreleaselog: add github handle resolution and deduplication + - docs: restructure v0.39 changelog for clarity + - upgrade go-libp2p-kad-dht to v0.36.0 (#11079) ([ipfs/kubo#11079](https://github.com/ipfs/kubo/pull/11079)) + - fix(docker): include symlinks in scanning for init scripts (#11077) ([ipfs/kubo#11077](https://github.com/ipfs/kubo/pull/11077)) + - Update deprecation message for Reprovider fields (#11072) ([ipfs/kubo#11072](https://github.com/ipfs/kubo/pull/11072)) + - chore: release v0.39.0-rc1 + - test: add regression tests for config secrets protection (#11061) ([ipfs/kubo#11061](https://github.com/ipfs/kubo/pull/11061)) + - test: add regression tests for API.Authorizations (#11060) ([ipfs/kubo#11060](https://github.com/ipfs/kubo/pull/11060)) + - test: verifyWorkerRun and helptext (#11063) ([ipfs/kubo#11063](https://github.com/ipfs/kubo/pull/11063)) + - test(cmdutils): add tests for PathOrCidPath and ValidatePinName (#11062) ([ipfs/kubo#11062](https://github.com/ipfs/kubo/pull/11062)) + - fix: return original error in PathOrCidPath fallback (#11059) ([ipfs/kubo#11059](https://github.com/ipfs/kubo/pull/11059)) + - feat: fast provide support in `dag import` (#11058) ([ipfs/kubo#11058](https://github.com/ipfs/kubo/pull/11058)) + - feat(cli/rpc/add): fast provide of root CID (#11046) ([ipfs/kubo#11046](https://github.com/ipfs/kubo/pull/11046)) + - feat(telemetry): collect high level provide DHT sweep settings (#11056) ([ipfs/kubo#11056](https://github.com/ipfs/kubo/pull/11056)) + - feat: enable DHT Provide Sweep by default (#10955) ([ipfs/kubo#10955](https://github.com/ipfs/kubo/pull/10955)) + - feat(config): optional Gateway.MaxRangeRequestFileSize (#10997) ([ipfs/kubo#10997](https://github.com/ipfs/kubo/pull/10997)) + - docs: clarify provide stats metric types and calculations (#11041) ([ipfs/kubo#11041](https://github.com/ipfs/kubo/pull/11041)) + - 
Upgrade to Boxo v0.35.2 (#11050) ([ipfs/kubo#11050](https://github.com/ipfs/kubo/pull/11050)) + - fix(go-log@2.9/go-libp2p@0.45): dynamic log level control and tail (#11039) ([ipfs/kubo#11039](https://github.com/ipfs/kubo/pull/11039)) + - chore: update webui to v4.10.0 (#11048) ([ipfs/kubo#11048](https://github.com/ipfs/kubo/pull/11048)) + - fix(provider/stats): number format (#11045) ([ipfs/kubo#11045](https://github.com/ipfs/kubo/pull/11045)) + - provider: protect libp2p connections (#11028) ([ipfs/kubo#11028](https://github.com/ipfs/kubo/pull/11028)) + - Merge release v0.38.2 ([ipfs/kubo#11044](https://github.com/ipfs/kubo/pull/11044)) + - Upgrade to Boxo v0.35.1 (#11043) ([ipfs/kubo#11043](https://github.com/ipfs/kubo/pull/11043)) + - feat(provider): resume cycle (#11031) ([ipfs/kubo#11031](https://github.com/ipfs/kubo/pull/11031)) + - chore: upgrade pebble to v2.1.1 (#11040) ([ipfs/kubo#11040](https://github.com/ipfs/kubo/pull/11040)) + - fix(cli): provide stat cosmetics (#11034) ([ipfs/kubo#11034](https://github.com/ipfs/kubo/pull/11034)) + - fix: go-libp2p v0.44 with self-healing UPnP port mappings (#11032) ([ipfs/kubo#11032](https://github.com/ipfs/kubo/pull/11032)) + - feat(provide): slow reprovide alerts when SweepEnabled (#11021) ([ipfs/kubo#11021](https://github.com/ipfs/kubo/pull/11021)) + - feat: trace delegated routing http client (#11017) ([ipfs/kubo#11017](https://github.com/ipfs/kubo/pull/11017)) + - feat(provide): detailed `ipfs provide stat` (#11019) ([ipfs/kubo#11019](https://github.com/ipfs/kubo/pull/11019)) + - config: increase default Provide.DHT.MaxProvideConnsPerWorker (#11016) ([ipfs/kubo#11016](https://github.com/ipfs/kubo/pull/11016)) + - docs: update release checklist based on v0.38.0 learnings (#11007) ([ipfs/kubo#11007](https://github.com/ipfs/kubo/pull/11007)) + - chore: merge release v0.38.1 ([ipfs/kubo#11020](https://github.com/ipfs/kubo/pull/11020)) + - fix: migrations for Windows (#11010) 
([ipfs/kubo#11010](https://github.com/ipfs/kubo/pull/11010)) + - Upgrade go-ds-pebble to v0.5.3 (#11011) ([ipfs/kubo#11011](https://github.com/ipfs/kubo/pull/11011)) + - Merge release v0.38.0 ([ipfs/kubo#11006](https://github.com/ipfs/kubo/pull/11006)) + - feat: add docker stub for deprecated ipfs/go-ipfs name (#10998) ([ipfs/kubo#10998](https://github.com/ipfs/kubo/pull/10998)) + - docs: add sweeping provide worker count recommendation (#11001) ([ipfs/kubo#11001](https://github.com/ipfs/kubo/pull/11001)) + - chore: bump go-libp2p-kad-dht to v0.35.0 (#11002) ([ipfs/kubo#11002](https://github.com/ipfs/kubo/pull/11002)) + - upgrade go-ds-pebble to v0.5.2 (#11000) ([ipfs/kubo#11000](https://github.com/ipfs/kubo/pull/11000)) + - Upgrade to Boxo v0.35.0 (#10999) ([ipfs/kubo#10999](https://github.com/ipfs/kubo/pull/10999)) + - Non-functional changes (#10996) ([ipfs/kubo#10996](https://github.com/ipfs/kubo/pull/10996)) + - chore: update boxo and kad-dht dependencies (#10995) ([ipfs/kubo#10995](https://github.com/ipfs/kubo/pull/10995)) + - fix: update webui to v4.9.1 (#10994) ([ipfs/kubo#10994](https://github.com/ipfs/kubo/pull/10994)) + - fix: provider merge conflicts (#10989) ([ipfs/kubo#10989](https://github.com/ipfs/kubo/pull/10989)) + - fix(mfs): add soft limit for `--flush=false` (#10985) ([ipfs/kubo#10985](https://github.com/ipfs/kubo/pull/10985)) + - fix: provide Filestore nodes (#10990) ([ipfs/kubo#10990](https://github.com/ipfs/kubo/pull/10990)) + - feat: limit pin names to 255 bytes (#10981) ([ipfs/kubo#10981](https://github.com/ipfs/kubo/pull/10981)) + - fix: SweepingProvider slow start (#10980) ([ipfs/kubo#10980](https://github.com/ipfs/kubo/pull/10980)) + - chore: start v0.39.0 release cycle +- github.com/gammazero/deque (v1.1.0 -> v1.2.0): + - add slice operation functions (#40) ([gammazero/deque#40](https://github.com/gammazero/deque/pull/40)) + - maintain base capacity after IterPop iteration (#44) 
([gammazero/deque#44](https://github.com/gammazero/deque/pull/44)) +- github.com/ipfs/boxo (v0.35.1 -> v0.35.2): + - Release v0.35.2 ([ipfs/boxo#1068](https://github.com/ipfs/boxo/pull/1068)) + - fix(logs): upgrade go-libp2p to v0.45.0 and go-log to v2.9.0 ([ipfs/boxo#1066](https://github.com/ipfs/boxo/pull/1066)) +- github.com/ipfs/go-cid (v0.5.0 -> v0.6.0): + - v0.6.0 bump (#178) ([ipfs/go-cid#178](https://github.com/ipfs/go-cid/pull/178)) +- github.com/ipfs/go-ds-pebble (v0.5.3 -> v0.5.7): + - new version (#74) ([ipfs/go-ds-pebble#74](https://github.com/ipfs/go-ds-pebble/pull/74)) + - do not override logger if logger is provided (#72) ([ipfs/go-ds-pebble#72](https://github.com/ipfs/go-ds-pebble/pull/72)) + - new version (#70) ([ipfs/go-ds-pebble#70](https://github.com/ipfs/go-ds-pebble/pull/70)) + - new-version (#68) ([ipfs/go-ds-pebble#68](https://github.com/ipfs/go-ds-pebble/pull/68)) + - Do not allow batch to be reused after commit (#67) ([ipfs/go-ds-pebble#67](https://github.com/ipfs/go-ds-pebble/pull/67)) + - new version (#66) ([ipfs/go-ds-pebble#66](https://github.com/ipfs/go-ds-pebble/pull/66)) + - Make pebble write options configurable ([ipfs/go-ds-pebble#63](https://github.com/ipfs/go-ds-pebble/pull/63)) +- github.com/ipfs/go-dsqueue (v0.1.0 -> v0.1.1): + - new version (#26) ([ipfs/go-dsqueue#26](https://github.com/ipfs/go-dsqueue/pull/26)) + - update deque package and add stress test (#25) ([ipfs/go-dsqueue#25](https://github.com/ipfs/go-dsqueue/pull/25)) +- github.com/ipfs/go-log/v2 (v2.8.2 -> v2.9.0): + - chore: release v2.9.0 (#177) ([ipfs/go-log#177](https://github.com/ipfs/go-log/pull/177)) + - fix: go-libp2p and slog interop (#176) ([ipfs/go-log#176](https://github.com/ipfs/go-log/pull/176)) +- github.com/libp2p/go-libp2p (v0.43.0 -> v0.45.0): + - Release v0.45.0 (#3424) ([libp2p/go-libp2p#3424](https://github.com/libp2p/go-libp2p/pull/3424)) + - feat(gologshim): Add SetDefaultHandler (#3418) 
([libp2p/go-libp2p#3418](https://github.com/libp2p/go-libp2p/pull/3418)) + - Update Drips ownedBy address in FUNDING.json + - fix(websocket): use debug level for http.Server errors + - chore: release v0.44.0 + - autonatv2: fix normalization for websocket addrs + - autonatv2: remove dependency on webrtc and webtransport + - quicreuse: update libp2p/go-netroute (#3405) ([libp2p/go-libp2p#3405](https://github.com/libp2p/go-libp2p/pull/3405)) + - basichost: don't advertise unreachable addrs. (#3357) ([libp2p/go-libp2p#3357](https://github.com/libp2p/go-libp2p/pull/3357)) + - basichost: improve autonatv2 reachability logic (#3356) ([libp2p/go-libp2p#3356](https://github.com/libp2p/go-libp2p/pull/3356)) + - basichost: fix lint error + - basichost: move EvtLocalAddrsChanged to addrs_manager (#3355) ([libp2p/go-libp2p#3355](https://github.com/libp2p/go-libp2p/pull/3355)) + - chore: gitignore go.work files + - refactor!: move insecure transport outside of core + - refactor: drop go-varint dependency + - refactor!: move canonicallog package outside of core + - fix: assignment to entry in nil map + - docs: Update contribute section with mailing list and irc (#3387) ([libp2p/go-libp2p#3387](https://github.com/libp2p/go-libp2p/pull/3387)) + - README: remove Drand from notable users section + - chore: add help comment + - refactor: replace context.WithCancel with t.Context + - feat(network): Add Conn.As + - Skip mdns tests on macOS in CI + - fix: deduplicate NAT port mapping requests + - fix: heal NAT mappings after router restart + - feat: relay: add option for custom filter function + - docs: remove broken link (#3375) ([libp2p/go-libp2p#3375](https://github.com/libp2p/go-libp2p/pull/3375)) + - AI tooling must be disclosed for contributions (#3372) ([libp2p/go-libp2p#3372](https://github.com/libp2p/go-libp2p/pull/3372)) + - feat: Migrate to log/slog (#3364) ([libp2p/go-libp2p#3364](https://github.com/libp2p/go-libp2p/pull/3364)) + - basichost: move observed address manager to 
basichost (#3332) ([libp2p/go-libp2p#3332](https://github.com/libp2p/go-libp2p/pull/3332)) + - chore: support Go 1.24 & 1.25 (#3366) ([libp2p/go-libp2p#3366](https://github.com/libp2p/go-libp2p/pull/3366)) + - feat(simlibp2p): Simulated libp2p Networks (#3262) ([libp2p/go-libp2p#3262](https://github.com/libp2p/go-libp2p/pull/3262)) + - bandwidthcounter: add Reset and TrimIdle methods to reporter interface (#3343) ([libp2p/go-libp2p#3343](https://github.com/libp2p/go-libp2p/pull/3343)) + - network: rename NAT Types (#3331) ([libp2p/go-libp2p#3331](https://github.com/libp2p/go-libp2p/pull/3331)) + - refactor(quicreuse): use errors.Join in Close method (#3363) ([libp2p/go-libp2p#3363](https://github.com/libp2p/go-libp2p/pull/3363)) + - swarm: move AddCertHashes to swarm (#3330) ([libp2p/go-libp2p#3330](https://github.com/libp2p/go-libp2p/pull/3330)) + - quicreuse: clean up associations for closed listeners. (#3306) ([libp2p/go-libp2p#3306](https://github.com/libp2p/go-libp2p/pull/3306)) +- github.com/libp2p/go-libp2p-kad-dht (v0.35.1 -> v0.36.0): + - new version (#1204) ([libp2p/go-libp2p-kad-dht#1204](https://github.com/libp2p/go-libp2p-kad-dht/pull/1204)) + - update dependencies (#1205) ([libp2p/go-libp2p-kad-dht#1205](https://github.com/libp2p/go-libp2p-kad-dht/pull/1205)) + - fix(provider): protect `SweepingProvider.wg` (#1200) ([libp2p/go-libp2p-kad-dht#1200](https://github.com/libp2p/go-libp2p-kad-dht/pull/1200)) + - fix(ResettableKeystore): race when closing during reset (#1201) ([libp2p/go-libp2p-kad-dht#1201](https://github.com/libp2p/go-libp2p-kad-dht/pull/1201)) + - fix(provider): conflict resolution (#1199) ([libp2p/go-libp2p-kad-dht#1199](https://github.com/libp2p/go-libp2p-kad-dht/pull/1199)) + - fix(provider): remove from trie by pruning prefix (#1198) ([libp2p/go-libp2p-kad-dht#1198](https://github.com/libp2p/go-libp2p-kad-dht/pull/1198)) + - fix(provider): rename metric to follow OpenTelemetry conventions (#1195) 
([libp2p/go-libp2p-kad-dht#1195](https://github.com/libp2p/go-libp2p-kad-dht/pull/1195)) + - fix(provider): resume cycle from persisted keystore (#1193) ([libp2p/go-libp2p-kad-dht#1193](https://github.com/libp2p/go-libp2p-kad-dht/pull/1193)) + - feat(provider): connectivity callbacks (#1194) ([libp2p/go-libp2p-kad-dht#1194](https://github.com/libp2p/go-libp2p-kad-dht/pull/1194)) + - feat(provider): trie iterators (#1189) ([libp2p/go-libp2p-kad-dht#1189](https://github.com/libp2p/go-libp2p-kad-dht/pull/1189)) + - refactor(provider): optimize memory when allocating keys to peers (#1187) ([libp2p/go-libp2p-kad-dht#1187](https://github.com/libp2p/go-libp2p-kad-dht/pull/1187)) + - refactor(keystore): track size (#1181) ([libp2p/go-libp2p-kad-dht#1181](https://github.com/libp2p/go-libp2p-kad-dht/pull/1181)) + - Remove go-libp2p-maintainers from codeowners (#1192) ([libp2p/go-libp2p-kad-dht#1192](https://github.com/libp2p/go-libp2p-kad-dht/pull/1192)) + - switch to bit256.NewKeyFromArray (#1188) ([libp2p/go-libp2p-kad-dht#1188](https://github.com/libp2p/go-libp2p-kad-dht/pull/1188)) + - fix(provider): `RegionsFromPeers` may return multiple regions (#1185) ([libp2p/go-libp2p-kad-dht#1185](https://github.com/libp2p/go-libp2p-kad-dht/pull/1185)) + - feat(provider): skip bootstrap reprovide (#1186) ([libp2p/go-libp2p-kad-dht#1186](https://github.com/libp2p/go-libp2p-kad-dht/pull/1186)) + - refactor(provider): use adaptive deadline for CycleStats cleanup (#1183) ([libp2p/go-libp2p-kad-dht#1183](https://github.com/libp2p/go-libp2p-kad-dht/pull/1183)) + - refactor(provider/stats): use int64 to avoid overflows (#1182) ([libp2p/go-libp2p-kad-dht#1182](https://github.com/libp2p/go-libp2p-kad-dht/pull/1182)) + - provider: trigger connectivity check when missing libp2p addresses (#1180) ([libp2p/go-libp2p-kad-dht#1180](https://github.com/libp2p/go-libp2p-kad-dht/pull/1180)) + - fix(provider): resume cycle (#1176) 
([libp2p/go-libp2p-kad-dht#1176](https://github.com/libp2p/go-libp2p-kad-dht/pull/1176)) + - tests: fix flaky TestProvidesExpire (#1179) ([libp2p/go-libp2p-kad-dht#1179](https://github.com/libp2p/go-libp2p-kad-dht/pull/1179)) + - tests: fix flaky TestFindPeerWithQueryFilter (#1178) ([libp2p/go-libp2p-kad-dht#1178](https://github.com/libp2p/go-libp2p-kad-dht/pull/1178)) + - tests: fix #1175 (#1177) ([libp2p/go-libp2p-kad-dht#1177](https://github.com/libp2p/go-libp2p-kad-dht/pull/1177)) + - feat(provider): exit early region exploration if no new peers discovered (#1174) ([libp2p/go-libp2p-kad-dht#1174](https://github.com/libp2p/go-libp2p-kad-dht/pull/1174)) + - provider: protect connections (#1172) ([libp2p/go-libp2p-kad-dht#1172](https://github.com/libp2p/go-libp2p-kad-dht/pull/1172)) + - feat(provider): resume reprovides (#1170) ([libp2p/go-libp2p-kad-dht#1170](https://github.com/libp2p/go-libp2p-kad-dht/pull/1170)) + - fix(provider): custom logger name (#1173) ([libp2p/go-libp2p-kad-dht#1173](https://github.com/libp2p/go-libp2p-kad-dht/pull/1173)) + - feat(provider): persist provide queue (#1167) ([libp2p/go-libp2p-kad-dht#1167](https://github.com/libp2p/go-libp2p-kad-dht/pull/1167)) + - provider: stats (#1144) ([libp2p/go-libp2p-kad-dht#1144](https://github.com/libp2p/go-libp2p-kad-dht/pull/1144)) +- github.com/probe-lab/go-libdht (v0.3.0 -> v0.4.0): + - chore: release v0.4.0 (#26) ([probe-lab/go-libdht#26](https://github.com/probe-lab/go-libdht/pull/26)) + - feat(key/bit256): memory optimized constructor (#25) ([probe-lab/go-libdht#25](https://github.com/probe-lab/go-libdht/pull/25)) + - refactor(trie): AddMany memory optimization (#24) ([probe-lab/go-libdht#24](https://github.com/probe-lab/go-libdht/pull/24)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| [@guillaumemichel](https://github.com/guillaumemichel) | 41 | +9906/-1383 | 170 | +| [@lidel](https://github.com/lidel) | 30 | +6652/-694 | 97 | +| [@sukunrt](https://github.com/sukunrt) | 9 | +1618/-1524 | 39 | +| [@MarcoPolo](https://github.com/MarcoPolo) | 17 | +1665/-1452 | 160 | +| [@gammazero](https://github.com/gammazero) | 23 | +514/-53 | 29 | +| [@Prabhat1308](https://github.com/Prabhat1308) | 1 | +197/-67 | 4 | +| [@peterargue](https://github.com/peterargue) | 3 | +82/-25 | 5 | +| [@cargoedit](https://github.com/cargoedit) | 1 | +35/-72 | 14 | +| [@hsanjuan](https://github.com/hsanjuan) | 2 | +66/-29 | 5 | +| [@shoriwe](https://github.com/shoriwe) | 1 | +68/-21 | 3 | +| [@dennis-tra](https://github.com/dennis-tra) | 2 | +27/-2 | 2 | +| [@Lil-Duckling-22](https://github.com/Lil-Duckling-22) | 1 | +4/-1 | 1 | +| [@crStiv](https://github.com/crStiv) | 1 | +1/-3 | 1 | +| [@cpeliciari](https://github.com/cpeliciari) | 1 | +3/-0 | 1 | +| [@rvagg](https://github.com/rvagg) | 1 | +1/-1 | 1 | +| [@p-shahi](https://github.com/p-shahi) | 1 | +1/-1 | 1 | +| [@lbarrettanderson](https://github.com/lbarrettanderson) | 1 | +1/-1 | 1 | +| [@filipremb](https://github.com/filipremb) | 1 | +1/-1 | 1 | +| [@marten-seemann](https://github.com/marten-seemann) | 1 | +0/-1 | 1 | diff --git a/docs/changelogs/v0.4.md b/docs/changelogs/v0.4.md index bdc0f004b..de15c51dd 100644 --- a/docs/changelogs/v0.4.md +++ b/docs/changelogs/v0.4.md @@ -401,7 +401,7 @@ g generation. -n, --only-hash bool - Only chunk and hash - do not write to disk. -w, --wrap-with-directory bool - Wrap files with a directory o -bject. +object. -s, --chunker string - Chunking algorithm, size-[byt es] or rabin-[min]-[avg]-[max]. Default: size-262144. --pin bool - Pin this object when adding. 
@@ -1593,7 +1593,7 @@ The next steps are: - cmds: remove redundant func ([ipfs/go-ipfs#5750](https://github.com/ipfs/go-ipfs/pull/5750)) - commands/refs: use new cmds ([ipfs/go-ipfs#5679](https://github.com/ipfs/go-ipfs/pull/5679)) - commands/pin: use new cmds lib ([ipfs/go-ipfs#5674](https://github.com/ipfs/go-ipfs/pull/5674)) - - commands/boostrap: use new cmds ([ipfs/go-ipfs#5678](https://github.com/ipfs/go-ipfs/pull/5678)) + - commands/bootstrap: use new cmds ([ipfs/go-ipfs#5678](https://github.com/ipfs/go-ipfs/pull/5678)) - fix(cmd/add): progressbar output error when input is read from stdin ([ipfs/go-ipfs#5743](https://github.com/ipfs/go-ipfs/pull/5743)) - unexport GOFLAGS ([ipfs/go-ipfs#5747](https://github.com/ipfs/go-ipfs/pull/5747)) - refactor(cmds): use new cmds ([ipfs/go-ipfs#5659](https://github.com/ipfs/go-ipfs/pull/5659)) @@ -1808,7 +1808,7 @@ The next steps are: - make timecache duration configurable ([libp2p/go-libp2p-pubsub#148](https://github.com/libp2p/go-libp2p-pubsub/pull/148)) - godoc is not html either ([libp2p/go-libp2p-pubsub#147](https://github.com/libp2p/go-libp2p-pubsub/pull/147)) - godoc documentation is not markdown ([libp2p/go-libp2p-pubsub#146](https://github.com/libp2p/go-libp2p-pubsub/pull/146)) - - Add documentation for subscribe's non-instanteneous semantics ([libp2p/go-libp2p-pubsub#145](https://github.com/libp2p/go-libp2p-pubsub/pull/145)) + - Add documentation for subscribe's non-instantaneous semantics ([libp2p/go-libp2p-pubsub#145](https://github.com/libp2p/go-libp2p-pubsub/pull/145)) - Some documentation ([libp2p/go-libp2p-pubsub#140](https://github.com/libp2p/go-libp2p-pubsub/pull/140)) - rework peer tracking logic to handle multiple connections ([libp2p/go-libp2p-pubsub#132](https://github.com/libp2p/go-libp2p-pubsub/pull/132)) - github.com/libp2p/go-libp2p-pubsub-router: @@ -3255,7 +3255,7 @@ other requested improvements. See below for the full list of changes. 
- Make sure all keystore keys get republished ([ipfs/go-ipfs#3951](https://github.com/ipfs/go-ipfs/pull/3951)) - Documentation - Adding documentation on PubSub encodings ([ipfs/go-ipfs#3909](https://github.com/ipfs/go-ipfs/pull/3909)) - - Change 'neccessary' to 'necessary' ([ipfs/go-ipfs#3941](https://github.com/ipfs/go-ipfs/pull/3941)) + - Change 'neccessary' to 'necessary' ([ipfs/go-ipfs#3941](https://github.com/ipfs/go-ipfs/pull/3941)) - README.md: add Nix to the linux package managers ([ipfs/go-ipfs#3939](https://github.com/ipfs/go-ipfs/pull/3939)) - More verbose errors in filestore ([ipfs/go-ipfs#3964](https://github.com/ipfs/go-ipfs/pull/3964)) - Bug fixes @@ -3347,7 +3347,7 @@ look at all the other cool things added in 0.4.8 below. - Features - Implement unixfs directory sharding ([ipfs/go-ipfs#3042](https://github.com/ipfs/go-ipfs/pull/3042)) - Add DisableNatPortMap option ([ipfs/go-ipfs#3798](https://github.com/ipfs/go-ipfs/pull/3798)) - - Basic Filestore utilty commands ([ipfs/go-ipfs#3653](https://github.com/ipfs/go-ipfs/pull/3653)) + - Basic Filestore utility commands ([ipfs/go-ipfs#3653](https://github.com/ipfs/go-ipfs/pull/3653)) - Improvements - More Robust GC ([ipfs/go-ipfs#3712](https://github.com/ipfs/go-ipfs/pull/3712)) - Automatically fix permissions for docker volumes ([ipfs/go-ipfs#3744](https://github.com/ipfs/go-ipfs/pull/3744)) @@ -3580,7 +3580,7 @@ few other improvements to other parts of the codebase.
Notably: - Dependencies - Update libp2p to have fixed spdystream dep ([ipfs/go-ipfs#3210](https://github.com/ipfs/go-ipfs/pull/3210)) - Update libp2p and dht packages ([ipfs/go-ipfs#3263](https://github.com/ipfs/go-ipfs/pull/3263)) - - Update to libp2p 4.0.1 and propogate other changes ([ipfs/go-ipfs#3284](https://github.com/ipfs/go-ipfs/pull/3284)) + - Update to libp2p 4.0.1 and propagate other changes ([ipfs/go-ipfs#3284](https://github.com/ipfs/go-ipfs/pull/3284)) - Update to libp2p 4.0.4 ([ipfs/go-ipfs#3361](https://github.com/ipfs/go-ipfs/pull/3361)) - Update go-libp2p across codebase ([ipfs/go-ipfs#3406](https://github.com/ipfs/go-ipfs/pull/3406)) - Update to go-libp2p 4.1.0 ([ipfs/go-ipfs#3373](https://github.com/ipfs/go-ipfs/pull/3373)) diff --git a/docs/changelogs/v0.40.md b/docs/changelogs/v0.40.md new file mode 100644 index 000000000..adc7f252c --- /dev/null +++ b/docs/changelogs/v0.40.md @@ -0,0 +1,111 @@ +# Kubo changelog v0.40 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. 
+ +- [v0.40.0](#v0400) + +## v0.40.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [🧹 Automatic cleanup of interrupted imports](#-automatic-cleanup-of-interrupted-imports) + - [Routing V1 HTTP API now exposed by default](#routing-v1-http-api-now-exposed-by-default) + - [Track total size when adding pins](#track-total-size-when-adding-pins) + - [🚇 Improved `ipfs p2p` tunnels with foreground mode](#-improved-ipfs-p2p-tunnels-with-foreground-mode) + - [Improved `ipfs dag stat` output](#improved-ipfs-dag-stat-output) + - [Skip bad keys when listing](#skip-bad-keys-when-listing) + - [Accelerated DHT Client and Provide Sweep now work together](#accelerated-dht-client-and-provide-sweep-now-work-together) + - [📦️ Dependency updates](#-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +#### 🧹 Automatic cleanup of interrupted imports + +If you cancel `ipfs add` or `ipfs dag import` mid-operation, Kubo now automatically cleans up incomplete data on the next daemon start. Previously, interrupted imports would leave orphan blocks in your repository that were difficult to identify and remove without pins and running explicit garbage collection. + +Batch operations also use less memory now. Block data is written to disk immediately rather than held in RAM until the batch commits. + +Under the hood, the block storage layer (flatfs) was rewritten to use atomic batch operations via a temporary staging directory. See [go-ds-flatfs#142](https://github.com/ipfs/go-ds-flatfs/pull/142) for details. + +#### Routing V1 HTTP API now exposed by default + +The [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) is now exposed by default at `http://127.0.0.1:8080/routing/v1`. This allows light clients in browsers to use Kubo Gateway as a delegated routing backend instead of running a full DHT client.
Support for [IPIP-476: Delegated Routing DHT Closest Peers API](https://github.com/ipfs/specs/pull/476) is included. Can be disabled via [`Gateway.ExposeRoutingAPI`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayexposeroutingapi). + +#### Track total size when adding pins + +Adds total size progress tracking of pinned nodes during `ipfs pin add --progress`. The output now shows the total size of the pinned dag. + +Example output: + +``` +Fetched/Processed 336 nodes (83 MB) +``` + +#### 🚇 Improved `ipfs p2p` tunnels with foreground mode + +P2P tunnels can now run like SSH port forwarding: start a tunnel, use it, and it cleans up automatically when you're done. + +The new `--foreground` (`-f`) flag for `ipfs p2p listen` and `ipfs p2p forward` keeps the command running until interrupted. When you Ctrl+C, send SIGTERM, or stop the service, the tunnel is removed automatically: + +```console +$ ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 --foreground +Listening on /x/ssh, forwarding to /ip4/127.0.0.1/tcp/22, waiting for interrupt... +^C +Received interrupt, removing listener for /x/ssh +``` + +Without `--foreground`, commands return immediately and tunnels persist until explicitly closed (existing behavior). + +See [docs/p2p-tunnels.md](https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md) for usage examples. 
+ +#### Improved `ipfs dag stat` output + +The `ipfs dag stat` command has been improved for better terminal UX: + +- Progress output now uses a single line with carriage return, avoiding terminal flooding +- Progress is auto-detected: shown only in interactive terminals by default +- Human-readable sizes are now displayed alongside raw byte counts + +Example progress (interactive terminal): +``` +Fetched/Processed 84 blocks, 2097152 bytes (2.1 MB) +``` + +Example summary output: +``` +Summary +Total Size: 2097152 (2.1 MB) +Unique Blocks: 42 +Shared Size: 1048576 (1.0 MB) +Ratio: 1.500000 +``` + +Use `--progress=true` to force progress even when piped, or `--progress=false` to disable it. + +#### Skip bad keys when listing + +Change the `ipfs key list` behavior to log an error and continue listing keys when a key cannot be read from the keystore or decoded. + +#### Accelerated DHT Client and Provide Sweep now work together + +Previously, provide operations could start before the Accelerated DHT Client discovered enough peers, causing sweep mode to lose its efficiency benefits. Now, providing waits for the initial network crawl (about 10 minutes). Your content will be properly distributed across DHT regions after initial DHT map is created. Check `ipfs provide stat` to see when providing begins. + +#### 📦️ Dependency updates + +- update `go-libp2p` to [v0.46.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.46.0) + - Reduced WebRTC log noise by using debug level for pion errors ([go-libp2p#3426](https://github.com/libp2p/go-libp2p/pull/3426)). + - Fixed mDNS discovery on Windows and macOS by filtering addresses to reduce packet size ([go-libp2p#3434](https://github.com/libp2p/go-libp2p/pull/3434)). +- update `quic-go` to [v0.57.1](https://github.com/quic-go/quic-go/releases/tag/v0.57.1) (incl. 
[v0.56.0](https://github.com/quic-go/quic-go/releases/tag/v0.56.0) + [v0.57.0](https://github.com/quic-go/quic-go/releases/tag/v0.57.0)) +- update `p2p-forge` to [v0.7.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.7.0) +- update `go-ds-pebble` to [v0.5.8](https://github.com/ipfs/go-ds-pebble/releases/tag/v0.5.8) + - updates `github.com/cockroachdb/pebble` to [v2.1.3](https://github.com/cockroachdb/pebble/releases/tag/v2.1.3) to enable Go 1.26 support + +### 📝 Changelog + +### 👨‍👩‍👧‍👦 Contributors diff --git a/docs/changelogs/v0.5.md b/docs/changelogs/v0.5.md index aa5f9c957..9e49565f6 100644 --- a/docs/changelogs/v0.5.md +++ b/docs/changelogs/v0.5.md @@ -357,7 +357,7 @@ It's now possible to initialize an IPFS node with an existing IPFS config by run > ipfs init /path/to/existing/config ``` -This will re-use the existing configuration in it's entirety (including the private key) and can be useful when: +This will reuse the existing configuration in its entirety (including the private key) and can be useful when: * Migrating a node's identity between machines without keeping the data. * Resetting the datastore.
@@ -773,7 +773,7 @@ As usual, this release contains several Windows specific fixes and improvements: - Introduce first strategic provider: do nothing ([ipfs/go-ipfs#6292](https://github.com/ipfs/go-ipfs/pull/6292)) - github.com/ipfs/go-bitswap (v0.0.8-e37498cf10d6 -> v0.2.13): - refactor: remove WantManager ([ipfs/go-bitswap#374](https://github.com/ipfs/go-bitswap/pull/374)) - - Send CANCELs when session context is cancelled ([ipfs/go-bitswap#375](https://github.com/ipfs/go-bitswap/pull/375)) + - Send CANCELs when session context is canceled ([ipfs/go-bitswap#375](https://github.com/ipfs/go-bitswap/pull/375)) - refactor: remove unused code ([ipfs/go-bitswap#373](https://github.com/ipfs/go-bitswap/pull/373)) - Change timing for DONT_HAVE timeouts to be more conservative ([ipfs/go-bitswap#371](https://github.com/ipfs/go-bitswap/pull/371)) - fix: avoid calling ctx.SetDeadline() every time we send a message ([ipfs/go-bitswap#369](https://github.com/ipfs/go-bitswap/pull/369)) diff --git a/docs/changelogs/v0.6.md b/docs/changelogs/v0.6.md index 960125594..40f5f1727 100644 --- a/docs/changelogs/v0.6.md +++ b/docs/changelogs/v0.6.md @@ -14,7 +14,7 @@ The highlights in this release include: **MIGRATION:** This release contains a small config migration to enable listening on the QUIC transport in addition the TCP transport. This migration will: * Normalize multiaddrs in the bootstrap list to use the `/p2p/Qm...` syntax for multiaddrs instead of the `/ipfs/Qm...` syntax. -* Add QUIC addresses for the default bootstrapers, as necessary. If you've removed the default bootstrappers from your bootstrap config, the migration won't add them back. +* Add QUIC addresses for the default bootstrappers, as necessary. If you've removed the default bootstrappers from your bootstrap config, the migration won't add them back. * Add a QUIC listener address to mirror any TCP addresses present in your config. 
For example, if you're listening on `/ip4/0.0.0.0/tcp/1234`, this migration will add a listen address for `/ip4/0.0.0.0/udp/1234/quic`. #### QUIC by default @@ -114,7 +114,7 @@ Use-cases: - docs: X-Forwarded-Proto: https ([ipfs/go-ipfs#7306](https://github.com/ipfs/go-ipfs/pull/7306)) - fix(mkreleaselog): make robust against running in different working directories ([ipfs/go-ipfs#7310](https://github.com/ipfs/go-ipfs/pull/7310)) - fix(mkreleasenotes): include commits directly to master ([ipfs/go-ipfs#7296](https://github.com/ipfs/go-ipfs/pull/7296)) - - write api file automically ([ipfs/go-ipfs#7282](https://github.com/ipfs/go-ipfs/pull/7282)) + - write api file atomically ([ipfs/go-ipfs#7282](https://github.com/ipfs/go-ipfs/pull/7282)) - systemd: disable swap-usage for ipfs ([ipfs/go-ipfs#7299](https://github.com/ipfs/go-ipfs/pull/7299)) - systemd: add helptext ([ipfs/go-ipfs#7265](https://github.com/ipfs/go-ipfs/pull/7265)) - systemd: add the link to the docs ([ipfs/go-ipfs#7287](https://github.com/ipfs/go-ipfs/pull/7287)) @@ -177,7 +177,7 @@ Use-cases: - feat: add peering service config section ([ipfs/go-ipfs-config#96](https://github.com/ipfs/go-ipfs-config/pull/96)) - fix: include key size in key init method ([ipfs/go-ipfs-config#95](https://github.com/ipfs/go-ipfs-config/pull/95)) - QUIC: remove experimental config option ([ipfs/go-ipfs-config#93](https://github.com/ipfs/go-ipfs-config/pull/93)) - - fix boostrap peers ([ipfs/go-ipfs-config#94](https://github.com/ipfs/go-ipfs-config/pull/94)) + - fix bootstrap peers ([ipfs/go-ipfs-config#94](https://github.com/ipfs/go-ipfs-config/pull/94)) - default config: add QUIC listening ports + quic to mars.i.ipfs.io ([ipfs/go-ipfs-config#91](https://github.com/ipfs/go-ipfs-config/pull/91)) - feat: remove strict signing pubsub option.
([ipfs/go-ipfs-config#90](https://github.com/ipfs/go-ipfs-config/pull/90)) - Add autocomment configuration @@ -260,7 +260,7 @@ Use-cases: - enhancement/remove-unused-variable ([libp2p/go-libp2p-kad-dht#633](https://github.com/libp2p/go-libp2p-kad-dht/pull/633)) - Put back TestSelfWalkOnAddressChange ([libp2p/go-libp2p-kad-dht#648](https://github.com/libp2p/go-libp2p-kad-dht/pull/648)) - Routing Table Refresh manager (#601) ([libp2p/go-libp2p-kad-dht#601](https://github.com/libp2p/go-libp2p-kad-dht/pull/601)) - - Boostrap empty RT and Optimize allocs when we discover new peers (#631) ([libp2p/go-libp2p-kad-dht#631](https://github.com/libp2p/go-libp2p-kad-dht/pull/631)) + - Bootstrap empty RT and Optimize allocs when we discover new peers (#631) ([libp2p/go-libp2p-kad-dht#631](https://github.com/libp2p/go-libp2p-kad-dht/pull/631)) - fix all flaky tests ([libp2p/go-libp2p-kad-dht#628](https://github.com/libp2p/go-libp2p-kad-dht/pull/628)) - Update default concurrency parameter ([libp2p/go-libp2p-kad-dht#605](https://github.com/libp2p/go-libp2p-kad-dht/pull/605)) - clean up a channel that was dangling ([libp2p/go-libp2p-kad-dht#620](https://github.com/libp2p/go-libp2p-kad-dht/pull/620)) diff --git a/docs/changelogs/v0.7.md b/docs/changelogs/v0.7.md index 0160916ba..a06602cf3 100644 --- a/docs/changelogs/v0.7.md +++ b/docs/changelogs/v0.7.md @@ -149,7 +149,7 @@ The scripts in https://github.com/ipfs/go-ipfs-example-plugin have been updated - support flatfs fuzzing ([ipfs/go-datastore#157](https://github.com/ipfs/go-datastore/pull/157)) - fuzzing harness (#153) ([ipfs/go-datastore#153](https://github.com/ipfs/go-datastore/pull/153)) - feat(mount): don't give up on error ([ipfs/go-datastore#146](https://github.com/ipfs/go-datastore/pull/146)) - - /test: fix bad ElemCount/10 lenght (should not be divided) ([ipfs/go-datastore#152](https://github.com/ipfs/go-datastore/pull/152)) + - /test: fix bad ElemCount/10 length (should not be divided)
([ipfs/go-datastore#152](https://github.com/ipfs/go-datastore/pull/152)) - github.com/ipfs/go-ds-flatfs (v0.4.4 -> v0.4.5): - Add os.Rename wrapper for Plan 9 (#87) ([ipfs/go-ds-flatfs#87](https://github.com/ipfs/go-ds-flatfs/pull/87)) - github.com/ipfs/go-fs-lock (v0.0.5 -> v0.0.6): @@ -390,7 +390,7 @@ The scripts in https://github.com/ipfs/go-ipfs-example-plugin have been updated - reset the PTO count before setting the timer when dropping a PN space ([lucas-clemente/quic-go#2657](https://github.com/lucas-clemente/quic-go/pull/2657)) - enforce that a connection ID is not retired in a packet that uses that connection ID ([lucas-clemente/quic-go#2651](https://github.com/lucas-clemente/quic-go/pull/2651)) - don't retire the conn ID that's in use when receiving a retransmission ([lucas-clemente/quic-go#2652](https://github.com/lucas-clemente/quic-go/pull/2652)) - - fix flaky cancelation integration test ([lucas-clemente/quic-go#2649](https://github.com/lucas-clemente/quic-go/pull/2649)) + - fix flaky cancellation integration test ([lucas-clemente/quic-go#2649](https://github.com/lucas-clemente/quic-go/pull/2649)) - fix crash when the qlog callbacks returns a nil io.WriteCloser ([lucas-clemente/quic-go#2648](https://github.com/lucas-clemente/quic-go/pull/2648)) - fix flaky server test on Travis ([lucas-clemente/quic-go#2645](https://github.com/lucas-clemente/quic-go/pull/2645)) - fix a typo in the logging package test suite @@ -406,7 +406,7 @@ The scripts in https://github.com/ipfs/go-ipfs-example-plugin have been updated - remove superfluous parameters logged when not doing 0-RTT ([lucas-clemente/quic-go#2632](https://github.com/lucas-clemente/quic-go/pull/2632)) - return an infinite bandwidth if the RTT is zero ([lucas-clemente/quic-go#2636](https://github.com/lucas-clemente/quic-go/pull/2636)) - drop support for Go 1.13 ([lucas-clemente/quic-go#2628](https://github.com/lucas-clemente/quic-go/pull/2628)) - - remove superfluos handleResetStreamFrame method on the 
stream ([lucas-clemente/quic-go#2623](https://github.com/lucas-clemente/quic-go/pull/2623)) + - remove superfluous handleResetStreamFrame method on the stream ([lucas-clemente/quic-go#2623](https://github.com/lucas-clemente/quic-go/pull/2623)) - implement a token-bucket pacing algorithm ([lucas-clemente/quic-go#2615](https://github.com/lucas-clemente/quic-go/pull/2615)) - gracefully handle concurrent stream writes and cancellations ([lucas-clemente/quic-go#2624](https://github.com/lucas-clemente/quic-go/pull/2624)) - log sent packets right before sending them out ([lucas-clemente/quic-go#2613](https://github.com/lucas-clemente/quic-go/pull/2613)) diff --git a/docs/changelogs/v0.8.md b/docs/changelogs/v0.8.md index 7f4e1d759..8b28ff706 100644 --- a/docs/changelogs/v0.8.md +++ b/docs/changelogs/v0.8.md @@ -1,4 +1,4 @@ -# go-ipfs changelog v0.8 +# go-ipfs changelog v0.8 ## v0.8.0 2021-02-18 @@ -26,7 +26,7 @@ ipfs pin remote service add myservice https://myservice.tld:1234/api/path myacce ipfs pin remote add /ipfs/bafymydata --service=myservice --name=myfile ipfs pin remote ls --service=myservice --name=myfile ipfs pin remote ls --service=myservice --cid=bafymydata -ipfs pin remote rm --serivce=myservice --name=myfile +ipfs pin remote rm --service=myservice --name=myfile ``` A few notes: @@ -160,7 +160,7 @@ Go 1.15 (the latest version of Go) [no longer supports](https://github.com/golan - Update go-ipld-prime@v0.5.0 (#92) ([ipfs/go-graphsync#92](https://github.com/ipfs/go-graphsync/pull/92)) - refactor(metadata): use cbor-gen encoding (#96) ([ipfs/go-graphsync#96](https://github.com/ipfs/go-graphsync/pull/96)) - Release/v0.1.2 ([ipfs/go-graphsync#95](https://github.com/ipfs/go-graphsync/pull/95)) - - Return Request context cancelled error (#93) ([ipfs/go-graphsync#93](https://github.com/ipfs/go-graphsync/pull/93)) + - Return Request context canceled error (#93) ([ipfs/go-graphsync#93](https://github.com/ipfs/go-graphsync/pull/93)) - feat(benchmarks): add p2p stress
test (#91) ([ipfs/go-graphsync#91](https://github.com/ipfs/go-graphsync/pull/91)) - Benchmark framework + First memory fixes (#89) ([ipfs/go-graphsync#89](https://github.com/ipfs/go-graphsync/pull/89)) - docs(CHANGELOG): update for v0.1.1 ([ipfs/go-graphsync#85](https://github.com/ipfs/go-graphsync/pull/85)) @@ -277,7 +277,7 @@ Go 1.15 (the latest version of Go) [no longer supports](https://github.com/golan - satisfy race detector - clean up - copy string topic - - add test for score adjustment from topis params reset + - add test for score adjustment from topic params reset - prettify things - add test for topic score parameter reset method - add test for topic score parameter reset @@ -315,7 +315,7 @@ Go 1.15 (the latest version of Go) [no longer supports](https://github.com/golan - pass a conn that can be type asserted to a net.UDPConn to quic-go ([libp2p/go-libp2p-quic-transport#180](https://github.com/libp2p/go-libp2p-quic-transport/pull/180)) - add more integration tests ([libp2p/go-libp2p-quic-transport#181](https://github.com/libp2p/go-libp2p-quic-transport/pull/181)) - always close the connection in the cmd client ([libp2p/go-libp2p-quic-transport#175](https://github.com/libp2p/go-libp2p-quic-transport/pull/175)) - - use GitHub Actions to test interopability of releases ([libp2p/go-libp2p-quic-transport#173](https://github.com/libp2p/go-libp2p-quic-transport/pull/173)) + - use GitHub Actions to test interoperability of releases ([libp2p/go-libp2p-quic-transport#173](https://github.com/libp2p/go-libp2p-quic-transport/pull/173)) - Implement CloseRead/CloseWrite ([libp2p/go-libp2p-quic-transport#174](https://github.com/libp2p/go-libp2p-quic-transport/pull/174)) - enable quic-go metrics collection ([libp2p/go-libp2p-quic-transport#172](https://github.com/libp2p/go-libp2p-quic-transport/pull/172)) - github.com/libp2p/go-libp2p-swarm (v0.2.8 -> v0.4.0): diff --git a/docs/changelogs/v0.9.md b/docs/changelogs/v0.9.md index 7289adde7..c0dba5abd 100644 --- 
a/docs/changelogs/v0.9.md +++ b/docs/changelogs/v0.9.md @@ -337,7 +337,7 @@ SECIO was deprecated and turned off by default given the prevalence of TLS and N - schema/gen/go: please vet a bit more - Introduce 'quip' data building helpers. ([ipld/go-ipld-prime#134](https://github.com/ipld/go-ipld-prime/pull/134)) - gengo: support for unions with stringprefix representation. ([ipld/go-ipld-prime#133](https://github.com/ipld/go-ipld-prime/pull/133)) - - target of opporunity DRY improvement: use more shared templates for structs with stringjoin representations. + - target of opportunity DRY improvement: use more shared templates for structs with stringjoin representations. - fix small consistency typo in gen function names. - drop old generation mechanisms that were already deprecated. - error type cleanup, and helpers. @@ -571,7 +571,7 @@ SECIO was deprecated and turned off by default given the prevalence of TLS and N - fix retry key and nonce for draft-34 ([lucas-clemente/quic-go#3062](https://github.com/lucas-clemente/quic-go/pull/3062)) - implement DPLPMTUD ([lucas-clemente/quic-go#3028](https://github.com/lucas-clemente/quic-go/pull/3028)) - only read multiple packets at a time after handshake completion ([lucas-clemente/quic-go#3041](https://github.com/lucas-clemente/quic-go/pull/3041)) - - make the certificate verificiation integration tests more explicit ([lucas-clemente/quic-go#3040](https://github.com/lucas-clemente/quic-go/pull/3040)) + - make the certificate verification integration tests more explicit ([lucas-clemente/quic-go#3040](https://github.com/lucas-clemente/quic-go/pull/3040)) - update gomock to v1.5.0, use mockgen source mode ([lucas-clemente/quic-go#3049](https://github.com/lucas-clemente/quic-go/pull/3049)) - trace dropping of 0-RTT keys ([lucas-clemente/quic-go#3054](https://github.com/lucas-clemente/quic-go/pull/3054)) - improve timeout measurement in the timeout test 
([lucas-clemente/quic-go#3042](https://github.com/lucas-clemente/quic-go/pull/3042)) @@ -596,10 +596,10 @@ SECIO was deprecated and turned off by default given the prevalence of TLS and N - make sure the server is stopped before closing all server sessions ([lucas-clemente/quic-go#3020](https://github.com/lucas-clemente/quic-go/pull/3020)) - increase the size of the send queue ([lucas-clemente/quic-go#3016](https://github.com/lucas-clemente/quic-go/pull/3016)) - prioritize receiving packets over sending out more packets ([lucas-clemente/quic-go#3015](https://github.com/lucas-clemente/quic-go/pull/3015)) - - reenable key updates for HTTP/3 ([lucas-clemente/quic-go#3017](https://github.com/lucas-clemente/quic-go/pull/3017)) + - re-enable key updates for HTTP/3 ([lucas-clemente/quic-go#3017](https://github.com/lucas-clemente/quic-go/pull/3017)) - check for errors after handling each previously undecryptable packet ([lucas-clemente/quic-go#3011](https://github.com/lucas-clemente/quic-go/pull/3011)) - fix flaky streams map test on Windows ([lucas-clemente/quic-go#3013](https://github.com/lucas-clemente/quic-go/pull/3013)) - - fix flaky stream cancelation integration test ([lucas-clemente/quic-go#3014](https://github.com/lucas-clemente/quic-go/pull/3014)) + - fix flaky stream cancellation integration test ([lucas-clemente/quic-go#3014](https://github.com/lucas-clemente/quic-go/pull/3014)) - preallocate a slice of one frame when packing a packet ([lucas-clemente/quic-go#3018](https://github.com/lucas-clemente/quic-go/pull/3018)) - allow sending of ACKs when pacing limited ([lucas-clemente/quic-go#3010](https://github.com/lucas-clemente/quic-go/pull/3010)) - fix qlogging of the packet payload length ([lucas-clemente/quic-go#3004](https://github.com/lucas-clemente/quic-go/pull/3004)) @@ -624,7 +624,7 @@ SECIO was deprecated and turned off by default given the prevalence of TLS and N - fix flaky qlog test 
([lucas-clemente/quic-go#2981](https://github.com/lucas-clemente/quic-go/pull/2981)) - only run gofumpt on .go files in pre-commit hook ([lucas-clemente/quic-go#2983](https://github.com/lucas-clemente/quic-go/pull/2983)) - fix outdated comment for the http3.Server - - make the OpenStreamSync cancelation test less flaky ([lucas-clemente/quic-go#2978](https://github.com/lucas-clemente/quic-go/pull/2978)) + - make the OpenStreamSync cancellation test less flaky ([lucas-clemente/quic-go#2978](https://github.com/lucas-clemente/quic-go/pull/2978)) - add some useful pre-commit hooks ([lucas-clemente/quic-go#2979](https://github.com/lucas-clemente/quic-go/pull/2979)) - publicize QUIC varint reading and writing ([lucas-clemente/quic-go#2973](https://github.com/lucas-clemente/quic-go/pull/2973)) - add a http3.RoundTripOpt to skip the request scheme check ([lucas-clemente/quic-go#2962](https://github.com/lucas-clemente/quic-go/pull/2962)) diff --git a/docs/config.md b/docs/config.md index 2bcf4b0f2..986798296 100644 --- a/docs/config.md +++ b/docs/config.md @@ -1,6 +1,6 @@ # The Kubo config file -The Kubo (go-ipfs) config file is a JSON document located at `$IPFS_PATH/config`. It +The Kubo config file is a JSON document located at `$IPFS_PATH/config`. It is read once at node instantiation, either for an offline command, or when starting the daemon. Commands that execute on a running daemon do not read the config file at runtime. @@ -27,6 +27,23 @@ config file at runtime. 
- [`AutoNAT.Throttle.GlobalLimit`](#autonatthrottlegloballimit) - [`AutoNAT.Throttle.PeerLimit`](#autonatthrottlepeerlimit) - [`AutoNAT.Throttle.Interval`](#autonatthrottleinterval) + - [`AutoTLS`](#autotls) + - [`AutoTLS.Enabled`](#autotlsenabled) + - [`AutoTLS.AutoWSS`](#autotlsautowss) + - [`AutoTLS.ShortAddrs`](#autotlsshortaddrs) + - [`AutoTLS.DomainSuffix`](#autotlsdomainsuffix) + - [`AutoTLS.RegistrationEndpoint`](#autotlsregistrationendpoint) + - [`AutoTLS.RegistrationToken`](#autotlsregistrationtoken) + - [`AutoTLS.RegistrationDelay`](#autotlsregistrationdelay) + - [`AutoTLS.CAEndpoint`](#autotlscaendpoint) + - [`AutoConf`](#autoconf) + - [`AutoConf.URL`](#autoconfurl) + - [`AutoConf.Enabled`](#autoconfenabled) + - [`AutoConf.RefreshInterval`](#autoconfrefreshinterval) + - [`AutoConf.TLSInsecureSkipVerify`](#autoconftlsinsecureskipverify) + - [`Bitswap`](#bitswap) + - [`Bitswap.Libp2pEnabled`](#bitswaplibp2penabled) + - [`Bitswap.ServerEnabled`](#bitswapserverenabled) - [`Bootstrap`](#bootstrap) - [`Datastore`](#datastore) - [`Datastore.StorageMax`](#datastorestoragemax) @@ -34,20 +51,27 @@ config file at runtime. 
- [`Datastore.GCPeriod`](#datastoregcperiod) - [`Datastore.HashOnRead`](#datastorehashonread) - [`Datastore.BloomFilterSize`](#datastorebloomfiltersize) + - [`Datastore.WriteThrough`](#datastorewritethrough) + - [`Datastore.BlockKeyCacheSize`](#datastoreblockkeycachesize) - [`Datastore.Spec`](#datastorespec) - [`Discovery`](#discovery) - [`Discovery.MDNS`](#discoverymdns) - [`Discovery.MDNS.Enabled`](#discoverymdnsenabled) - [`Discovery.MDNS.Interval`](#discoverymdnsinterval) - [`Experimental`](#experimental) + - [`Experimental.Libp2pStreamMounting`](#experimentallibp2pstreammounting) - [`Gateway`](#gateway) - [`Gateway.NoFetch`](#gatewaynofetch) - [`Gateway.NoDNSLink`](#gatewaynodnslink) - [`Gateway.DeserializedResponses`](#gatewaydeserializedresponses) - [`Gateway.DisableHTMLErrors`](#gatewaydisablehtmlerrors) - [`Gateway.ExposeRoutingAPI`](#gatewayexposeroutingapi) + - [`Gateway.RetrievalTimeout`](#gatewayretrievaltimeout) + - [`Gateway.MaxRangeRequestFileSize`](#gatewaymaxrangerequestfilesize) + - [`Gateway.MaxConcurrentRequests`](#gatewaymaxconcurrentrequests) - [`Gateway.HTTPHeaders`](#gatewayhttpheaders) - [`Gateway.RootRedirect`](#gatewayrootredirect) + - [`Gateway.DiagnosticServiceURL`](#gatewaydiagnosticserviceurl) - [`Gateway.FastDirIndexThreshold`](#gatewayfastdirindexthreshold) - [`Gateway.Writable`](#gatewaywritable) - [`Gateway.PathPrefixes`](#gatewaypathprefixes) @@ -68,7 +92,15 @@ config file at runtime. 
- [`Internal.Bitswap.EngineBlockstoreWorkerCount`](#internalbitswapengineblockstoreworkercount) - [`Internal.Bitswap.EngineTaskWorkerCount`](#internalbitswapenginetaskworkercount) - [`Internal.Bitswap.MaxOutstandingBytesPerPeer`](#internalbitswapmaxoutstandingbytesperpeer) - - [`Internal.Bitswap.ProviderSearchDelay`](#internalbitswapprovidersearchdelay) + - [`Internal.Bitswap.ProviderSearchDelay`](#internalbitswapprovidersearchdelay) + - [`Internal.Bitswap.ProviderSearchMaxResults`](#internalbitswapprovidersearchmaxresults) + - [`Internal.Bitswap.BroadcastControl`](#internalbitswapbroadcastcontrol) + - [`Internal.Bitswap.BroadcastControl.Enable`](#internalbitswapbroadcastcontrolenable) + - [`Internal.Bitswap.BroadcastControl.MaxPeers`](#internalbitswapbroadcastcontrolmaxpeers) + - [`Internal.Bitswap.BroadcastControl.LocalPeers`](#internalbitswapbroadcastcontrollocalpeers) + - [`Internal.Bitswap.BroadcastControl.PeeredPeers`](#internalbitswapbroadcastcontrolpeeredpeers) + - [`Internal.Bitswap.BroadcastControl.MaxRandomPeers`](#internalbitswapbroadcastcontrolmaxrandompeers) + - [`Internal.Bitswap.BroadcastControl.SendToPendingPeers`](#internalbitswapbroadcastcontrolsendtopendingpeers) - [`Internal.UnixFSShardingSizeThreshold`](#internalunixfsshardingsizethreshold) - [`Ipns`](#ipns) - [`Ipns.RepublishPeriod`](#ipnsrepublishperiod) @@ -76,12 +108,14 @@ config file at runtime. - [`Ipns.ResolveCacheSize`](#ipnsresolvecachesize) - [`Ipns.MaxCacheTTL`](#ipnsmaxcachettl) - [`Ipns.UsePubsub`](#ipnsusepubsub) + - [`Ipns.DelegatedPublishers`](#ipnsdelegatedpublishers) - [`Migration`](#migration) - [`Migration.DownloadSources`](#migrationdownloadsources) - [`Migration.Keep`](#migrationkeep) - [`Mounts`](#mounts) - [`Mounts.IPFS`](#mountsipfs) - [`Mounts.IPNS`](#mountsipns) + - [`Mounts.MFS`](#mountsmfs) - [`Mounts.FuseAllowOther`](#mountsfuseallowother) - [`Pinning`](#pinning) - [`Pinning.RemoteServices`](#pinningremoteservices) @@ -93,6 +127,23 @@ config file at runtime. 
- [`Pinning.RemoteServices: Policies.MFS.Enabled`](#pinningremoteservices-policiesmfsenabled) - [`Pinning.RemoteServices: Policies.MFS.PinName`](#pinningremoteservices-policiesmfspinname) - [`Pinning.RemoteServices: Policies.MFS.RepinInterval`](#pinningremoteservices-policiesmfsrepininterval) + - [`Provide`](#provide) + - [`Provide.Enabled`](#provideenabled) + - [`Provide.Strategy`](#providestrategy) + - [`Provide.DHT`](#providedht) + - [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers) + - [`Provide.DHT.Interval`](#providedhtinterval) + - [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled) + - [`Provide.DHT.ResumeEnabled`](#providedhtresumeenabled) + - [`Provide.DHT.DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers) + - [`Provide.DHT.DedicatedBurstWorkers`](#providedhtdedicatedburstworkers) + - [`Provide.DHT.MaxProvideConnsPerWorker`](#providedhtmaxprovideconnsperworker) + - [`Provide.DHT.KeystoreBatchSize`](#providedhtkeystorebatchsize) + - [`Provide.DHT.OfflineDelay`](#providedhtofflinedelay) + - [`Provider`](#provider) + - [`Provider.Enabled`](#providerenabled) + - [`Provider.Strategy`](#providerstrategy) + - [`Provider.WorkerCount`](#providerworkercount) - [`Pubsub`](#pubsub) - [`Pubsub.Enabled`](#pubsubenabled) - [`Pubsub.Router`](#pubsubrouter) @@ -103,15 +154,17 @@ config file at runtime. 
- [`Peering.Peers`](#peeringpeers) - [`Reprovider`](#reprovider) - [`Reprovider.Interval`](#reproviderinterval) - - [`Reprovider.Strategy`](#reproviderstrategy) + - [`Reprovider.Strategy`](#providestrategy) - [`Routing`](#routing) - [`Routing.Type`](#routingtype) + - [`Routing.DelegatedRouters`](#routingdelegatedrouters) - [`Routing.AcceleratedDHTClient`](#routingaccelerateddhtclient) - [`Routing.LoopbackAddressesOnLanDHT`](#routingloopbackaddressesonlandht) + - [`Routing.IgnoreProviders`](#routingignoreproviders) - [`Routing.Routers`](#routingrouters) - - [`Routing.Routers: Type`](#routingrouters-type) - - [`Routing.Routers: Parameters`](#routingrouters-parameters) - - [`Routing: Methods`](#routing-methods) + - [`Routing.Routers.[name].Type`](#routingroutersnametype) + - [`Routing.Routers.[name].Parameters`](#routingroutersnameparameters) + - [`Routing.Methods`](#routingmethods) - [`Swarm`](#swarm) - [`Swarm.AddrFilters`](#swarmaddrfilters) - [`Swarm.DisableBandwidthMetrics`](#swarmdisablebandwidthmetrics) @@ -142,6 +195,7 @@ config file at runtime. - [`Swarm.ConnMgr.LowWater`](#swarmconnmgrlowwater) - [`Swarm.ConnMgr.HighWater`](#swarmconnmgrhighwater) - [`Swarm.ConnMgr.GracePeriod`](#swarmconnmgrgraceperiod) + - [`Swarm.ConnMgr.SilencePeriod`](#swarmconnmgrsilenceperiod) - [`Swarm.ResourceMgr`](#swarmresourcemgr) - [`Swarm.ResourceMgr.Enabled`](#swarmresourcemgrenabled) - [`Swarm.ResourceMgr.MaxMemory`](#swarmresourcemgrmaxmemory) @@ -165,11 +219,26 @@ config file at runtime. 
- [`DNS`](#dns) - [`DNS.Resolvers`](#dnsresolvers) - [`DNS.MaxCacheTTL`](#dnsmaxcachettl) + - [`HTTPRetrieval`](#httpretrieval) + - [`HTTPRetrieval.Enabled`](#httpretrievalenabled) + - [`HTTPRetrieval.Allowlist`](#httpretrievalallowlist) + - [`HTTPRetrieval.Denylist`](#httpretrievaldenylist) + - [`HTTPRetrieval.NumWorkers`](#httpretrievalnumworkers) + - [`HTTPRetrieval.MaxBlockSize`](#httpretrievalmaxblocksize) + - [`HTTPRetrieval.TLSInsecureSkipVerify`](#httpretrievaltlsinsecureskipverify) - [`Import`](#import) - [`Import.CidVersion`](#importcidversion) - [`Import.UnixFSRawLeaves`](#importunixfsrawleaves) - [`Import.UnixFSChunker`](#importunixfschunker) - [`Import.HashFunction`](#importhashfunction) + - [`Import.FastProvideRoot`](#importfastprovideroot) + - [`Import.FastProvideWait`](#importfastprovidewait) + - [`Import.BatchMaxNodes`](#importbatchmaxnodes) + - [`Import.BatchMaxSize`](#importbatchmaxsize) + - [`Import.UnixFSFileMaxLinks`](#importunixfsfilemaxlinks) + - [`Import.UnixFSDirectoryMaxLinks`](#importunixfsdirectorymaxlinks) + - [`Import.UnixFSHAMTDirectoryMaxFanout`](#importunixfshamtdirectorymaxfanout) + - [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold) - [`Version`](#version) - [`Version.AgentSuffix`](#versionagentsuffix) - [`Version.SwarmCheckEnabled`](#versionswarmcheckenabled) @@ -180,13 +249,23 @@ config file at runtime. 
- [`default-datastore` profile](#default-datastore-profile) - [`local-discovery` profile](#local-discovery-profile) - [`default-networking` profile](#default-networking-profile) + - [`autoconf-on` profile](#autoconf-on-profile) + - [`autoconf-off` profile](#autoconf-off-profile) - [`flatfs` profile](#flatfs-profile) + - [`flatfs-measure` profile](#flatfs-measure-profile) + - [`pebbleds` profile](#pebbleds-profile) + - [`pebbleds-measure` profile](#pebbleds-measure-profile) - [`badgerds` profile](#badgerds-profile) + - [`badgerds-measure` profile](#badgerds-measure-profile) - [`lowpower` profile](#lowpower-profile) - [`announce-off` profile](#announce-off-profile) - [`announce-on` profile](#announce-on-profile) - [`legacy-cid-v0` profile](#legacy-cid-v0-profile) - [`test-cid-v1` profile](#test-cid-v1-profile) + - [`test-cid-v1-wide` profile](#test-cid-v1-wide-profile) + - [Security](#security) + - [Port and Network Exposure](#port-and-network-exposure) + - [Security Best Practices](#security-best-practices) - [Types](#types) - [`flag`](#flag) - [`priority`](#priority) @@ -203,47 +282,76 @@ Contains information about various listener addresses to be used by this node. ### `Addresses.API` -Multiaddr or array of multiaddrs describing the address to serve +[Multiaddr][multiaddr] or array of multiaddrs describing the addresses to serve the local [Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`). Supported Transports: -* tcp/ip{4,6} - `/ipN/.../tcp/...` -* unix - `/unix/path/to/socket` +- tcp/ip{4,6} - `/ipN/.../tcp/...` +- unix - `/unix/path/to/socket` + +> [!CAUTION] +> **NEVER EXPOSE UNPROTECTED ADMIN RPC TO LAN OR THE PUBLIC INTERNET** +> +> The RPC API grants admin-level access to your Kubo IPFS node, including +> configuration and secret key management. +> +> By default, it is bound to localhost for security reasons. 
Exposing it to LAN +> or the public internet is highly risky—similar to exposing a SQL database or +> backend service without authentication middleware +> +> - If you need secure access to a subset of RPC, secure it with [`API.Authorizations`](#apiauthorizations) or custom auth middleware running in front of the localhost-only RPC port defined here. +> - If you are looking for an interface designed for browsers and public internet, use [`Addresses.Gateway`](#addressesgateway) port instead. +> - See [Security section](#security) for network exposure considerations. Default: `/ip4/127.0.0.1/tcp/5001` -Type: `strings` (multiaddrs) +Type: `strings` ([multiaddrs][multiaddr]) ### `Addresses.Gateway` -Multiaddr or array of multiaddrs describing the address to serve +[Multiaddr][multiaddr] or array of multiaddrs describing the address to serve the local [HTTP gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs`, `/ipns`) on. Supported Transports: -* tcp/ip{4,6} - `/ipN/.../tcp/...` -* unix - `/unix/path/to/socket` +- tcp/ip{4,6} - `/ipN/.../tcp/...` +- unix - `/unix/path/to/socket` + +> [!CAUTION] +> **SECURITY CONSIDERATIONS FOR GATEWAY EXPOSURE** +> +> By default, the gateway is bound to localhost for security. If you bind to `0.0.0.0` +> or a public IP, anyone with access can trigger retrieval of arbitrary CIDs, causing +> bandwidth usage and potential exposure to malicious content. Limit with +> [`Gateway.NoFetch`](#gatewaynofetch). Consider firewall rules, authentication, +> and [`Gateway.PublicGateways`](#gatewaypublicgateways) for public exposure. +> See [Security section](#security) for network exposure considerations. Default: `/ip4/127.0.0.1/tcp/8080` -Type: `strings` (multiaddrs) +Type: `strings` ([multiaddrs][multiaddr]) ### `Addresses.Swarm` -An array of multiaddrs describing which addresses to listen on for p2p swarm +An array of [multiaddrs][multiaddr] describing which addresses to listen on for p2p swarm connections. 
Supported Transports: -* tcp/ip{4,6} - `/ipN/.../tcp/...` -* websocket - `/ipN/.../tcp/.../ws` -* quicv1 (RFC9000) - `/ipN/.../udp/.../quic-v1` - can share the same two tuple with `/quic-v1/webtransport` -* webtransport `/ipN/.../udp/.../quic-v1/webtransport` - can share the same two tuple with `/quic-v1` +- tcp/ip{4,6} - `/ipN/.../tcp/...` +- websocket - `/ipN/.../tcp/.../ws` +- quicv1 (RFC9000) - `/ipN/.../udp/.../quic-v1` - can share the same two tuple with `/quic-v1/webtransport` +- webtransport `/ipN/.../udp/.../quic-v1/webtransport` - can share the same two tuple with `/quic-v1` + +> [!IMPORTANT] +> Make sure your firewall rules allow incoming connections on both TCP and UDP ports defined here. +> See [Security section](#security) for network exposure considerations. Note that quic (Draft-29) used to be supported with the format `/ipN/.../udp/.../quic`, but has since been [removed](https://github.com/libp2p/go-libp2p/releases/tag/v0.30.0). Default: + ```json [ "/ip4/0.0.0.0/tcp/4001", @@ -255,7 +363,7 @@ Default: ] ``` -Type: `array[string]` (multiaddrs) +Type: `array[string]` ([multiaddrs][multiaddr]) ### `Addresses.Announce` @@ -264,7 +372,7 @@ network. If empty, the daemon will announce inferred swarm addresses. Default: `[]` -Type: `array[string]` (multiaddrs) +Type: `array[string]` ([multiaddrs][multiaddr]) ### `Addresses.AppendAnnounce` @@ -273,7 +381,7 @@ override inferred swarm addresses if non-empty. Default: `[]` -Type: `array[string]` (multiaddrs) +Type: `array[string]` ([multiaddrs][multiaddr]) ### `Addresses.NoAnnounce` @@ -283,12 +391,12 @@ Takes precedence over `Addresses.Announce` and `Addresses.AppendAnnounce`. 
> [!TIP] > The [`server` configuration profile](#server-profile) fills up this list with sensible defaults, > preventing announcement of non-routable IP addresses (e.g., `/ip4/192.168.0.0/ipcidr/16`, -> which is the multiaddress representation of `192.168.0.0/16`) but you should always +> which is the [multiaddress][multiaddr] representation of `192.168.0.0/16`) but you should always > check settings against your own network and/or hosting provider. Default: `[]` -Type: `array[string]` (multiaddrs) +Type: `array[string]` ([multiaddrs][multiaddr]) ## `API` @@ -299,6 +407,7 @@ Contains information used by the [Kubo RPC API](https://docs.ipfs.tech/reference Map of HTTP headers to set on responses from the RPC (`/api/v0`) HTTP server. Example: + ```json { "Foo": ["bar"] @@ -315,7 +424,7 @@ The `API.Authorizations` field defines user-based access restrictions for the [Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/), which is located at `Addresses.API` under `/api/v0` paths. -By default, the RPC API is accessible without restrictions as it is only +By default, the admin-level RPC API is accessible without restrictions as it is only exposed on `127.0.0.1` and safeguarded with Origin check and implicit [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) headers that block random websites from accessing the RPC. @@ -325,9 +434,18 @@ unless a corresponding secret is present in the HTTP [`Authorization` header](ht and the requested path is included in the `AllowedPaths` list for that specific secret. +> [!CAUTION] +> **NEVER EXPOSE UNPROTECTED ADMIN RPC TO LAN OR THE PUBLIC INTERNET** +> +> The RPC API is vast. It grants admin-level access to your Kubo IPFS node, including +> configuration and secret key management. 
+> +> - If you need secure access to a subset of RPC, make sure you understand the risk, block everything by default and allow basic auth access with [`API.Authorizations`](#apiauthorizations) or custom auth middleware running in front of the localhost-only port defined in [`Addresses.API`](#addressesapi). +> - If you are looking for an interface designed for browsers and public internet, use [`Addresses.Gateway`](#addressesgateway) port instead. + Default: `null` -Type: `object[string -> object]` (user name -> authorization object, see bellow) +Type: `object[string -> object]` (user name -> authorization object, see below) For example, to limit RPC access to Alice (access `id` and MFS `files` commands with HTTP Basic Auth) and Bob (full access with Bearer token): @@ -401,18 +519,18 @@ the rest of the internet. When unset (default), the AutoNAT service defaults to _enabled_. Otherwise, this field can take one of two values: -* `enabled` - Enable the V1+V2 service (unless the node determines that it, +- `enabled` - Enable the V1+V2 service (unless the node determines that it, itself, isn't reachable by the public internet). -* `legacy-v1` - Same as `enabled` but only V1 service is enabled. Used for testing +- `legacy-v1` - **DEPRECATED** Same as `enabled` but only V1 service is enabled. Used for testing during as few releases as we [transition to V2](https://github.com/ipfs/kubo/issues/10091), will be removed in the future. -* `disabled` - Disable the service. +- `disabled` - Disable the service. Additional modes may be added in the future. > [!IMPORTANT] > We are in the progress of [rolling out AutoNAT V2](https://github.com/ipfs/kubo/issues/10091). > Right now, by default, a publicly dialable Kubo provides both V1 and V2 service to other peers, -> but only V1 is used by Kubo as a client. In a future release we will remove V1 and switch client to use V2. +> and V1 is still used by Kubo for Autorelay feature. 
In a future release we will remove V1 and switch all features to use V2. Default: `enabled` @@ -448,13 +566,317 @@ Default: 1 Minute Type: `duration` (when `0`/unset, the default value is used) +## `AutoConf` + +The AutoConf feature enables Kubo nodes to automatically fetch and apply network configuration from a remote JSON endpoint. This system allows dynamic configuration updates for bootstrap peers, DNS resolvers, delegated routing, and IPNS publishing endpoints without requiring manual updates to each node's local config. + +AutoConf works by using special `"auto"` placeholder values in configuration fields. When Kubo encounters these placeholders, it fetches the latest configuration from the specified URL and resolves the placeholders with the appropriate values at runtime. The original configuration file remains unchanged - `"auto"` values are preserved in the JSON and only resolved in memory during node operation. + +### Key Features + +- **Remote Configuration**: Fetch network defaults from a trusted URL +- **Automatic Updates**: Periodic background checks for configuration updates +- **Graceful Fallback**: Uses hardcoded IPFS Mainnet bootstrappers when remote config is unavailable +- **Validation**: Ensures all fetched configuration values are valid multiaddrs and URLs +- **Caching**: Stores multiple versions locally with ETags for efficient updates +- **User Notification**: Logs ERROR when new configuration is available requiring node restart +- **Debug Logging**: AutoConf operations can be inspected by setting `GOLOG_LOG_LEVEL="error,autoconf=debug"` + +### Supported Fields + +AutoConf can resolve `"auto"` placeholders in the following configuration fields: + +- `Bootstrap` - Bootstrap peer addresses +- `DNS.Resolvers` - DNS-over-HTTPS resolver endpoints +- `Routing.DelegatedRouters` - Delegated routing HTTP API endpoints +- `Ipns.DelegatedPublishers` - IPNS delegated publishing HTTP API endpoints + +### Usage Example + +```json +{ + "AutoConf": { + 
"URL": "https://example.com/autoconf.json", + "Enabled": true, + "RefreshInterval": "24h" + }, + "Bootstrap": ["auto"], + "DNS": { + "Resolvers": { + ".": ["auto"], + "eth.": ["auto"], + "custom.": ["https://dns.example.com/dns-query"] + } + }, + "Routing": { + "DelegatedRouters": ["auto", "https://router.example.org/routing/v1"] + } +} +``` + +**Notes:** + +- Configuration fetching happens at daemon startup and periodically in the background +- When new configuration is detected, users must restart their node to apply changes +- Mixed configurations are supported: you can use both `"auto"` and static values +- If AutoConf is disabled but `"auto"` values exist, daemon startup will fail with validation errors +- Cache is stored in `$IPFS_PATH/autoconf/` with up to 3 versions retained + +### Path-Based Routing Configuration + +AutoConf supports path-based routing URLs that automatically enable specific routing operations based on the URL path. This allows precise control over which HTTP Routing V1 endpoints are used for different operations: + +**Supported paths:** + +- `/routing/v1/providers` - Enables provider record lookups only +- `/routing/v1/peers` - Enables peer routing lookups only +- `/routing/v1/ipns` - Enables IPNS record operations only +- No path - Enables all routing operations (backward compatibility) + +**AutoConf JSON structure with path-based routing:** + +```json +{ + "DelegatedRouters": { + "mainnet-for-nodes-with-dht": [ + "https://cid.contact/routing/v1/providers" + ], + "mainnet-for-nodes-without-dht": [ + "https://delegated-ipfs.dev/routing/v1/providers", + "https://delegated-ipfs.dev/routing/v1/peers", + "https://delegated-ipfs.dev/routing/v1/ipns" + ] + }, + "DelegatedPublishers": { + "mainnet-for-ipns-publishers-with-http": [ + "https://delegated-ipfs.dev/routing/v1/ipns" + ] + } +} +``` + +**Node type categories:** + +- `mainnet-for-nodes-with-dht`: Mainnet nodes with DHT enabled (typically only need additional provider lookups) +- 
`mainnet-for-nodes-without-dht`: Mainnet nodes without DHT (need comprehensive routing services) +- `mainnet-for-ipns-publishers-with-http`: Mainnet nodes that publish IPNS records via HTTP + +This design enables efficient, selective routing where each endpoint URL automatically determines its capabilities based on the path, while maintaining semantic grouping by node configuration type. + +Default: `{}` + +Type: `object` + +### `AutoConf.Enabled` + +Controls whether the AutoConf system is active. When enabled, Kubo will fetch configuration from the specified URL and resolve `"auto"` placeholders at runtime. When disabled, any `"auto"` values in the configuration will cause daemon startup to fail with validation errors. + +This provides a safety mechanism to ensure nodes don't start with unresolved placeholders when AutoConf is intentionally disabled. + +Default: `true` + +Type: `flag` + +### `AutoConf.URL` + +Specifies the HTTP(S) URL from which to fetch the autoconf JSON. The endpoint should return a JSON document containing Bootstrap peers, DNS resolvers, delegated routing endpoints, and IPNS publishing endpoints that will replace `"auto"` placeholders in the local configuration. + +The URL must serve a JSON document matching the AutoConf schema. Kubo validates all multiaddr and URL values before caching to ensure they are properly formatted. + +When not specified in the configuration, the default mainnet URL is used automatically. + + + +> [!NOTE] +> Public good autoconf manifest at `conf.ipfs-mainnet.org` is provided by the team at [Shipyard](https://ipshipyard.com). + +Default: `"https://conf.ipfs-mainnet.org/autoconf.json"` (when not specified) + +Type: `optionalString` + +### `AutoConf.RefreshInterval` + +Specifies how frequently Kubo should refresh autoconf data. This controls both how often cached autoconf data is considered fresh and how frequently the background service checks for new configuration updates. 
+ +When a new configuration version is detected during background updates, Kubo logs an ERROR message informing the user that a node restart is required to apply the changes to any `"auto"` entries in their configuration. + +Default: `24h` + +Type: `optionalDuration` + +### `AutoConf.TLSInsecureSkipVerify` + +**FOR TESTING ONLY** - Allows skipping TLS certificate verification when fetching autoconf from HTTPS URLs. This should never be enabled in production as it makes the configuration fetching vulnerable to man-in-the-middle attacks. + +Default: `false` + +Type: `flag` + +## `AutoTLS` + +The [AutoTLS](https://blog.libp2p.io/autotls/) feature enables publicly reachable Kubo nodes (those dialable from the public +internet) to automatically obtain a wildcard TLS certificate for a DNS name +unique to their PeerID at `*.[PeerID].libp2p.direct`. This enables direct +libp2p connections and retrieval of IPFS content from browsers [Secure Context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts) +using transports such as [Secure WebSockets](https://github.com/libp2p/specs/blob/master/websockets/README.md), +without requiring user to do any manual domain registration and certificate configuration. + +Under the hood, [p2p-forge] client uses public utility service at `libp2p.direct` as an [ACME DNS-01 Challenge](https://letsencrypt.org/docs/challenge-types/#dns-01-challenge) +broker enabling peer to obtain a wildcard TLS certificate tied to public key of their [PeerID](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id). + +By default, the certificates are requested from Let's Encrypt. Origin and rationale for this project can be found in [community.letsencrypt.org discussion](https://community.letsencrypt.org/t/feedback-on-raising-certificates-per-registered-domain-to-enable-peer-to-peer-networking/223003). 
+ + + +> [!NOTE] +> Public good DNS and [p2p-forge] infrastructure at `libp2p.direct` is run by the team at [Interplanetary Shipyard](https://ipshipyard.com). +> +[p2p-forge]: https://github.com/ipshipyard/p2p-forge + +Default: `{}` + +Type: `object` + +### `AutoTLS.Enabled` + +Enables the AutoTLS feature to provide DNS and TLS support for [libp2p Secure WebSocket](https://github.com/libp2p/specs/blob/master/websockets/README.md) over a `/tcp` port, +to allow JS clients running in web browser [Secure Context](https://w3c.github.io/webappsec-secure-contexts/) to connect to Kubo directly. + +When activated, together with [`AutoTLS.AutoWSS`](#autotlsautowss) (default) or manually including a `/tcp/{port}/tls/sni/*.libp2p.direct/ws` multiaddr in [`Addresses.Swarm`](#addressesswarm) +(with SNI suffix matching [`AutoTLS.DomainSuffix`](#autotlsdomainsuffix)), Kubo retrieves a trusted PKI TLS certificate for `*.{peerid}.libp2p.direct` and configures the `/ws` listener to use it. + +**Note:** + +- This feature requires a publicly reachable node. If behind NAT, manual port forwarding or UPnP (`Swarm.DisableNatPortMap=false`) is required. +- The first time AutoTLS is used, it may take 5-15 minutes + [`AutoTLS.RegistrationDelay`](#autotlsregistrationdelay) before `/ws` listener is added. Be patient. +- Avoid manual configuration. [`AutoTLS.AutoWSS=true`](#autotlsautowss) should automatically add `/ws` listener to existing, firewall-forwarded `/tcp` ports. +- To troubleshoot, use `GOLOG_LOG_LEVEL="error,autotls=debug"` for detailed logs, or `GOLOG_LOG_LEVEL="error,autotls=info"` for quieter output. +- Certificates are stored in `$IPFS_PATH/p2p-forge-certs`; deleting this directory and restarting the daemon forces a certificate rotation. +- For now, the TLS cert applies solely to `/ws` libp2p WebSocket connections, not HTTP [`Gateway`](#gateway), which still needs separate reverse proxy TLS setup with a custom domain.
+ +Default: `true` + +Type: `flag` + +### `AutoTLS.AutoWSS` + +Optional. Controls if Kubo should add `/tls/sni/*.libp2p.direct/ws` listener to every pre-existing `/tcp` port IFF no explicit `/ws` is defined in [`Addresses.Swarm`](#addressesswarm) already. + +Default: `true` (if `AutoTLS.Enabled`) + +Type: `flag` + +### `AutoTLS.ShortAddrs` + +Optional. Controls if final AutoTLS listeners are announced under shorter `/dnsX/A.B.C.D.peerid.libp2p.direct/tcp/4001/tls/ws` addresses instead of fully resolved `/ip4/A.B.C.D/tcp/4001/tls/sni/A-B-C-D.peerid.libp2p.direct/tls/ws`. + +The main use for AutoTLS is allowing connectivity from Secure Context in a web browser, and DNS lookup needs to happen there anyway, making `/dnsX` a more compact, more interoperable option without obvious downside. + +Default: `true` + +Type: `flag` + +### `AutoTLS.DomainSuffix` + +Optional override of the parent domain suffix that will be used in DNS+TLS+WebSockets multiaddrs generated by [p2p-forge] client. +Do not change this unless you self-host [p2p-forge]. + +Default: `libp2p.direct` (public good run by [Interplanetary Shipyard](https://ipshipyard.com)) + +Type: `optionalString` + +### `AutoTLS.RegistrationEndpoint` + +Optional override of [p2p-forge] HTTP registration API. +Do not change this unless you self-host [p2p-forge] under own domain. + +> [!IMPORTANT] +> The default endpoint performs [libp2p Peer ID Authentication over HTTP](https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md) +> (proving ownership of PeerID), probes if your Kubo node can correctly answer to a [libp2p Identify](https://github.com/libp2p/specs/tree/master/identify) query. +> This ensures only a correctly configured, publicly dialable Kubo can initiate [ACME DNS-01 challenge](https://letsencrypt.org/docs/challenge-types/#dns-01-challenge) for `peerid.libp2p.direct`. 
+ +Default: `https://registration.libp2p.direct` (public good run by [Interplanetary Shipyard](https://ipshipyard.com)) + +Type: `optionalString` + +### `AutoTLS.RegistrationToken` + +Optional value for `Forge-Authorization` token sent with request to `RegistrationEndpoint` +(useful for private/self-hosted/test instances of [p2p-forge], unset by default). + +Default: `""` + +Type: `optionalString` + +### `AutoTLS.RegistrationDelay` + +An additional delay applied before sending a request to the `RegistrationEndpoint`. + +The default delay is bypassed if the user explicitly sets `AutoTLS.Enabled=true` in the JSON configuration file. +This ensures that ephemeral nodes using the default configuration do not spam the `AutoTLS.CAEndpoint` with unnecessary ACME requests. + +Default: `1h` (or `0` if explicit `AutoTLS.Enabled=true`) + +Type: `optionalDuration` + +### `AutoTLS.CAEndpoint` + +Optional override of CA ACME API used by [p2p-forge] system. +Do not change this unless you self-host [p2p-forge] under your own domain. + +> [!IMPORTANT] +> CAA DNS record at `libp2p.direct` limits CA choice to Let's Encrypt. If you want to use a different CA, use your own domain. + +Default: [certmagic.LetsEncryptProductionCA](https://pkg.go.dev/github.com/caddyserver/certmagic#pkg-constants) (see [community.letsencrypt.org discussion](https://community.letsencrypt.org/t/feedback-on-raising-certificates-per-registered-domain-to-enable-peer-to-peer-networking/223003)) + +Type: `optionalString` + +## `Bitswap` + +High level client and server configuration of the [Bitswap Protocol](https://specs.ipfs.tech/bitswap-protocol/) over libp2p. + +For internal configuration see [`Internal.Bitswap`](#internalbitswap). + +For HTTP version see [`HTTPRetrieval`](#httpretrieval). + +### `Bitswap.Libp2pEnabled` + +Determines whether Kubo will use Bitswap over libp2p.
+ +Disabling this, will remove `/ipfs/bitswap/*` protocol support from [libp2p identify](https://github.com/libp2p/specs/blob/master/identify/README.md) responses, effectively shutting down both Bitswap libp2p client and server. + +> [!WARNING] +> Bitswap over libp2p is a core component of Kubo and the oldest way of exchanging blocks. Disabling it completely may cause unpredictable outcomes, such as retrieval failures, if the only providers were libp2p ones. Treat this as experimental and use it solely for testing purposes with `HTTPRetrieval.Enabled`. + +Default: `true` + +Type: `flag` + +### `Bitswap.ServerEnabled` + +Determines whether Kubo functions as a Bitswap server to host and respond to block requests. + +Disabling the server retains client and protocol support in [libp2p identify](https://github.com/libp2p/specs/blob/master/identify/README.md) responses but causes Kubo to reply with "don't have" to all block requests. + +Default: `true` (requires `Bitswap.Libp2pEnabled`) + +Type: `flag` + ## `Bootstrap` -Bootstrap is an array of multiaddrs of trusted nodes that your node connects to, to fetch other nodes of the network on startup. +Bootstrap peers help your node discover and connect to the IPFS network when starting up. This array contains [multiaddrs][multiaddr] of trusted nodes that your node contacts first to find other peers and content. -Default: The ipfs.io bootstrap nodes +The special value `"auto"` automatically uses curated, up-to-date bootstrap peers from [AutoConf](#autoconf), ensuring your node can always connect to the healthy network without manual maintenance. 
-Type: `array[string]` (multiaddrs) +**What this gives you:** + +- **Reliable startup**: Your node can always find the network, even if some bootstrap peers go offline +- **Automatic updates**: New bootstrap peers are added as the network evolves +- **Custom control**: Add your own trusted peers alongside or instead of the defaults + +Default: `["auto"]` + +Type: `array[string]` ([multiaddrs][multiaddr] or `"auto"`) + +## `Datastore` + +@@ -514,25 +936,84 @@ we'd want to use 1199120 bytes. As of writing, [7 hash functions](https://github.com/ipfs/go-ipfs-blockstore/blob/547442836ade055cc114b562a3cc193d4e57c884/caching.go#L22) are used, so the constant `k` is 7 in the formula. +Enabling the BloomFilter can provide performance improvements especially when +responding to many requests for nonexistent blocks. It however requires a full +sweep of all the datastore keys on daemon start. On very large datastores this +can be a very taxing operation, particularly if the datastore does not support +querying existing keys without reading their values at the same time (blocks). + Default: `0` (disabled) Type: `integer` (non-negative, bytes) +### `Datastore.WriteThrough` + +This option controls whether a block that already exists in the datastore +should be written to it. When set to `false`, a `Has()` call is performed +against the datastore prior to writing every block. If the block is already +stored, the write is skipped. This check happens both on the Blockservice and +the Blockstore layers and this setting affects both. + +When set to `true`, no checks are performed and blocks are written to the +datastore, which depending on the implementation may perform its own checks. + +This option can affect performance and the strategy should be taken in +conjunction with [`BlockKeyCacheSize`](#datastoreblockkeycachesize) and +[`BloomFilterSize`](#datastorebloomfiltersize).
+ +Default: `true` + +Type: `bool` + +### `Datastore.BlockKeyCacheSize` + +A number representing the maximum size in bytes of the blockstore's Two-Queue +cache, which caches block-cids and their block-sizes. Use `0` to disable. + +This cache, once primed, can greatly speed up operations like `ipfs repo stat` +as there is no need to read full blocks to know their sizes. Size should be +adjusted depending on the number of CIDs on disk (`NumObjects` in `ipfs repo stat`). + +Default: `65536` (64KiB) + +Type: `optionalInteger` (non-negative, bytes) + ### `Datastore.Spec` Spec defines the structure of the ipfs datastore. It is a composable structure, where each datastore is represented by a json object. Datastores can wrap other datastores to provide extra functionality (eg metrics, logging, or caching). -This can be changed manually, however, if you make any changes that require a -different on-disk structure, you will need to run the [ipfs-ds-convert -tool](https://github.com/ipfs/ipfs-ds-convert) to migrate data into the new -structures. - -For more information on possible values for this configuration option, see -[docs/datastores.md](datastores.md) +> [!NOTE] +> For more information on possible values for this configuration option, see [`kubo/docs/datastores.md`](datastores.md) Default: + +``` +{ + "mounts": [ + { + "mountpoint": "/blocks", + "path": "blocks", + "prefix": "flatfs.datastore", + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", + "sync": false, + "type": "flatfs" + }, + { + "compression": "none", + "mountpoint": "/", + "path": "datastore", + "prefix": "leveldb.datastore", + "type": "levelds" + } + ], + "type": "mount" +} +``` + +With `flatfs-measure` profile: + ``` { "mounts": [ @@ -574,7 +1055,7 @@ Options for [ZeroConf](https://github.com/libp2p/zeroconf#readme) Multicast DNS- #### `Discovery.MDNS.Enabled` -A boolean value for whether or not Multicast DNS-SD should be active. +A boolean value to activate or deactivate Multicast DNS-SD.
Default: `true` @@ -589,11 +1070,26 @@ in the [new mDNS implementation](https://github.com/libp2p/zeroconf#readme). Toggle and configure experimental features of Kubo. Experimental features are listed [here](./experimental-features.md). +### `Experimental.Libp2pStreamMounting` + +Enables the `ipfs p2p` commands for tunneling TCP connections through libp2p +streams, similar to SSH port forwarding. + +See [docs/p2p-tunnels.md](p2p-tunnels.md) for usage examples. + +Default: `false` + +Type: `bool` + ## `Gateway` Options for the HTTP gateway. -**NOTE:** support for `/api/v0` under the gateway path is now deprecated. It will be removed in future versions: https://github.com/ipfs/kubo/issues/10312. +> [!IMPORTANT] +> By default, Kubo's gateway is configured for local use at `127.0.0.1` and `localhost`. +> To run a public gateway, configure your domain names in [`Gateway.PublicGateways`](#gatewaypublicgateways). +> For production deployment considerations (reverse proxy, timeouts, rate limiting, CDN), +> see [Running in Production](gateway.md#running-in-production). ### `Gateway.NoFetch` @@ -618,7 +1114,7 @@ Type: `bool` An optional flag to explicitly configure whether this gateway responds to deserialized requests, or not. By default, it is enabled. When disabling this option, the gateway -operates as a Trustless Gateway only: https://specs.ipfs.tech/http-gateways/trustless-gateway/. +operates as a Trustless Gateway only: . Default: `true` @@ -648,10 +1144,84 @@ Kubo will filter out routing results which are not actionable, for example, all graphsync providers will be skipped. If you need a generic pass-through, see standalone router implementation named [someguy](https://github.com/ipfs/someguy). -Default: `false` +Default: `true` Type: `flag` +### `Gateway.RetrievalTimeout` + +Maximum duration Kubo will wait for content retrieval (new bytes to arrive). 
+ +**Timeout behavior:** + +- **Time to first byte**: Returns 504 Gateway Timeout if the gateway cannot start writing within this duration (e.g., stuck searching for providers) +- **Time between writes**: After first byte, timeout resets with each write. Response terminates if no new data can be written within this duration + +**Truncation handling:** When timeout occurs after HTTP 200 headers are sent (e.g., during CAR streams), the gateway: + +- Appends error message to indicate truncation +- Forces TCP reset (RST) to prevent caching incomplete responses +- Records in metrics with original status code and `truncated=true` flag + +**Monitoring:** Track `ipfs_http_gw_retrieval_timeouts_total` by status code and truncation status. + +**Tuning guidance:** + +- Compare timeout rates (`ipfs_http_gw_retrieval_timeouts_total`) with success rates (`ipfs_http_gw_responses_total{status="200"}`) +- High timeout rate: consider increasing timeout or scaling horizontally if hardware is constrained +- Many 504s may indicate routing problems - check requested CIDs and provider availability using +- `truncated=true` timeouts indicate retrieval stalled mid-file with no new bytes for the timeout duration + +A value of 0 disables this timeout. + +Default: `30s` + +Type: `optionalDuration` + +### `Gateway.MaxRangeRequestFileSize` + +Maximum file size for HTTP range requests on deserialized responses. Range requests for files larger than this limit return 501 Not Implemented. + +**Why this exists:** + +Some CDNs like Cloudflare intercept HTTP range requests and convert them to full file downloads when files exceed their cache bucket limits. Cloudflare's default plan only caches range requests for files up to 5GiB. Files larger than this receive HTTP 200 with the entire file instead of HTTP 206 with the requested byte range. 
A client requesting 1MB from a 40GiB file would unknowingly download all 40GiB, causing bandwidth overcharges for the gateway operator, unexpected data costs for the client, and potential browser crashes. + +This only affects deserialized responses. Clients fetching verifiable blocks as `application/vnd.ipld.raw` are not impacted because they work with small chunks that stay well below CDN cache limits. + +**How to use:** + +Set this to your CDN's range request cache limit (e.g., `"5GiB"` for Cloudflare's default plan). The gateway returns 501 Not Implemented for range requests over files larger than this limit, with an error message suggesting verifiable block requests as an alternative. + +> [!NOTE] +> Cloudflare users running open gateway hosting deserialized responses should deploy additional protection via Cloudflare Snippets (requires Enterprise plan). The Kubo configuration alone is not sufficient because Cloudflare has already intercepted and cached the response by the time it reaches your origin. See [boxo#856](https://github.com/ipfs/boxo/issues/856#issuecomment-3523944976) for a snippet that aborts HTTP 200 responses when Content-Length exceeds the limit. + +Default: `0` (no limit) + +Type: [`optionalBytes`](#optionalbytes) + +### `Gateway.MaxConcurrentRequests` + +Limits concurrent HTTP requests. Requests beyond limit receive 429 Too Many Requests. + +Protects nodes from traffic spikes and resource exhaustion, especially behind reverse proxies without rate-limiting. Default (4096) aligns with common reverse proxy configurations (e.g., nginx: 8 workers × 1024 connections). + +**Monitoring:** `ipfs_http_gw_concurrent_requests` tracks current requests in flight. 
+ +**Tuning guidance:** + +- Monitor `ipfs_http_gw_concurrent_requests` gauge for usage patterns +- Track 429s (`ipfs_http_gw_responses_total{status="429"}`) and success rate (`{status="200"}`) +- Near limit with low resource usage → increase value +- Memory pressure or OOMs → decrease value and consider scaling +- Set slightly below reverse proxy limit for graceful degradation +- Start with default, adjust based on observed performance for your hardware + +A value of 0 disables the limit. + +Default: `4096` + +Type: `optionalInteger` + ### `Gateway.HTTPHeaders` Headers to set on gateway responses. @@ -662,12 +1232,22 @@ Type: `object[string -> array[string]]` ### `Gateway.RootRedirect` -A url to redirect requests for `/` to. +A URL to redirect requests for `/` to. Default: `""` Type: `string` (url) +### `Gateway.DiagnosticServiceURL` + +URL for a service to diagnose CID retrievability issues. When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID" button will be shown that links to this service with the CID appended as `?cid=`. + +Set to empty string to disable the button. + +Default: `"https://check.ipfs.network"` + +Type: `optionalstring` (url) + ### `Gateway.FastDirIndexThreshold` **REMOVED**: this option is [no longer necessary](https://github.com/ipfs/kubo/pull/9481). Ignored since [Kubo 0.18](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.18.md). @@ -691,7 +1271,7 @@ We are working on developing a modern replacement. To support our efforts, pleas on specified hostnames that point at your Kubo instance. 
It is useful when you want to run [Path gateway](https://specs.ipfs.tech/http-gateways/path-gateway/) on `example.com/ipfs/cid`, -and [Subdomain gateway](https://specs.ipfs.tech/http-gateways/subdomain-gateway/) on `cid.ipfs.example.org`, +and [Subdomain gateway](https://specs.ipfs.tech/http-gateways/subdomain-gateway/) on `cid.ipfs.example.org`, or limit `verifiable.example.net` to response types defined in [Trustless Gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway/) specification. > [!CAUTION] @@ -700,14 +1280,21 @@ or limit `verifiable.example.net` to response types defined in [Trustless Gatewa Hostnames can optionally be defined with one or more wildcards. Examples: + - `*.example.com` will match requests to `http://foo.example.com/ipfs/*` or `http://{cid}.ipfs.bar.example.com/*`. - `foo-*.example.com` will match requests to `http://foo-bar.example.com/ipfs/*` or `http://{cid}.ipfs.foo-xyz.example.com/*`. +> [!IMPORTANT] +> **Reverse Proxy:** If running behind nginx or another reverse proxy, ensure +> `Host` and `X-Forwarded-*` headers are forwarded correctly. +> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) in gateway documentation. + #### `Gateway.PublicGateways: Paths` An array of paths that should be exposed on the hostname. Example: + ```json { "Gateway": { @@ -729,13 +1316,14 @@ Type: `array[string]` #### `Gateway.PublicGateways: UseSubdomains` A boolean to configure whether the gateway at the hostname should be -a [Subdomain Gateway](https://specs.ipfs.tech/http-gateways/subdomain-gateway/) +a [Subdomain Gateway](https://specs.ipfs.tech/http-gateways/subdomain-gateway/) and provide [Origin isolation](https://developer.mozilla.org/en-US/docs/Web/Security/Same-origin_policy) between content roots. - `true` - enables [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://*.{hostname}/` - - **Requires whitelist:** make sure respective `Paths` are set. 
+ - **Requires whitelist:** make sure respective `Paths` are set. For example, `Paths: ["/ipfs", "/ipns"]` are required for `http://{cid}.ipfs.{hostname}` and `http://{foo}.ipns.{hostname}` to work: + ```json "Gateway": { "PublicGateways": { @@ -746,10 +1334,12 @@ between content roots. } } ``` - - **Backward-compatible:** requests for content paths such as `http://{hostname}/ipfs/{cid}` produce redirect to `http://{cid}.ipfs.{hostname}` + + - **Backward-compatible:** requests for content paths such as `http://{hostname}/ipfs/{cid}` produce redirect to `http://{cid}.ipfs.{hostname}` - `false` - enables [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://{hostname}/*` - Example: + ```json "Gateway": { "PublicGateways": { @@ -765,6 +1355,9 @@ Default: `false` Type: `bool` +> [!IMPORTANT] +> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy. + #### `Gateway.PublicGateways: NoDNSLink` A boolean to configure whether DNSLink for hostname present in `Host` @@ -775,6 +1368,9 @@ Default: `false` (DNSLink lookup enabled by default for every defined hostname) Type: `bool` +> [!IMPORTANT] +> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy. + #### `Gateway.PublicGateways: InlineDNSLink` An optional flag to explicitly configure whether subdomain gateway's redirects @@ -788,7 +1384,7 @@ into a single DNS label ([specification](https://specs.ipfs.tech/http-gateways/s DNSLink name inlining allows for HTTPS on public subdomain gateways with single label wildcard TLS certs (also enabled when passing `X-Forwarded-Proto: https`), and provides disjoint Origin per root CID when special rules like -https://publicsuffix.org, or a custom localhost logic in browsers like Brave +, or a custom localhost logic in browsers like Brave has to be applied. 
Default: `false` @@ -815,6 +1411,7 @@ Type: `flag` Default entries for `localhost` hostname and loopback IPs are always present. If additional config is provided for those hostnames, it will be merged on top of implicit values: + ```json { "Gateway": { @@ -834,14 +1431,18 @@ For example, to disable subdomain gateway on `localhost` and make that hostname act the same as `127.0.0.1`: ```console -$ ipfs config --json Gateway.PublicGateways '{"localhost": null }' +ipfs config --json Gateway.PublicGateways '{"localhost": null }' ``` ### `Gateway` recipes -Below is a list of the most common public gateway setups. +Below is a list of the most common gateway setups. + +> [!IMPORTANT] +> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy. + +- Public [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://{cid}.ipfs.dweb.link` (each content root gets its own Origin) -* Public [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://{cid}.ipfs.dweb.link` (each content root gets its own Origin) ```console $ ipfs config --json Gateway.PublicGateways '{ "dweb.link": { @@ -850,22 +1451,24 @@ Below is a list of the most common public gateway setups. } }' ``` - - **Backward-compatible:** this feature enables automatic redirects from content paths to subdomains: + + - **Performance:** Consider enabling `Routing.AcceleratedDHTClient=true` to improve content routing lookups. Separately, gateway operators should decide if the gateway node should also co-host and provide (announce) fetched content to the DHT. If providing content, enable `Provide.DHT.SweepEnabled=true` for efficient announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`. For a read-only gateway that doesn't announce content, use `Provide.Enabled=false`. 
+ - **Backward-compatible:** this feature enables automatic redirects from content paths to subdomains: `http://dweb.link/ipfs/{cid}` → `http://{cid}.ipfs.dweb.link` - - **X-Forwarded-Proto:** if you run Kubo behind a reverse proxy that provides TLS, make it add a `X-Forwarded-Proto: https` HTTP header to ensure users are redirected to `https://`, not `http://`. It will also ensure DNSLink names are inlined to fit in a single DNS label, so they work fine with a wildcart TLS cert ([details](https://github.com/ipfs/in-web-browsers/issues/169)). The NGINX directive is `proxy_set_header X-Forwarded-Proto "https";`.: + - **X-Forwarded-Proto:** if you run Kubo behind a reverse proxy that provides TLS, make it add a `X-Forwarded-Proto: https` HTTP header to ensure users are redirected to `https://`, not `http://`. It will also ensure DNSLink names are inlined to fit in a single DNS label, so they work fine with a wildcard TLS cert ([details](https://github.com/ipfs/in-web-browsers/issues/169)). 
The NGINX directive is `proxy_set_header X-Forwarded-Proto "https";`.: `http://dweb.link/ipfs/{cid}` → `https://{cid}.ipfs.dweb.link` `http://dweb.link/ipns/your-dnslink.site.example.com` → `https://your--dnslink-site-example-com.ipfs.dweb.link` - - **X-Forwarded-Host:** we also support `X-Forwarded-Host: example.com` if you want to override subdomain gateway host from the original request: + - **X-Forwarded-Host:** we also support `X-Forwarded-Host: example.com` if you want to override subdomain gateway host from the original request: `http://dweb.link/ipfs/{cid}` → `http://{cid}.ipfs.example.com` +- Public [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://ipfs.io/ipfs/{cid}` (no Origin separation) -* Public [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://ipfs.io/ipfs/{cid}` (no Origin separation) ```console $ ipfs config --json Gateway.PublicGateways '{ "ipfs.io": { @@ -875,13 +1478,17 @@ Below is a list of the most common public gateway setups. }' ``` -* Public [DNSLink](https://dnslink.io/) gateway resolving every hostname passed in `Host` header. - ```console - $ ipfs config --json Gateway.NoDNSLink false - ``` - * Note that `NoDNSLink: false` is the default (it works out of the box unless set to `true` manually) + - **Performance:** Consider enabling `Routing.AcceleratedDHTClient=true` to improve content routing lookups. When running an open, recursive gateway, decide if the gateway should also co-host and provide (announce) fetched content to the DHT. If providing content, enable `Provide.DHT.SweepEnabled=true` for efficient announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`. For a read-only gateway that doesn't announce content, use `Provide.Enabled=false`. -* Hardened, site-specific [DNSLink gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#dnslink-gateway). 
+- Public [DNSLink](https://dnslink.io/) gateway resolving every hostname passed in `Host` header. + + ```console + ipfs config --json Gateway.NoDNSLink false + ``` + + - Note that `NoDNSLink: false` is the default (it works out of the box unless set to `true` manually) + +- Hardened, site-specific [DNSLink gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#dnslink-gateway). Disable fetching of remote data (`NoFetch: true`) and resolving DNSLink at unknown hostnames (`NoDNSLink: true`). Then, enable DNSLink gateway only for the specific hostname (for which data @@ -923,6 +1530,10 @@ This section includes internal knobs for various subsystems to allow advanced us ### `Internal.Bitswap` `Internal.Bitswap` contains knobs for tuning bitswap resource utilization. + +> [!TIP] +> For high level configuration see [`Bitswap`](#bitswap). + The knobs (below) document how their value should related to each other. Whether their values should be raised or lowered should be determined based on the metrics `ipfs_bitswap_active_tasks`, `ipfs_bitswap_pending_tasks`, @@ -989,7 +1600,7 @@ deteriorate the quality provided to less aggressively-wanting peers. Type: `optionalInteger` (byte count, `null` means default which is 1MB) -### `Internal.Bitswap.ProviderSearchDelay` +#### `Internal.Bitswap.ProviderSearchDelay` This parameter determines how long to wait before looking for providers outside of bitswap. Other routing systems like the Amino DHT are able to provide results in less than a second, so lowering @@ -997,17 +1608,113 @@ this number will allow faster peers lookups in some cases. Type: `optionalDuration` (`null` means default which is 1s) +#### `Internal.Bitswap.ProviderSearchMaxResults` + +Maximum number of providers bitswap client should aim at before it stops searching for new ones. +Setting to 0 means unlimited. 
+ +Type: `optionalInteger` (`null` means default which is 10) + +#### `Internal.Bitswap.BroadcastControl` + +`Internal.Bitswap.BroadcastControl` contains settings for the bitswap client's broadcast control functionality. + +Broadcast control tries to reduce the number of bitswap broadcast messages sent to peers by choosing a subset of the peers to send to. Peers are chosen based on whether they have previously responded indicating they have wanted blocks, as well as other configurable criteria. The settings here change how peers are selected as broadcast targets. Broadcast control can also be completely disabled to return bitswap to its previous behavior before broadcast control was introduced. + +Enabling broadcast control should generally reduce the number of broadcasts significantly without significantly degrading the ability to discover which peers have wanted blocks. However, if block discovery on your network relies sufficiently on broadcasts to discover peers that have wanted blocks, then adjusting the broadcast control configuration or disabling it altogether, may be helpful. + +##### `Internal.Bitswap.BroadcastControl.Enable` + +Enables or disables broadcast control functionality. Setting this to `false` disables broadcast reduction logic and restores the previous (Kubo < 0.36) broadcast behavior of sending broadcasts to all peers. When disabled, all other `Bitswap.BroadcastControl` configuration items are ignored. + +Default: `true` (Enabled) + +Type: `flag` + +##### `Internal.Bitswap.BroadcastControl.MaxPeers` + +Sets a hard limit on the number of peers to send broadcasts to. A value of `0` means no broadcasts are sent. A value of `-1` means there is no limit. + +Default: `-1` (no limit) + +Type: `optionalInteger` (`-1` means no limit, `0` means no broadcasts) + +##### `Internal.Bitswap.BroadcastControl.LocalPeers` + +Enables or disables broadcast control for peers on the local network.
Peers that have private or loopback addresses are considered to be on the local network. If this setting is `false`, then always broadcast to peers on the local network. If `true`, apply broadcast control to local peers. + +Default: `false` (Always broadcast to peers on local network) + +Type: `flag` + +##### `Internal.Bitswap.BroadcastControl.PeeredPeers` + +Enables or disables broadcast reduction for peers configured for peering. If `false`, then always broadcast to peers configured for peering. If `true`, apply broadcast reduction to peered peers. + +Default: `false` (Always broadcast to peers configured for peering) + +Type: `flag` + +##### `Internal.Bitswap.BroadcastControl.MaxRandomPeers` + +Sets the number of peers to broadcast to anyway, even though broadcast control logic has determined that they are not broadcast targets. Setting this to a non-zero value ensures at least this number of random peers receives a broadcast. This may be helpful in cases where peers that are not receiving broadcasts may have wanted blocks. + +Default: `0` (do not send broadcasts to peers not already targeted by broadcast control) + +Type: `optionalInteger` (non-negative, 0 means do not broadcast to any random peers) + +##### `Internal.Bitswap.BroadcastControl.SendToPendingPeers` + +Enables or disables sending broadcasts to any peers to which there is a pending message to send. When enabled, this sends broadcasts to many more peers, but does so in a way that does not increase the number of separate broadcast messages. There is still the increased cost of the recipients having to process and respond to the broadcasts. + +Default: `false` (Do not send broadcasts to all peers for which there are pending messages) + +Type: `flag` + ### `Internal.UnixFSShardingSizeThreshold` -The sharding threshold used internally to decide whether a UnixFS directory should be sharded or not.
-This value is not strictly related to the size of the UnixFS directory block and any increases in -the threshold should come with being careful that block sizes stay under 2MiB in order for them to be -reliably transferable through the networking stack (IPFS peers on the public swarm tend to ignore requests for blocks bigger than 2MiB). +**MOVED:** see [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold) -Decreasing this value to 1B is functionally equivalent to the previous experimental sharding option to -shard all directories. +### `Internal.MFSNoFlushLimit` -Type: `optionalBytes` (`null` means default which is 256KiB) +Controls the maximum number of consecutive MFS operations allowed with `--flush=false` +before requiring a manual flush. This prevents unbounded memory growth and ensures +data consistency when using deferred flushing with `ipfs files` commands. + +When the limit is reached, further operations will fail with an error message +instructing the user to run `ipfs files flush`, use `--flush=true`, or increase +this limit in the configuration. + +**Why operations fail instead of auto-flushing:** Automatic flushing once the limit +is reached was considered but rejected because it can lead to data corruption issues +that are difficult to debug. When the system decides to flush without user knowledge, it can: + +- Create partial states that violate user expectations about atomicity +- Interfere with concurrent operations in unexpected ways +- Make debugging and recovery much harder when issues occur + +By failing explicitly, users maintain control over when their data is persisted, +allowing them to: + +- Batch related operations together before flushing +- Handle errors predictably at natural transaction boundaries +- Understand exactly when and why their data is written to disk + +If you expect automatic flushing behavior, simply use the default `--flush=true` +(or omit the flag entirely) instead of `--flush=false`. 
+ +**⚠️ WARNING:** Increasing this limit or disabling it (setting to 0) can lead to: + +- **Out-of-memory errors (OOM)** - Each unflushed operation consumes memory +- **Data loss** - If the daemon crashes before flushing, all unflushed changes are lost +- **Degraded performance** - Large unflushed caches slow down MFS operations + +Default: `256` + +Type: `optionalInteger` (0 disables the limit, strongly discouraged) + +**Note:** This is an EXPERIMENTAL feature and may change or be removed in future releases. +See [#10842](https://github.com/ipfs/kubo/issues/10842) for more information. ## `Ipns` @@ -1052,9 +1759,10 @@ When `Ipns.MaxCacheTTL` is set, it defines the upper bound limit of how long a will be cached and read from cache before checking for updates. **Examples:** -* `"1m"` IPNS results are cached 1m or less (good compromise for system where + +- `"1m"` IPNS results are cached 1m or less (good compromise for system where faster updates are desired). -* `"0s"` IPNS caching is effectively turned off (useful for testing, bad for production use) +- `"0s"` IPNS caching is effectively turned off (useful for testing, bad for production use) - **Note:** setting this to `0` will turn off TTL-based caching entirely. This is discouraged in production environments. It will make IPNS websites artificially slow because IPNS resolution results will expire as soon as @@ -1064,7 +1772,6 @@ will be cached and read from cache before checking for updates. Default: No upper bound, [TTL from IPNS Record](https://specs.ipfs.tech/ipns/ipns-record/#ttl-uint64) (see `ipns name publish --help`) is always respected. - Type: `optionalDuration` ### `Ipns.UsePubsub` @@ -1077,25 +1784,59 @@ Default: `disabled` Type: `flag` +### `Ipns.DelegatedPublishers` + +HTTP endpoints for delegated IPNS publishing operations. These endpoints must support the [IPNS API](https://specs.ipfs.tech/routing/http-routing-v1/#ipns-api) from the Delegated Routing V1 HTTP specification. 
+ +The special value `"auto"` loads delegated publishers from [AutoConf](#autoconf) when enabled. + +**Publishing behavior depends on routing configuration:** + +- `Routing.Type=auto` (default): Uses DHT for publishing, `"auto"` resolves to empty list +- `Routing.Type=delegated`: Uses HTTP delegated publishers only, `"auto"` resolves to configured endpoints + +When using `"auto"`, inspect the effective publishers with: `ipfs config Ipns.DelegatedPublishers --expand-auto` + +**Command flags override publishing behavior:** + +- `--allow-offline` - Publishes to local datastore without requiring network connectivity +- `--allow-delegated` - Uses local datastore and HTTP delegated publishers only (no DHT connectivity required) + +For self-hosting, you can run your own `/routing/v1/ipns` endpoint using [someguy](https://github.com/ipfs/someguy/). + +Default: `["auto"]` + +Type: `array[string]` (URLs or `"auto"`) + ## `Migration` -Migration configures how migrations are downloaded and if the downloads are added to IPFS locally. +> [!WARNING] +> **DEPRECATED:** Only applies to legacy migrations (repo versions <16). Modern repos (v16+) use embedded migrations. +> This section is optional and will not appear in new configurations. ### `Migration.DownloadSources` -Sources in order of preference, where "IPFS" means use IPFS and "HTTPS" means use default gateways. Any other values are interpreted as hostnames for custom gateways. An empty list means "use default sources". +**DEPRECATED:** Download sources for legacy migrations. Only `"HTTPS"` is supported. -Default: `["HTTPS", "IPFS"]` +Type: `array[string]` (optional) + +Default: `["HTTPS"]` ### `Migration.Keep` -Specifies whether or not to keep the migration after downloading it. Options are "discard", "cache", "pin". Empty string for default. +**DEPRECATED:** Controls retention of legacy migration binaries. Options: `"cache"` (default), `"discard"`, `"keep"`. 
-Default: `cache` +Type: `string` (optional) + +Default: `"cache"` ## `Mounts` -**EXPERIMENTAL:** read about current limitations at [fuse.md](./fuse.md). +> [!CAUTION] +> **EXPERIMENTAL:** +> This feature is disabled by default, requires an explicit opt-in with `ipfs mount` or `ipfs daemon --mount`. +> +> Read about current limitations at [fuse.md](./fuse.md). FUSE mount point configuration options. @@ -1115,9 +1856,22 @@ Default: `/ipns` Type: `string` (filesystem path) +### `Mounts.MFS` + +Mountpoint for Mutable File System (MFS) behind the `ipfs files` API. + +> [!CAUTION] +> +> - Write support is highly experimental and not recommended for mission-critical deployments. +> - Avoid storing lazy-loaded datasets in MFS. Exposing a partially local, lazy-loaded DAG risks operating system search indexers crawling it, which may trigger unintended network prefetching of non-local DAG components. + +Default: `/mfs` + +Type: `string` (filesystem path) + ### `Mounts.FuseAllowOther` -Sets the 'FUSE allow other'-option on the mount point. +Sets the 'FUSE allow-other' option on the mount point. ## `Pinning` @@ -1132,13 +1886,14 @@ A remote pinning service is a remote service that exposes an API for managing that service's interest in long-term data storage. The exposed API conforms to the specification defined at -https://ipfs.github.io/pinning-services-api-spec/ + #### `Pinning.RemoteServices: API` Contains information relevant to utilizing the remote pinning service Example: + ```json { "Pinning": { @@ -1158,7 +1913,7 @@ Example: The HTTP(S) endpoint through which to access the pinning service -Example: "https://pinningservice.tld:1234/my/api/path" +Example: "" Type: `string` @@ -1208,6 +1963,435 @@ Default: `"5m"` Type: `duration` +## `Provide` + +Configures how your node advertises content to make it discoverable by other +peers. + +**What is providing?** When your node stores content, it publishes provider +records to the routing system announcing "I have this content". 
These records +map CIDs to your peer ID, enabling content discovery across the network. + +While designed to support multiple routing systems in the future, the current +default configuration only supports [providing to the Amino DHT](#providedht). + +### `Provide.Enabled` + +Controls whether Kubo provide and reprovide systems are enabled. + +> [!CAUTION] +> Disabling this will prevent other nodes from discovering your content. +> Your node will stop announcing data to the routing system, making it +> inaccessible unless peers connect to you directly. + +Default: `true` + +Type: `flag` + +### `Provide.Strategy` + +Tells the provide system what should be announced. Valid strategies are: + +- `"all"` - announce all CIDs of stored blocks +- `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks) + - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins +- `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`) + - **⚠️ BE CAREFUL:** node with `roots` strategy will not announce child blocks. + It makes sense only for use cases where the entire DAG is fetched in full, + and a graceful resume does not have to be guaranteed: the lack of child + announcements means an interrupted retrieval won't be able to find + providers for the missing block in the middle of a file, unless the peer + happens to already be connected to a provider and asks for child CID over + bitswap. +- `"mfs"` - announce only the local CIDs that are part of the MFS (`ipfs files`) + - Note: MFS is lazy-loaded. Only the MFS blocks present in local datastore are announced. +- `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies. + - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache. + - Order: first `pinned` and then the locally available part of `mfs`. 
+ +**Strategy changes automatically clear the provide queue.** When you change `Provide.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`. + +**Memory requirements:** + +- Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million CIDs for reproviding to the Amino DHT. +- This is due to the use of a buffered provider, which loads all CIDs into memory to avoid holding a lock on the entire pinset during the reprovide cycle. + +Default: `"all"` + +Type: `optionalString` (unset for the default) + +### `Provide.DHT` + +Configuration for providing data to Amino DHT peers. + +**Provider record lifecycle:** On the Amino DHT, provider records expire after +[`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43). +Your node must re-announce (reprovide) content periodically to keep it +discoverable. The [`Provide.DHT.Interval`](#providedhtinterval) setting +controls this timing, with the default ensuring records refresh well before +expiration or negative churn effects kick in. + +**Two provider systems:** + +- **Sweep provider**: Divides the DHT keyspace into regions and systematically + sweeps through them over the reprovide interval. This batches CIDs allocated + to the same DHT servers, dramatically reducing the number of DHT lookups and + PUTs needed. Spreads work evenly over time with predictable resource usage. + +- **Legacy provider**: Processes each CID individually with separate DHT + lookups. Works well for small content collections but struggles to complete + reprovide cycles when managing thousands of CIDs. 
+ +#### Monitoring Provide Operations + +**Quick command-line monitoring:** Use `ipfs provide stat` to view the current +state of the provider system. For real-time monitoring, run +`watch ipfs provide stat --all --compact` to see detailed statistics refreshed +continuously in a 2-column layout. + +**Long-term monitoring:** For in-depth or long-term monitoring, metrics are +exposed at the Prometheus endpoint: `{Addresses.API}/debug/metrics/prometheus` +(default: `http://127.0.0.1:5001/debug/metrics/prometheus`). Different metrics +are available depending on whether you use legacy mode (`SweepEnabled=false`) or +sweep mode (`SweepEnabled=true`). See [Provide metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide) +for details. + +**Debug logging:** For troubleshooting, enable detailed logging by setting: + +```sh +GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug +``` + +- `provider=debug` enables generic logging (legacy provider and any non-dht operations) +- `dht/provider=debug` enables logging for the sweep provider + +#### `Provide.DHT.Interval` + +Sets how often to re-announce content to the DHT. Provider records on Amino DHT +expire after [`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43). + +**Why this matters:** The interval must be shorter than the expiration window to +ensure provider records refresh before they expire. The default value is +approximately half of [`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43), +which accounts for network churn and ensures records stay alive without +overwhelming the network with unnecessary announcements. + +**With sweep mode enabled +([`Provide.DHT.SweepEnabled`](#providedhtsweepenabled)):** The system spreads +reprovide operations smoothly across this entire interval. 
Each keyspace region +is reprovided at scheduled times throughout the period, ensuring each region's +announcements complete before records expire. + +**With legacy mode:** The system attempts to reprovide all CIDs as quickly as +possible at the start of each interval. If reproviding takes longer than this +interval (common with large datasets), the next cycle is skipped and provider +records may expire. + +- If unset, it uses the implicit safe default. +- If set to the value `"0"` it will disable content reproviding to DHT. + +> [!CAUTION] +> Disabling this will prevent other nodes from discovering your content via the DHT. +> Your node will stop announcing data to the DHT, making it +> inaccessible unless peers connect to you directly. Since provider +> records expire after `amino.DefaultProvideValidity`, your content will become undiscoverable +> after this period. + +Default: `22h` + +Type: `optionalDuration` (unset for the default) + +#### `Provide.DHT.MaxWorkers` + +Sets the maximum number of _concurrent_ DHT provide operations. + +**When `Provide.DHT.SweepEnabled` is false (legacy mode):** + +- Controls NEW CID announcements only +- Reprovide operations do **not** count against this limit +- A value of `0` allows unlimited provide workers + +**When `Provide.DHT.SweepEnabled` is true:** + +- Controls the total worker pool for both provide and reprovide operations +- Workers are split between periodic reprovides and burst provides +- Use a positive value to control resource usage +- See [`DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers) and [`DedicatedBurstWorkers`](#providedhtdedicatedburstworkers) for task allocation + +If the [accelerated DHT client](#routingaccelerateddhtclient) is enabled, each +provide operation opens ~20 connections in parallel. With the standard DHT +client (accelerated disabled), each provide opens between 20 and 60 +connections, with at most 10 active at once. 
Provides complete more quickly +when using the accelerated client. Be mindful of how many simultaneous +connections this setting can generate. + +> [!CAUTION] +> For nodes without strict connection limits that need to provide large volumes +> of content, we recommend first trying `Provide.DHT.SweepEnabled=true` for efficient +> announcements. If announcements are still not fast enough, adjust `Provide.DHT.MaxWorkers`. +> As a last resort, consider enabling `Routing.AcceleratedDHTClient=true` but be aware that it is very resource hungry. +> +> At the same time, mind that raising this value too high may lead to increased load. +> Proceed with caution, ensure proper hardware and networking are in place. + +> [!TIP] +> **When `SweepEnabled` is true:** Users providing millions of CIDs or more +> should increase the worker count accordingly. Underprovisioning can lead to +> slow provides (burst workers) and inability to keep up with content +> reproviding (periodic workers). For nodes with sufficient resources (CPU, +> bandwidth, number of connections), dedicating `1024` for [periodic +> workers](#providedhtdedicatedperiodicworkers) and `512` for [burst +> workers](#providedhtdedicatedburstworkers), and `2048` [max +> workers](#providedhtmaxworkers) should be adequate even for the largest +> users. The system will only use workers as needed - unused resources won't be +> consumed. Ensure you adjust the swarm [connection manager](#swarmconnmgr) and +> [resource manager](#swarmresourcemgr) configuration accordingly. +> See [Capacity Planning](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md#capacity-planning) for more details. + +Default: `16` + +Type: `optionalInteger` (non-negative; `0` means unlimited number of workers) + +#### `Provide.DHT.SweepEnabled` + +Enables the sweep provider for efficient content announcements. When disabled, +the legacy [`boxo/provider`](https://github.com/ipfs/boxo/tree/main/provider) is +used instead. 
+ +**The legacy provider problem:** The legacy system processes CIDs one at a +time, requiring a separate DHT lookup (10-20 seconds each) to find the 20 +closest peers for each CID. This sequential approach typically handles less +than 10,000 CIDs over 22h ([`Provide.DHT.Interval`](#providedhtinterval)). If +your node has more CIDs than can be reprovided within +[`Provide.DHT.Interval`](#providedhtinterval), provider records start expiring +after +[`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43), +making content undiscoverable. + +**How sweep mode works:** The sweep provider divides the DHT keyspace into +regions based on keyspace prefixes. It estimates the Amino DHT size, calculates +how many regions are needed (sized to contain at least 20 peers each), then +schedules region processing evenly across +[`Provide.DHT.Interval`](#providedhtinterval). When processing a region, it +discovers the peers in that region once, then sends all provider records for +CIDs allocated to those peers in a batch. This batching is the key efficiency: +instead of N lookups for N CIDs, the number of lookups is bounded by a constant +fraction of the Amino DHT size (e.g., ~3,000 lookups when there are ~10,000 DHT +servers), regardless of how many CIDs you're providing. + +**Efficiency gains:** For a node providing 100,000 CIDs, sweep mode reduces +lookups by 97% compared to legacy. The work spreads smoothly over time rather +than completing in bursts, preventing resource spikes and duplicate +announcements. Long-running nodes reprovide systematically just before records +would expire, keeping content continuously discoverable without wasting +bandwidth. + +**Implementation details:** The sweep provider tracks CIDs in a persistent +keystore. New content added via `StartProviding()` enters the provide queue and +gets batched by keyspace region.
The keystore is periodically refreshed at each +[`Provide.DHT.Interval`](#providedhtinterval) with CIDs matching +[`Provide.Strategy`](#providestrategy) to ensure only current content remains +scheduled. This handles cases where content is unpinned or removed. + +**Persistent reprovide cycle state:** When Provide Sweep is enabled, the +reprovide cycle state is persisted to the datastore by default. On restart, Kubo +automatically resumes from where it left off. If the node was offline for an +extended period, all CIDs that haven't been reprovided within the configured +[`Provide.DHT.Interval`](#providedhtinterval) are immediately queued for +reproviding. Additionally, the provide queue is persisted on shutdown and +restored on startup, ensuring no pending provide operations are lost. If you +don't want to keep the persisted provider state from a previous run, you can +disable this behavior by setting [`Provide.DHT.ResumeEnabled`](#providedhtresumeenabled) +to `false`. + +> +> +> +> Reprovide Cycle Comparison +> +> +> The diagram compares performance patterns: +> +> - **Legacy mode**: Sequential processing, one lookup per CID, struggles with large datasets +> - **Sweep mode**: Smooth distribution over time, batched lookups by keyspace region, predictable resource usage +> - **Accelerated DHT**: Hourly network crawls creating traffic spikes, high resource usage +> +> Sweep mode achieves similar effectiveness to the Accelerated DHT client but with steady resource consumption. + +For background on the sweep provider design and motivations, see Shipyard's blogpost [Provide Sweep: Solving the DHT Provide Bottleneck](https://ipshipyard.com/blog/2025-dht-provide-sweep/). + +You can compare the effectiveness of sweep mode vs legacy mode by monitoring the appropriate metrics (see [Monitoring Provide Operations](#monitoring-provide-operations) above). + +> [!NOTE] +> This is the default provider system as of Kubo v0.39. 
To use the legacy provider instead, set `Provide.DHT.SweepEnabled=false`. + +> [!NOTE] +> When DHT routing is unavailable (e.g., `Routing.Type=custom` with only HTTP routers), the provider automatically falls back to the legacy provider regardless of this setting. + +Default: `true` + +Type: `flag` + +#### `Provide.DHT.ResumeEnabled` + +Controls whether the provider resumes from its previous state on restart. Only +applies when `Provide.DHT.SweepEnabled` is true. + +When enabled (the default), the provider persists its reprovide cycle state and +provide queue to the datastore, and restores them on restart. This ensures: + +- The reprovide cycle continues from where it left off instead of starting over +- Any CIDs in the provide queue during shutdown are restored and provided after +restart +- CIDs that missed their reprovide window while the node was offline are queued +for immediate reproviding + +When disabled, the provider starts fresh on each restart, discarding any +previous reprovide cycle state and provide queue. On a fresh start, all CIDs +matching the [`Provide.Strategy`](#providestrategy) will be provided ASAP (as +burst provides), and then keyspace regions are reprovided according to the +regular schedule starting from the beginning of the reprovide cycle. + +> [!NOTE] +> Disabling this option means the provider will provide all content matching +> your strategy on every restart (which can be resource-intensive for large +> datasets), then start from the beginning of the reprovide cycle. For nodes +> with large datasets or frequent restarts, keeping this enabled (the default) +> is recommended for better resource efficiency and more consistent reproviding +> behavior. + +Default: `true` + +Type: `flag` + +#### `Provide.DHT.DedicatedPeriodicWorkers` + +Number of workers dedicated to periodic keyspace region reprovides. Only +applies when `Provide.DHT.SweepEnabled` is true. 
+ +Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this +number of workers will be dedicated to the periodic region reprovide only. The sum of +`DedicatedPeriodicWorkers` and `DedicatedBurstWorkers` should not exceed `MaxWorkers`. +Any remaining workers (MaxWorkers - DedicatedPeriodicWorkers - DedicatedBurstWorkers) +form a shared pool that can be used for either type of work as needed. + +> [!NOTE] +> If the provider system isn't able to keep up with reproviding all your +> content within the [Provide.DHT.Interval](#providedhtinterval), consider +> increasing this value. + +Default: `2` + +Type: `optionalInteger` (`0` means there are no dedicated workers, but the +operation can be performed by free non-dedicated workers) + +#### `Provide.DHT.DedicatedBurstWorkers` + +Number of workers dedicated to burst provides. Only applies when `Provide.DHT.SweepEnabled` is true. + +Burst provides are triggered by: + +- Manual provide commands (`ipfs routing provide`) +- New content matching your `Provide.Strategy` (blocks from `ipfs add`, bitswap, or trustless gateway requests) +- Catch-up reprovides after being disconnected/offline for a while + +Having dedicated burst workers ensures that bulk operations (like adding many CIDs +or reconnecting to the network) don't delay regular periodic reprovides, and vice versa. + +Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this +number of workers will be dedicated to burst provides only. In addition to +these, if there are available workers in the pool, they can also be used for +burst provides. + +> [!NOTE] +> If CIDs aren't provided quickly enough to your taste, and you can afford more +> CPU and bandwidth, consider increasing this value. 
+ +Default: `1` + +Type: `optionalInteger` (`0` means there are no dedicated workers, but the +operation can be performed by free non-dedicated workers) + +#### `Provide.DHT.MaxProvideConnsPerWorker` + +Maximum number of connections that a single worker can use to send provider +records over the network. + +When reproviding CIDs corresponding to a keyspace region, the reprovider must +send a provider record to the 20 closest peers to the CID (in XOR distance) for +each CID belonging to this keyspace region. + +The reprovider opens a connection to a peer from that region, sends it all its +allocated provider records. Once done, it opens a connection to the next peer +from that keyspace region until all provider records are assigned. + +This option defines how many such connections can be open concurrently by a +single worker. + +> [!NOTE] +> Increasing this value can speed up the provide operation, at the cost of +> opening more simultaneous connections to DHT servers. A keyspace region typically +> has fewer than 60 peers, so you may hit a performance ceiling beyond which +> increasing this value has no effect. + +Default: `20` + +Type: `optionalInteger` (non-negative) + +#### `Provide.DHT.KeystoreBatchSize` + +During the garbage collection, all keys stored in the Keystore are removed, and +the keys are streamed from a channel to fill the Keystore again with up-to-date +keys. Since a high number of CIDs to reprovide can easily fill up the memory, +keys are read and written in batches to optimize for memory usage. + +This option defines how many multihashes should be contained within a batch. A +multihash is usually represented by 34 bytes. + +Default: `16384` (~544 KiB per batch) + +Type: `optionalInteger` (non-negative) + +#### `Provide.DHT.OfflineDelay` + +The `SweepingProvider` has 3 states: `ONLINE`, `DISCONNECTED` and `OFFLINE`. It +starts `OFFLINE`, and as the node bootstraps, it changes its state to `ONLINE`.
+ +When the provider loses connection to all DHT peers, it switches to the +`DISCONNECTED` state. In this state, new provides will be added to the provide +queue, and provided as soon as the node comes back online. + +After a node has been `DISCONNECTED` for `OfflineDelay`, it goes to `OFFLINE` +state. When `OFFLINE`, the provider drops the provide queue, and returns errors +to new provide requests. However, when `OFFLINE` the provider still adds the +keys to its state, so keys will eventually be provided in the +[`Provide.DHT.Interval`](#providedhtinterval) after the provider comes back +`ONLINE`. + +Default: `2h` + +Type: `optionalDuration` + +## `Provider` + +### `Provider.Enabled` + +**REMOVED** + +Replaced with [`Provide.Enabled`](#provideenabled). + +### `Provider.Strategy` + +**REMOVED** + +This field was unused. Use [`Provide.Strategy`](#providestrategy) instead. + +### `Provider.WorkerCount` + +**REMOVED** + +Replaced with [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers). + ## `Pubsub` **DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717) @@ -1232,9 +2416,9 @@ Type: `flag` Sets the default router used by pubsub to route messages to peers. This can be one of: -* `"floodsub"` - floodsub is a basic router that simply _floods_ messages to all +- `"floodsub"` - floodsub is a basic router that simply _floods_ messages to all connected peers. This router is extremely inefficient but _very_ reliable. -* `"gossipsub"` - [gossipsub][] is a more advanced routing algorithm that will +- `"gossipsub"` - [gossipsub][] is a more advanced routing algorithm that will build an overlay mesh from a subset of the links in the network. Default: `"gossipsub"` @@ -1313,11 +2497,11 @@ improve reliability. Use-cases: -* An IPFS gateway connected to an IPFS cluster should peer to ensure that the +- An IPFS gateway connected to an IPFS cluster should peer to ensure that the gateway can always fetch content from the cluster. 
-* A dapp may peer embedded Kubo nodes with a set of pinning services or +- A dapp may peer embedded Kubo nodes with a set of pinning services or textile cafes/hubs. -* A set of friends may peer to ensure that they can always fetch each other's +- A set of friends may peer to ensure that they can always fetch each other's content. When a node is added to the set of peered nodes, Kubo will: @@ -1333,9 +2517,9 @@ When a node is added to the set of peered nodes, Kubo will: Peering can be asymmetric or symmetric: -* When symmetric, the connection will be protected by both nodes and will likely +- When symmetric, the connection will be protected by both nodes and will likely be very stable. -* When asymmetric, only one node (the node that configured peering) will protect +- When asymmetric, only one node (the node that configured peering) will protect the connection and attempt to re-connect to the peered node on disconnect. If the peered node is under heavy load and/or has a low connection limit, the connection may flap repeatedly. Be careful when asymmetrically peering to not @@ -1375,42 +2559,15 @@ Type: `array[peering]` ### `Reprovider.Interval` -Sets the time between rounds of reproviding local content to the routing -system. +**REMOVED** -- If unset, it uses the implicit safe default. -- If set to the value `"0"` it will disable content reproviding. - -Note: disabling content reproviding will result in other nodes on the network -not being able to discover that you have the objects that you have. If you want -to have this disabled and keep the network aware of what you have, you must -manually announce your content periodically. - -Default: `22h` (`DefaultReproviderInterval`) - -Type: `optionalDuration` (unset for the default) +Replaced with [`Provide.DHT.Interval`](#providedhtinterval). ### `Reprovider.Strategy` -Tells reprovider what should be announced. 
Valid strategies are: +**REMOVED** -- `"all"` - announce all CIDs of stored blocks - - Order: root blocks of direct and recursive pins are announced first, then the rest of blockstore -- `"pinned"` - only announce pinned CIDs recursively (both roots and child blocks) - - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins -- `"roots"` - only announce the root block of explicitly pinned CIDs - - **⚠️ BE CAREFUL:** node with `roots` strategy will not announce child blocks. - It makes sense only for use cases where the entire DAG is fetched in full, - and a graceful resume does not have to be guaranteed: the lack of child - announcements means an interrupted retrieval won't be able to find - providers for the missing block in the middle of a file, unless the peer - happens to already be connected to a provider and ask for child CID over - bitswap. -- `"flat"` - same as `all`, announce all CIDs of stored blocks, but without prioritizing anything - -Default: `"all"` - -Type: `optionalString` (unset for the default) +Replaced with [`Provide.Strategy`](#providestrategy). ## `Routing` @@ -1418,51 +2575,69 @@ Contains options for content, peer, and IPNS routing mechanisms. ### `Routing.Type` -There are multiple routing options: "auto", "autoclient", "none", "dht", "dhtclient", and "custom". +Controls how your node discovers content and peers on the network. -* **DEFAULT:** If unset, or set to "auto", your node will use the public IPFS DHT (aka "Amino") - and parallel HTTP routers listed below for additional speed. +**Production options:** -* If set to "autoclient", your node will behave as in "auto" but without running a DHT server. +- **`auto`** (default): Uses both the public IPFS DHT (Amino) and HTTP routers + from [`Routing.DelegatedRouters`](#routingdelegatedrouters) for faster lookups. + Your node starts as a DHT client and automatically switches to server mode + when reachable from the public internet. 
-* If set to "none", your node will use _no_ routing system. You'll have to - explicitly connect to peers that have the content you're looking for. +- **`autoclient`**: Same as `auto`, but never runs a DHT server. + Use this if your node is behind a firewall or NAT. -* If set to "dht" (or "dhtclient"/"dhtserver"), your node will ONLY use the Amino DHT (no HTTP routers). +- **`dht`**: Uses only the Amino DHT (no HTTP routers). Automatically switches + between client and server mode based on reachability. -* If set to "custom", all default routers are disabled, and only ones defined in `Routing.Routers` will be used. +- **`dhtclient`**: DHT-only, always running as a client. Lower resource usage. -When the DHT is enabled, it can operate in two modes: client and server. +- **`dhtserver`**: DHT-only, always running as a server. + Only use this if your node is reachable from the public internet. -* In server mode, your node will query other peers for DHT records, and will - respond to requests from other peers (both requests to store records and - requests to retrieve records). +- **`none`**: Disables all routing. You must manually connect to peers. -* In client mode, your node will query the DHT as a client but will not respond - to requests from other peers. This mode is less resource-intensive than server - mode. +**About DHT client vs server mode:** +When the DHT is enabled, your node can operate as either a client or server. +In server mode, it queries other peers and responds to their queries - this helps +the network but uses more resources. In client mode, it only queries others without +responding, which is less resource-intensive. With `auto` or `dht`, your node starts +as a client and switches to server when it detects public reachability. -When `Routing.Type` is set to `auto` or `dht`, your node will start as a DHT client, and -switch to a DHT server when and if it determines that it's reachable from the -public internet (e.g., it's not behind a firewall). 
+> [!CAUTION] +> **`Routing.Type` Experimental options:** +> +> These modes are for research and testing only, not production use. +> They may change without notice between releases. +> +> - **`delegated`**: Uses only HTTP routers from [`Routing.DelegatedRouters`](#routingdelegatedrouters) +> and IPNS publishers from [`Ipns.DelegatedPublishers`](#ipnsdelegatedpublishers), +> without initializing the DHT. Useful when peer-to-peer connectivity is unavailable. +> Note: cannot provide content to the network (no DHT means no provider records). +> +> - **`custom`**: Disables all default routers. You define your own routing in +> [`Routing.Routers`](#routingrouters). See [delegated-routing.md](delegated-routing.md). -To force a specific Amino DHT-only mode, client or server, set `Routing.Type` to -`dhtclient` or `dhtserver` respectively. Please do not set this to `dhtserver` -unless you're sure your node is reachable from the public network. - -When `Routing.Type` is set to `auto` or `autoclient` your node will accelerate some types of routing -by leveraging HTTP endpoints compatible with [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) -introduced in [IPIP-337](https://github.com/ipfs/specs/pull/337) -in addition to the Amino DHT. -By default, an instance of [IPNI](https://github.com/ipni/specs/blob/main/IPNI.md#readme) -at https://cid.contact is used. - -Alternative routing rules can be configured in `Routing.Routers` after setting `Routing.Type` to `custom`. - -Default: `auto` (DHT + IPNI) +Default: `auto` Type: `optionalString` (`null`/missing means the default) +### `Routing.DelegatedRouters` + +An array of URL hostnames for delegated routers to be queried in addition to the Amino DHT when `Routing.Type` is set to `auto` (default) or `autoclient`. +These endpoints must support the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/). 
+ +The special value `"auto"` uses delegated routers from [AutoConf](#autoconf) when enabled. +You can combine `"auto"` with custom URLs (e.g., `["auto", "https://custom.example.com"]`) to query both the default delegated routers and your own endpoints. The first `"auto"` entry gets substituted with autoconf values, and other URLs are preserved. + +> [!TIP] +> Delegated routing allows IPFS implementations to offload tasks like content routing, peer routing, and naming to a separate process or server while also benefiting from HTTP caching. +> +> One can run their own delegated router either by implementing the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) themselves, or by using [Someguy](https://github.com/ipfs/someguy), a turn-key implementation that proxies requests to other routing systems. A public utility instance of Someguy is hosted at [`https://delegated-ipfs.dev`](https://docs.ipfs.tech/concepts/public-utilities/#delegated-routing). + +Default: `["auto"]` + +Type: `array[string]` (URLs or `"auto"`) ### `Routing.AcceleratedDHTClient` @@ -1480,31 +2655,34 @@ This is not compatible with `Routing.Type` `custom`. If you are using composable you can configure this individually on each router. When it is enabled: + - Client DHT operations (reads and writes) should complete much faster - The provider will now use a keyspace sweeping mode allowing to keep alive CID sets that are multiple orders of magnitude larger. + - **Note:** For improved provide/reprovide operations specifically, consider using + [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled) instead, which offers similar + benefits without the hourly traffic spikes. - The standard Bucket-Routing-Table DHT will still run for the DHT server (if the DHT server is enabled). This means the classical routing table will still be used to answer other nodes. This is critical to maintain to not harm the network. 
- The operations `ipfs stats dht` will default to showing information about the accelerated DHT client -**Caveats:** -1. Running the accelerated client likely will result in more resource consumption (connections, RAM, CPU, bandwidth) - - Users that are limited in the number of parallel connections their machines/networks can perform will likely suffer - - The resource usage is not smooth as the client crawls the network in rounds and reproviding is similarly done in rounds - - Users who previously had a lot of content but were unable to advertise it on the network will see an increase in - egress bandwidth as their nodes start to advertise all of their CIDs into the network. If you have lots of data - entering your node that you don't want to advertise, then consider using [Reprovider Strategies](#reproviderstrategy) - to reduce the number of CIDs that you are reproviding. Similarly, if you are running a node that deals mostly with - short-lived temporary data (e.g. you use a separate node for ingesting data then for storing and serving it) then - you may benefit from using [Strategic Providing](experimental-features.md#strategic-providing) to prevent advertising - of data that you ultimately will not have. -2. Currently, the DHT is not usable for queries for the first 5-10 minutes of operation as the routing table is being -prepared. This means operations like searching the DHT for particular peers or content will not work initially. - - You can see if the DHT has been initially populated by running `ipfs stats dht` -3. Currently, the accelerated DHT client is not compatible with LAN-based DHTs and will not perform operations against -them +> [!CAUTION] +> **`Routing.AcceleratedDHTClient` Caveats:** +> +> 1. 
Running the accelerated client likely will result in more resource consumption (connections, RAM, CPU, bandwidth) +> - Users that are limited in the number of parallel connections their machines/networks can perform will be most affected +> - The resource usage is not smooth as the client crawls the network in rounds and reproviding is similarly done in rounds +> - Users who previously had a lot of content but were unable to advertise it on the network will see an increase in +> egress bandwidth as their nodes start to advertise all of their CIDs into the network. If you have lots of data +> entering your node that you don't want to advertise, consider using [`Provide.*`](#provide) configuration +> to control which CIDs are reprovided. +> 2. Currently, the DHT is not usable for queries for the first 5-10 minutes of operation as the routing table is being +> prepared. This means operations like searching the DHT for particular peers or content will not work initially. +> - You can see if the DHT has been initially populated by running `ipfs stats dht` +> 3. Currently, the accelerated DHT client is not compatible with LAN-based DHTs and will not perform operations against +> them. Default: `false` @@ -1522,13 +2700,36 @@ Default: `false` Type: `bool` (missing means `false`) +### `Routing.IgnoreProviders` + +An array of [string-encoded PeerIDs](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md#string-representation). Any provider record associated to one of these peer IDs is ignored. + +Apart from ignoring specific providers for reasons like misbehaviour etc. this +setting is useful to ignore providers as a way to indicate preference, when the same provider +is found under different peerIDs (i.e. one for HTTP and one for Bitswap retrieval). + +> [!TIP] +> This denylist operates on PeerIDs. +> To deny specific HTTP Provider URL, use [`HTTPRetrieval.Denylist`](#httpretrievaldenylist) instead. 
+ +Default: `[]` + +Type: `array[string]` + ### `Routing.Routers` -**EXPERIMENTAL: `Routing.Routers` configuration may change in future release** +Alternative configuration used when `Routing.Type=custom`. -Map of additional Routers. +> [!CAUTION] +> **EXPERIMENTAL: `Routing.Routers` is for research and testing only, not production use.** +> +> - The configuration format and behavior may change without notice between releases. +> - Bugs and regressions may not be prioritized. +> - HTTP-only configurations cannot reliably provide content. See [delegated-routing.md](delegated-routing.md#limitations). +> +> Most users should use `Routing.Type=auto` or `autoclient` with [`Routing.DelegatedRouters`](#routingdelegatedrouters). -Allows for extending the default routing (Amino DHT) with alternative Router +Allows for replacing the default routing (Amino DHT) with alternative Router implementations. The map key is a name of a Router, and the value is its configuration. @@ -1537,9 +2738,9 @@ Default: `{}` Type: `object[string->object]` -#### `Routing.Routers: Type` +#### `Routing.Routers.[name].Type` -**EXPERIMENTAL: `Routing.Routers` configuration may change in future release** +**⚠️ EXPERIMENTAL: For research and testing only. May change without notice.** It specifies the routing type that will be created. @@ -1551,46 +2752,62 @@ Currently supported types: Type: `string` -#### `Routing.Routers: Parameters` +#### `Routing.Routers.[name].Parameters` -**EXPERIMENTAL: `Routing.Routers` configuration may change in future release** +**⚠️ EXPERIMENTAL: For research and testing only. May change without notice.** Parameters needed to create the specified router. Supported params per router type: HTTP: - - `Endpoint` (mandatory): URL that will be used to connect to a specified router. - - `MaxProvideBatchSize`: This number determines the maximum amount of CIDs sent per batch. Servers might not accept more than 100 elements per batch. 100 elements by default. 
- - `MaxProvideConcurrency`: It determines the number of threads used when providing content. GOMAXPROCS by default. + +- `Endpoint` (mandatory): URL that will be used to connect to a specified router. +- `MaxProvideBatchSize`: This number determines the maximum amount of CIDs sent per batch. Servers might not accept more than 100 elements per batch. 100 elements by default. +- `MaxProvideConcurrency`: It determines the number of threads used when providing content. GOMAXPROCS by default. DHT: - - `"Mode"`: Mode used by the Amino DHT. Possible values: "server", "client", "auto" - - `"AcceleratedDHTClient"`: Set to `true` if you want to use the acceleratedDHT. - - `"PublicIPNetwork"`: Set to `true` to create a `WAN` DHT. Set to `false` to create a `LAN` DHT. + +- `"Mode"`: Mode used by the Amino DHT. Possible values: "server", "client", "auto" +- `"AcceleratedDHTClient"`: Set to `true` if you want to use the acceleratedDHT. +- `"PublicIPNetwork"`: Set to `true` to create a `WAN` DHT. Set to `false` to create a `LAN` DHT. Parallel: - - `Routers`: A list of routers that will be executed in parallel: - - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list. - - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout. - - `ExecuteAfter:duration`: Providing this param will delay the execution of that router at the specified time. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). - - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred. - - `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). 
+ +- `Routers`: A list of routers that will be executed in parallel: + - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list. + - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout. + - `ExecuteAfter:duration`: Providing this param will delay the execution of that router at the specified time. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). + - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred. +- `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)` (`10s`, `1m`, `2h`). Sequential: - - `Routers`: A list of routers that will be executed in order: - - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list. - - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)`. Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout. - - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred. - - `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)`. + +- `Routers`: A list of routers that will be executed in order: + - `Name:string`: Name of the router. It should be one of the previously added to `Routers` list. + - `Timeout:duration`: Local timeout. It accepts strings compatible with Go `time.ParseDuration(string)`. Time will start counting when this specific router is called, and it will stop when the router returns, or we reach the specified timeout. + - `IgnoreErrors:bool`: It will specify if that router should be ignored if an error occurred. 
+- `Timeout:duration`: Global timeout. It accepts strings compatible with Go `time.ParseDuration(string)`. Default: `{}` (use the safe implicit defaults) Type: `object[string->string]` -### `Routing: Methods` +### `Routing.Methods` -`Methods:map` will define which routers will be executed per method. The key will be the name of the method: `"provide"`, `"find-providers"`, `"find-peers"`, `"put-ipns"`, `"get-ipns"`. All methods must be added to the list. +`Methods:map` will define which routers will be executed per method used when `Routing.Type=custom`. + +> [!CAUTION] +> **EXPERIMENTAL: `Routing.Methods` is for research and testing only, not production use.** +> +> - The configuration format and behavior may change without notice between releases. +> - Bugs and regressions may not be prioritized. +> - HTTP-only configurations cannot reliably provide content. See [delegated-routing.md](delegated-routing.md#limitations). +> +> Most users should use `Routing.Type=auto` or `autoclient` with [`Routing.DelegatedRouters`](#routingdelegatedrouters). + +The key will be the name of the method: `"provide"`, `"find-providers"`, `"find-peers"`, `"put-ipns"`, `"get-ipns"`. All methods must be added to the list. The value will contain: + - `RouterName:string`: Name of the router. It should be one of the previously added to `Routing.Routers` list. Type: `object[string->object]` @@ -1675,7 +2892,7 @@ trigger netscan alerts on some hosting providers or cause strain in some setups. > [!TIP] > The [`server` configuration profile](#server-profile) fills up this list with sensible defaults, > preventing dials to all non-routable IP addresses (e.g., `/ip4/192.168.0.0/ipcidr/16`, -> which is the multiaddress representation of `192.168.0.0/16`) but you should always +> which is the [multiaddress][multiaddr] representation of `192.168.0.0/16`) but you should always > check settings against your own network and/or hosting provider. 
Default: `[]` @@ -1800,7 +3017,6 @@ Default: `131072` (128 kb) Type: `optionalInteger` - #### `Swarm.RelayService.ReservationTTL` Duration of a new or refreshed reservation. @@ -1809,7 +3025,6 @@ Default: `"1h"` Type: `duration` - #### `Swarm.RelayService.MaxReservations` Maximum number of active relay slots. @@ -1818,7 +3033,6 @@ Default: `128` Type: `optionalInteger` - #### `Swarm.RelayService.MaxCircuits` Maximum number of open relay connections for each peer. @@ -1827,7 +3041,6 @@ Default: `16` Type: `optionalInteger` - #### `Swarm.RelayService.BufferSize` Size of the relayed connection buffers. @@ -1836,15 +3049,9 @@ Default: `2048` Type: `optionalInteger` - #### `Swarm.RelayService.MaxReservationsPerPeer` -Maximum number of reservations originating from the same peer. - -Default: `4` - -Type: `optionalInteger` - +**REMOVED in kubo 0.32 due to [go-libp2p#2974](https://github.com/libp2p/go-libp2p/pull/2974)** #### `Swarm.RelayService.MaxReservationsPerIP` @@ -1885,8 +3092,8 @@ Please use [`AutoNAT.ServiceMode`](#autonatservicemode). The connection manager determines which and how many connections to keep and can be configured to keep. Kubo currently supports two connection managers: -* none: never close idle connections. -* basic: the default connection manager. +- none: never close idle connections. +- basic: the default connection manager. By default, this section is empty and the implicit defaults defined below are used. @@ -1904,16 +3111,17 @@ Type: `optionalString` (default when unset or empty) The basic connection manager uses a "high water", a "low water", and internal scoring to periodically close connections to free up resources. When a node -using the basic connection manager reaches `HighWater` idle connections, it will -close the least useful ones until it reaches `LowWater` idle connections. +using the basic connection manager reaches `HighWater` idle connections, it +will close the least useful ones until it reaches `LowWater` idle +connections. 
The process of closing connections happens every `SilencePeriod`. The connection manager considers a connection idle if: -* It has not been explicitly _protected_ by some subsystem. For example, Bitswap +- It has not been explicitly _protected_ by some subsystem. For example, Bitswap will protect connections to peers from which it is actively downloading data, the DHT will protect some peers for routing, and the peering subsystem will protect all "peered" nodes. -* It has existed for longer than the `GracePeriod`. +- It has existed for longer than the `GracePeriod`. **Example:** @@ -1924,7 +3132,8 @@ The connection manager considers a connection idle if: "Type": "basic", "LowWater": 100, "HighWater": 200, - "GracePeriod": "30s" + "GracePeriod": "30s", + "SilencePeriod": "10s" } } } @@ -1958,6 +3167,14 @@ Default: `"20s"` Type: `optionalDuration` +##### `Swarm.ConnMgr.SilencePeriod` + +SilencePeriod is the time duration between connection manager runs, when connections that are idle are closed. + +Default: `"10s"` + +Type: `optionalDuration` + ### `Swarm.ResourceMgr` Learn more about Kubo's usage of libp2p Network Resource Manager @@ -1972,7 +3189,8 @@ Type: `flag` #### `Swarm.ResourceMgr.MaxMemory` -This is the max amount of memory to allow libp2p to use. +This is the max amount of memory to allow go-libp2p to use. + libp2p's resource manager will prevent additional resource creation while this limit is reached. This value is also used to scale the limit on various resources at various scopes when the default limits (discussed in [libp2p resource management](./libp2p-resource-management.md)) are used. @@ -1980,8 +3198,13 @@ For example, increasing this value will increase the default limit for incoming It is possible to inspect the runtime limits via `ipfs swarm resources --help`. +> [!IMPORTANT] +> `Swarm.ResourceMgr.MaxMemory` is the memory limit for go-libp2p networking stack alone, and not for entire Kubo or Bitswap. 
+> +> To set memory limit for the entire Kubo process, use [`GOMEMLIMIT` environment variable](http://web.archive.org/web/20240222201412/https://kupczynski.info/posts/go-container-aware/) which all Go programs recognize, and then set `Swarm.ResourceMgr.MaxMemory` to less than your custom `GOMEMLIMIT`. + Default: `[TOTAL_SYSTEM_MEMORY]/2` -Type: `optionalBytes` +Type: [`optionalBytes`](#optionalbytes) #### `Swarm.ResourceMgr.MaxFileDescriptors` @@ -1995,12 +3218,12 @@ Type: `optionalInteger` #### `Swarm.ResourceMgr.Allowlist` -A list of multiaddrs that can bypass normal system limits (but are still limited by the allowlist scope). +A list of [multiaddrs][libp2p-multiaddrs] that can bypass normal system limits (but are still limited by the allowlist scope). Convenience config around [go-libp2p-resource-manager#Allowlist.Add](https://pkg.go.dev/github.com/libp2p/go-libp2p/p2p/host/resource-manager#Allowlist.Add). Default: `[]` -Type: `array[string]` (multiaddrs) +Type: `array[string]` ([multiaddrs][multiaddr]) ### `Swarm.Transports` @@ -2015,6 +3238,14 @@ transports, multiaddrs for these transports must be added to `Addresses.Swarm`. Supported transports are: QUIC, TCP, WS, Relay, WebTransport and WebRTCDirect. +> [!CAUTION] +> **SECURITY CONSIDERATIONS FOR NETWORK TRANSPORTS** +> +> Enabling network transports allows your node to accept connections from the internet. +> Ensure your firewall rules and [`Addresses.Swarm`](#addressesswarm) configuration +> align with your security requirements. +> See [Security section](#security) for network exposure considerations. + Each field in this section is a `flag`. 
#### `Swarm.Transports.Network.TCP` @@ -2029,8 +3260,9 @@ Default: Enabled Type: `flag` Listen Addresses: -* /ip4/0.0.0.0/tcp/4001 (default) -* /ip6/::/tcp/4001 (default) + +- /ip4/0.0.0.0/tcp/4001 (default) +- /ip6/::/tcp/4001 (default) #### `Swarm.Transports.Network.Websocket` @@ -2045,8 +3277,9 @@ Default: Enabled Type: `flag` Listen Addresses: -* /ip4/0.0.0.0/tcp/4002/ws -* /ip6/::/tcp/4002/ws + +- /ip4/0.0.0.0/tcp/4001/ws +- /ip6/::/tcp/4001/ws #### `Swarm.Transports.Network.QUIC` @@ -2064,6 +3297,7 @@ Default: Enabled Type: `flag` Listen Addresses: + - `/ip4/0.0.0.0/udp/4001/quic-v1` (default) - `/ip6/::/udp/4001/quic-v1` (default) @@ -2072,14 +3306,15 @@ Listen Addresses: [Libp2p Relay](https://github.com/libp2p/specs/tree/master/relay) proxy transport that forms connections by hopping between multiple libp2p nodes. Allows IPFS node to connect to other peers using their `/p2p-circuit` -multiaddrs. This transport is primarily useful for bypassing firewalls and +[multiaddrs][libp2p-multiaddrs]. This transport is primarily useful for bypassing firewalls and NATs. See also: + - Docs: [Libp2p Circuit Relay](https://docs.libp2p.io/concepts/circuit-relay/) - [`Swarm.RelayClient.Enabled`](#swarmrelayclientenabled) for getting a public -- `/p2p-circuit` address when behind a firewall. - - [`Swarm.EnableHolePunching`](#swarmenableholepunching) for direct connection upgrade through relay +- `/p2p-circuit` address when behind a firewall. +- [`Swarm.EnableHolePunching`](#swarmenableholepunching) for direct connection upgrade through relay - [`Swarm.RelayService.Enabled`](#swarmrelayserviceenabled) for becoming a limited relay for other peers @@ -2088,9 +3323,9 @@ Default: Enabled Type: `flag` Listen Addresses: -* This transport is special. Any node that enables this transport can receive - inbound connections on this transport, without specifying a listen address. +- This transport is special. 
Any node that enables this transport can receive + inbound connections on this transport, without specifying a listen address. #### `Swarm.Transports.Network.WebTransport` @@ -2113,6 +3348,7 @@ Default: Enabled Type: `flag` Listen Addresses: + - `/ip4/0.0.0.0/udp/4001/quic-v1/webtransport` (default) - `/ip6/::/udp/4001/quic-v1/webtransport` (default) @@ -2143,6 +3379,7 @@ Default: Enabled Type: `flag` Listen Addresses: + - `/ip4/0.0.0.0/udp/4001/webrtc-direct` (default) - `/ip6/::/udp/4001/webrtc-direct` (default) @@ -2215,14 +3452,14 @@ Type: `priority` ### `Swarm.Transports.Multiplexers.Mplex` -**REMOVED**: See https://github.com/ipfs/kubo/issues/9958 +**REMOVED**: See Support for Mplex has been [removed from Kubo and go-libp2p](https://github.com/libp2p/specs/issues/553). Please remove this option from your config. ## `DNS` -Options for configuring DNS resolution for [DNSLink](https://docs.ipfs.tech/concepts/dnslink/) and `/dns*` [Multiaddrs](https://github.com/multiformats/multiaddr/). +Options for configuring DNS resolution for [DNSLink](https://docs.ipfs.tech/concepts/dnslink/) and `/dns*` [Multiaddrs][libp2p-multiaddrs]. ### `DNS.Resolvers` @@ -2232,6 +3469,7 @@ This allows for overriding the default DNS resolver provided by the operating sy and using different resolvers per domain or TLD (including ones from alternative, non-ICANN naming systems). Example: + ```json { "DNS": { @@ -2246,18 +3484,13 @@ Example: ``` Be mindful that: + - Currently only `https://` URLs for [DNS over HTTPS (DoH)](https://en.wikipedia.org/wiki/DNS_over_HTTPS) endpoints are supported as values. - The default catch-all resolver is the cleartext one provided by your operating system. It can be overridden by adding a DoH entry for the DNS root indicated by `.` as illustrated above. -- Out-of-the-box support for selected decentralized TLDs relies on a [centralized service which is provided on best-effort basis](https://www.cloudflare.com/distributed-web-gateway-terms/). 
The implicit DoH resolvers are: - ```json - { - "eth.": "https://resolver.cloudflare-eth.com/dns-query", - "crypto.": "https://resolver.cloudflare-eth.com/dns-query" - } - ``` - To get all the benefits of a decentralized naming system we strongly suggest setting DoH endpoint to an empty string and running own decentralized resolver as catch-all one on localhost. +- Out-of-the-box support for selected non-ICANN TLDs relies on third-party centralized services provided by respective communities on best-effort basis. +- The special value `"auto"` uses DNS resolvers from [AutoConf](#autoconf) when enabled. For example: `{".": "auto"}` uses any custom DoH resolver (global or per TLD) provided by AutoConf system. -Default: `{}` +Default: `{".": "auto"}` Type: `object[string -> string]` @@ -2271,13 +3504,109 @@ If present, the upper bound is applied to DoH resolvers in [`DNS.Resolvers`](#dn Note: this does NOT work with Go's default DNS resolver. To make this a global setting, add a `.` entry to `DNS.Resolvers` first. **Examples:** -* `"1m"` DNS entries are kept for 1 minute or less. -* `"0s"` DNS entries expire as soon as they are retrieved. + +- `"1m"` DNS entries are kept for 1 minute or less. +- `"0s"` DNS entries expire as soon as they are retrieved. Default: Respect DNS Response TTL Type: `optionalDuration` +## `HTTPRetrieval` + +`HTTPRetrieval` is configuration for pure HTTP retrieval based on Trustless HTTP Gateways' +[Block Responses (`application/vnd.ipld.raw`)](https://specs.ipfs.tech/http-gateways/trustless-gateway/#block-responses-application-vnd-ipld-raw) +which can be used in addition to or instead of retrieving blocks with [Bitswap over Libp2p](#bitswap). + +Default: `{}` + +Type: `object` + +### `HTTPRetrieval.Enabled` + +Controls whether HTTP-based block retrieval is enabled. 
+ +When enabled, Kubo will act on `/tls/http` (HTTP/2) providers ([Trustless HTTP Gateways](https://specs.ipfs.tech/http-gateways/trustless-gateway/)) returned by the [`Routing.DelegatedRouters`](#routingdelegatedrouters) +to perform pure HTTP [block retrievals](https://specs.ipfs.tech/http-gateways/trustless-gateway/#block-responses-application-vnd-ipld-raw) +(`/ipfs/cid?format=raw`, `Accept: application/vnd.ipld.raw`) +alongside [Bitswap over Libp2p](#bitswap). + +HTTP requests for `application/vnd.ipld.raw` will be made instead of Bitswap when a peer has a `/tls/http` multiaddr +and the HTTPS server returns HTTP 200 for the [probe path](https://specs.ipfs.tech/http-gateways/trustless-gateway/#dedicated-probe-paths). + +> [!IMPORTANT] +> This feature is relatively new. Please report any issues via [Github](https://github.com/ipfs/kubo/issues/new). +> +> Important notes: +> +> - TLS and HTTP/2 are required. For privacy reasons, and to maintain feature-parity with browsers, unencrypted `http://` providers are ignored and not used. +> - This feature works in the same way as Bitswap: connected HTTP-peers receive optimistic block requests even for content that they are not announcing. +> - For performance reasons, and to avoid loops, the HTTP client does not follow redirects. Providers should keep announcements up to date. +> - IPFS ecosystem is working towards [supporting HTTP providers on Amino DHT](https://github.com/ipfs/specs/issues/496). Currently, HTTP providers are mostly limited to results from [`Routing.DelegatedRouters`](#routingdelegatedrouters) endpoints and requires `Routing.Type=auto|autoclient`. + +Default: `true` + +Type: `flag` + +### `HTTPRetrieval.Allowlist` + +Optional list of hostnames for which HTTP retrieval is allowed for. +If this list is not empty, only hosts matching these entries will be allowed for HTTP retrieval. 
+ +> [!TIP] +> To limit HTTP retrieval to a provider at `/dns4/example.com/tcp/443/tls/http` (which would serve `HEAD|GET https://example.com/ipfs/cid?format=raw`), set this to `["example.com"]` + +Default: `[]` + +Type: `array[string]` + +### `HTTPRetrieval.Denylist` + +Optional list of hostnames for which HTTP retrieval is not allowed. +Denylist entries take precedence over Allowlist entries. + +> [!TIP] +> This denylist operates on HTTP endpoint hostnames. +> To deny specific PeerID, use [`Routing.IgnoreProviders`](#routingignoreproviders) instead. + +Default: `[]` + +Type: `array[string]` + +### `HTTPRetrieval.NumWorkers` + +The number of worker goroutines to use for concurrent HTTP retrieval operations. +This setting controls the level of parallelism for HTTP-based block retrieval operations. +Higher values can improve performance when retrieving many blocks but may increase resource usage. + +Default: `16` + +Type: `optionalInteger` + +### `HTTPRetrieval.MaxBlockSize` + +Sets the maximum size of a block that the HTTP retrieval client will accept. + +> [!NOTE] +> This setting is a security feature designed to protect Kubo from malicious providers who might send excessively large or invalid data. +> Increasing this value allows Kubo to retrieve larger blocks from compatible HTTP providers, but doing so reduces interoperability with Bitswap, and increases potential security risks. +> +> Learn more: [Supporting Large IPLD Blocks: Why block limits?](https://discuss.ipfs.tech/t/supporting-large-ipld-blocks/15093#why-block-limits-5) + +Default: `2MiB` (matching [Bitswap size limit](https://specs.ipfs.tech/bitswap-protocol/#block-sizes)) + +Type: `optionalString` + +### `HTTPRetrieval.TLSInsecureSkipVerify` + +Disables TLS certificate validation. +Allows making HTTPS connections to HTTP/2 test servers with self-signed TLS certificates. +Only for testing, do not use in production. 
+ +Default: `false` + +Type: `flag` + ## `Import` Options to configure the default options used for ingesting data, in commands such as `ipfs add` or `ipfs block put`. All affected commands are detailed per option. @@ -2288,6 +3617,8 @@ Note that using flags will override the options defined here. The default CID version. Commands affected: `ipfs add`. +Must be either 0 or 1. CIDv0 uses SHA2-256 only, while CIDv1 supports multiple hash functions. + Default: `0` Type: `optionalInteger` @@ -2304,6 +3635,12 @@ Type: `flag` The default UnixFS chunker. Commands affected: `ipfs add`. +Valid formats: + +- `size-` - fixed size chunker +- `rabin---` - rabin fingerprint chunker +- `buzhash` - buzhash chunker + Default: `size-262144` Type: `optionalString` @@ -2312,10 +3649,148 @@ Type: `optionalString` The default hash function. Commands affected: `ipfs add`, `ipfs block put`, `ipfs dag put`. +Must be a valid multihash name (e.g., `sha2-256`, `blake3`) and must be allowed for use in IPFS according to security constraints. + +Run `ipfs cid hashes --supported` to see the full list of allowed hash functions. + Default: `sha2-256` Type: `optionalString` +### `Import.FastProvideRoot` + +Immediately provide root CIDs to the DHT in addition to the regular provide queue. + +This complements the sweep provider system: fast-provide handles the urgent case (root CIDs that users share and reference), while the sweep provider efficiently provides all blocks according to the `Provide.Strategy` over time. Together, they optimize for both immediate discoverability of newly imported content and efficient resource usage for complete DAG provides. + +When disabled, only the sweep provider's queue is used. + +This setting applies to both `ipfs add` and `ipfs dag import` commands and can be overridden per-command with the `--fast-provide-root` flag. + +Ignored when DHT is not available for routing (e.g., `Routing.Type=none` or delegated-only configurations). 
+
+Default: `true`
+
+Type: `flag`
+
+### `Import.FastProvideWait`
+
+Wait for the immediate root CID provide to complete before returning.
+
+When enabled, the command blocks until the provide completes, ensuring guaranteed discoverability before returning. When disabled (default), the provide happens asynchronously in the background without blocking the command.
+
+Use this when you need certainty that content is discoverable before the command returns (e.g., sharing a link immediately after adding).
+
+This setting applies to both `ipfs add` and `ipfs dag import` commands and can be overridden per-command with the `--fast-provide-wait` flag.
+
+Ignored when DHT is not available for routing (e.g., `Routing.Type=none` or delegated-only configurations).
+
+Default: `false`
+
+Type: `flag`
+
+### `Import.BatchMaxNodes`
+
+The maximum number of nodes in a write-batch. The total size of the batch is limited by `BatchMaxNodes` and `BatchMaxSize`.
+
+Increasing this will batch more items together when importing data with `ipfs dag import`, which can speed things up.
+
+Must be positive (> 0). Setting to 0 would cause immediate batching after each node, which is inefficient.
+
+Default: `128`
+
+Type: `optionalInteger`
+
+### `Import.BatchMaxSize`
+
+The maximum size of a single write-batch (computed as the sum of the sizes of the blocks). The total size of the batch is limited by `BatchMaxNodes` and `BatchMaxSize`.
+
+Increasing this will batch more items together when importing data with `ipfs dag import`, which can speed things up.
+
+Must be positive (> 0). Setting to 0 would cause immediate batching after any data, which is inefficient.
+
+Default: `20971520` (20MiB)
+
+Type: `optionalInteger`
+
+### `Import.UnixFSFileMaxLinks`
+
+The maximum number of links that a node part of a UnixFS File can have
+when building the DAG while importing.
+
+This setting controls both the fanout in files that are chunked into several
+blocks and grouped as a Unixfs (dag-pb) DAG.
+
+Must be positive (> 0). Zero or negative values would break file DAG construction.
+
+Default: `174`
+
+Type: `optionalInteger`
+
+### `Import.UnixFSDirectoryMaxLinks`
+
+The maximum number of links that a node part of a UnixFS basic directory can
+have when building the DAG while importing.
+
+This setting controls the fanout for basic, non-HAMT folder nodes. It
+sets a limit after which directories are converted to a HAMT-based structure.
+
+When unset (0), no limit exists for children. Directories will be converted to
+HAMTs based on their estimated size only.
+
+This setting will cause basic directories to be converted to HAMTs when they
+exceed the maximum number of children. This happens transparently during the
+add process. The fanout of HAMT nodes is controlled by `MaxHAMTFanout`.
+
+Must be non-negative (>= 0). Zero means no limit, negative values are invalid.
+
+Commands affected: `ipfs add`
+
+Default: `0` (no limit, because [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold) controls when to switch to HAMT sharding when a directory grows too big)
+
+Type: `optionalInteger`
+
+### `Import.UnixFSHAMTDirectoryMaxFanout`
+
+The maximum number of children that a node part of a UnixFS HAMT directory
+(aka sharded directory) can have.
+
+HAMT directories have unlimited children and are used when basic directories
+become too big or reach `MaxLinks`. A HAMT is a structure made of UnixFS
+nodes that store the list of elements in the folder. This option controls the
+maximum number of children that the HAMT nodes can have.
+
+According to the [UnixFS specification](https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters), this value must be a power of 2, a multiple of 8 (for byte-aligned bitfields), and not exceed 1024 (to prevent denial-of-service attacks).
+
+Commands affected: `ipfs add`, `ipfs daemon` (globally overrides [`boxo/ipld/unixfs/io.DefaultShardWidth`](https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L30C5-L30C22))
+
+Default: `256`
+
+Type: `optionalInteger`
+
+### `Import.UnixFSHAMTDirectorySizeThreshold`
+
+The sharding threshold to decide whether a basic UnixFS directory
+should be sharded (converted into HAMT Directory) or not.
+
+This value is not strictly related to the size of the UnixFS directory block
+and any increases in the threshold should come with being careful that block
+sizes stay under 2MiB in order for them to be reliably transferable through the
+networking stack. At the time of writing this, IPFS peers on the public swarm
+tend to ignore requests for blocks bigger than 2MiB.
+
+Uses implementation from `boxo/ipld/unixfs/io/directory`, where the size is not
+the _exact_ block size of the encoded directory but just the estimated size
+based on the byte length of DAG-PB Links names and CIDs.
+
+Setting to `1B` is functionally equivalent to always using HAMT (useful in testing).
+
+Commands affected: `ipfs add`, `ipfs daemon` (globally overrides [`boxo/ipld/unixfs/io.HAMTShardingSize`](https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26))
+
+Default: `256KiB` (may change, inspect `DefaultUnixFSHAMTDirectorySizeThreshold` to confirm)
+
+Type: [`optionalBytes`](#optionalbytes)
+
## `Version`

Options to configure agent version announced to the swarm, and leveraging
@@ -2328,7 +3803,7 @@ Optional suffix to the AgentVersion presented by `ipfs id` and exposed via [libp

The value from config takes precedence over value passed via `ipfs daemon --agent-version-suffix`.
> [!NOTE]
-> Setting a custom version suffix helps with ecosystem analysis, such as Amino DHT reports published at https://stats.ipfs.network
+> Setting a custom version suffix helps with ecosystem analysis, such as Amino DHT reports published at <https://stats.ipfs.network>

Default: `""` (no suffix, or value from `ipfs daemon --agent-version-suffix=`)

@@ -2401,11 +3876,21 @@ is useful when using the daemon in test environments.

Restores default network settings.
Inverse profile of the test profile.

+### `autoconf-on` profile
+
+Safe default for joining the public IPFS Mainnet swarm with automatic configuration.
+Can also be used with custom AutoConf.URL for other networks.
+
+### `autoconf-off` profile
+
+Disables AutoConf and clears all networking fields for manual configuration.
+Use this for private networks or when you want explicit control over all endpoints.
+
### `flatfs` profile

-Configures the node to use the flatfs datastore. Flatfs is the default datastore.
+Configures the node to use the flatfs datastore.
+Flatfs is the default, most battle-tested and reliable datastore.

-This is the most battle-tested and reliable datastore.

You should use this datastore if:
- You need a very simple and very reliable datastore, and you trust your
@@ -2416,11 +3901,43 @@ You should use this datastore if:
- You want to minimize memory usage.
- You are ok with the default speed of data import, or prefer to use `--nocopy`.

-This profile may only be applied when first initializing the node.
+> [!WARNING]
+> This profile may only be applied when first initializing the node via `ipfs init --profile flatfs`
+
+> [!NOTE]
+> See caveats and configuration options at [`datastores.md#flatfs`](datastores.md#flatfs)
+
+### `flatfs-measure` profile
+
+Configures the node to use the flatfs datastore with metrics. This is the same as [`flatfs` profile](#flatfs-profile) with the addition of the `measure` datastore wrapper.
+
+### `pebbleds` profile
+
+Configures the node to use the pebble high-performance datastore.
+
+Pebble is a LevelDB/RocksDB inspired key-value store focused on performance and internal usage by CockroachDB.
+You should use this datastore if:
+
+- You need a datastore that is focused on performance.
+- You need a datastore that is good for multi-terabyte data sets.
+- You need reliability by default, but may choose to disable WAL for maximum performance when reliability is not critical.
+- You want a datastore that does not need GC cycles and does not use more space than necessary
+- You want a datastore that does not take several minutes to start with large repositories
+- You want a datastore that performs well even with default settings, but can be optimized by setting configuration to tune it for your specific needs.
+
+> [!WARNING]
+> This profile may only be applied when first initializing the node via `ipfs init --profile pebbleds`
+
+> [!NOTE]
+> See other caveats and configuration options at [`datastores.md#pebbleds`](datastores.md#pebbleds)
+
+### `pebbleds-measure` profile
+
+Configures the node to use the pebble datastore with metrics. This is the same as [`pebbleds` profile](#pebbleds-profile) with the addition of the `measure` datastore wrapper.

### `badgerds` profile

-Configures the node to use the legacy badgerv1 datastore.
+Configures the node to use the **legacy** badgerv1 datastore.

> [!CAUTION]
> This is based on very old badger 1.x, which has known bugs and is no longer supported by the upstream team.
@@ -2437,7 +3954,15 @@ Also, be aware that:

- Good for medium-size datastores, but may run into performance issues if your dataset is bigger than a terabyte.
- The current implementation is based on old badger 1.x which is no longer supported by the upstream team.

-This profile may only be applied when first initializing the node.
+> [!WARNING]
+> This profile may only be applied when first initializing the node via `ipfs init --profile badgerds`
+
+> [!NOTE]
+> See other caveats and configuration options at [`datastores.md#badgerds`](datastores.md#badgerds)
+
+### `badgerds-measure` profile
+
+Configures the node to use the **legacy** badgerv1 datastore with metrics. This is the same as [`badgerds` profile](#badgerds-profile) with the addition of the `measure` datastore wrapper.

### `lowpower` profile

@@ -2454,7 +3979,7 @@ Reduces daemon overhead on the system by disabling optional swarm services.

### `announce-off` profile

-Disables [Reprovider](#reprovider) system (and announcing to Amino DHT).
+Disables [Provide](#provide) system (and announcing to Amino DHT).

> [!CAUTION]
> The main use case for this is setups with manual Peering.Peers config.
@@ -2464,22 +3989,65 @@ Disables [Reprovider](#reprovider) system (and announcing to Amino DHT).

### `announce-on` profile

-(Re-)enables [Reprovider](#reprovider) system (reverts [`announce-off` profile](#annouce-off-profile).
+(Re-)enables [Provide](#provide) system (reverts [`announce-off` profile](#announce-off-profile)).

### `legacy-cid-v0` profile

Makes UnixFS import (`ipfs add`) produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks.

+See [`Import.*`](#import) for exact settings.
+
> [!NOTE]
> This profile is provided for legacy users and should not be used for new projects.

### `test-cid-v1` profile

-Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256 and 1 MiB chunks.
+Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
+and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT
+above 256KiB).
+
+See [`Import.*`](#import) for exact settings.

> [!NOTE]
-> This profile will become the new implicit default, provided for testing purposes.
-> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details.
+
+> [`Import.*`](#import) settings applied by this profile MAY change in a future release. Provided for testing purposes.
+>
+> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
+> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
+
+### `test-cid-v1-wide` profile
+
+Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
+and 1 MiB chunks and wider file DAGs (max 1024 links per every node type,
+switch dir to HAMT above 1MiB).
+
+See [`Import.*`](#import) for exact settings.
+
+> [!NOTE]
+> [`Import.*`](#import) settings applied by this profile MAY change in a future release. Provided for testing purposes.
+>
+> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
+> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
+
+## Security
+
+This section provides an overview of security considerations for configurations that expose network services.
+ +### Port and Network Exposure + +Several configuration options expose TCP or UDP ports that can make your Kubo node accessible from the network: + +- **[`Addresses.API`](#addressesapi)** - Exposes the admin RPC API (default: localhost:5001) +- **[`Addresses.Gateway`](#addressesgateway)** - Exposes the HTTP gateway (default: localhost:8080) +- **[`Addresses.Swarm`](#addressesswarm)** - Exposes P2P connectivity (default: 0.0.0.0:4001, both UDP and TCP) +- **[`Swarm.Transports.Network`](#swarmtransportsnetwork)** - Controls which P2P transport protocols are enabled over TCP and UDP + +### Security Best Practices + +- Keep admin services ([`Addresses.API`](#addressesapi)) bound to localhost unless authentication ([`API.Authorizations`](#apiauthorizations)) is configured +- Use [`Gateway.NoFetch`](#gatewaynofetch) to prevent arbitrary CID retrieval if Kubo is acting as a public gateway available to anyone +- Configure firewall rules to restrict access to exposed ports. Note that [`Addresses.Swarm`](#addressesswarm) is special - all incoming traffic to swarm ports should be allowed to ensure proper P2P connectivity +- Control which public-facing addresses are announced to other peers using [`Addresses.NoAnnounce`](#addressesnoannounce), [`Addresses.Announce`](#addressesannounce), and [`Addresses.AppendAnnounce`](#addressesappendannounce) +- Consider using the [`server` profile](#server-profile) for production deployments ## Types @@ -2538,6 +4106,7 @@ an implicit default when missing from the config file: - a string value indicating the number of bytes, including human readable representations: - [SI sizes](https://en.wikipedia.org/wiki/Metric_prefix#List_of_SI_prefixes) (metric units, powers of 1000), e.g. `1B`, `2kB`, `3MB`, `4GB`, `5TB`, …) - [IEC sizes](https://en.wikipedia.org/wiki/Binary_prefix#IEC_prefixes) (binary units, powers of 1024), e.g. `1B`, `2KiB`, `3MiB`, `4GiB`, `5TiB`, …) +- a raw number (will be interpreted as bytes, e.g. 
`1048576` for 1MiB) ### `optionalString` @@ -2554,3 +4123,7 @@ an implicit default when missing from the config file: - `null`/missing will apply the default value defined in Kubo sources (`.WithDefault("1h2m3s")`) - a string with a valid [go duration](#duration) (e.g, `"1d2h4m40.01s"`). + +---- + +[multiaddr]: https://docs.ipfs.tech/concepts/glossary/#multiaddr diff --git a/docs/content-blocking.md b/docs/content-blocking.md index fad63ad9e..e894868ac 100644 --- a/docs/content-blocking.md +++ b/docs/content-blocking.md @@ -44,7 +44,7 @@ caused the request to be blocked. [NOpfs](https://github.com/ipfs-shipyard/nopfs) supports the format from [IPIP-383](https://github.com/ipfs/specs/pull/383). Clear-text rules are simple: just put content paths to block, one per line. -Paths with unicode and whitespace need to be percend-encoded: +Paths with unicode and whitespace need to be percent-encoded: ``` /ipfs/QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR diff --git a/docs/datastores.md b/docs/datastores.md index c18ecb0c7..9ba500a59 100644 --- a/docs/datastores.md +++ b/docs/datastores.md @@ -3,15 +3,22 @@ This document describes the different possible values for the `Datastore.Spec` field in the ipfs configuration file. +- [flatfs](#flatfs) +- [levelds](#levelds) +- [pebbleds](#pebbleds) +- [badgerds](#badgerds) +- [mount](#mount) +- [measure](#measure) + ## flatfs -Stores each key value pair as a file on the filesystem. +Stores each key-value pair as a file on the filesystem. The shardFunc is prefixed with `/repo/flatfs/shard/v1` then followed by a descriptor of the sharding strategy. 
Some example values are: - `/repo/flatfs/shard/v1/next-to-last/2` - Shards on the two next to last characters of the key - `/repo/flatfs/shard/v1/prefix/2` - - Shards based on the two character prefix of the key + - Shards based on the two-character prefix of the key ```json { @@ -22,10 +29,12 @@ The shardFunc is prefixed with `/repo/flatfs/shard/v1` then followed by a descri } ``` +- `sync`: Flush every write to disk before continuing. Setting this to false is safe as kubo will automatically flush writes to disk before and after performing critical operations like pinning. However, you can set this to true to be extra-safe (at the cost of a slowdown when adding files). + NOTE: flatfs must only be used as a block store (mounted at `/blocks`) as it only partially implements the datastore interface. You can mount flatfs for /blocks only using the mount datastore (described below). ## levelds -Uses a leveldb database to store key value pairs. +Uses a leveldb database to store key-value pairs. ```json { @@ -35,9 +44,53 @@ Uses a leveldb database to store key value pairs. } ``` +## pebbleds + +Uses [pebble](https://github.com/cockroachdb/pebble) as a key-value store. + +```json +{ + "type": "pebbleds", + "path": "", +} +``` + +The following options are available for tuning pebble. +If they are not configured (or assigned their zero-valued), then default values are used. + +* `bytesPerSync`: int, Sync sstables periodically in order to smooth out writes to disk. (default: 512KB) +* `disableWAL`: true|false, Disable the write-ahead log (WAL) at expense of prohibiting crash recovery. (default: false) +* `cacheSize`: Size of pebble's shared block cache. (default: 8MB) +* `formatVersionMajor`: int, Sets the format of pebble on-disk files. If 0 or unset, automatically convert to latest format. +* `l0CompactionThreshold`: int, Count of L0 files necessary to trigger an L0 compaction. 
+* `l0StopWritesThreshold`: int, Limit on L0 read-amplification, computed as the number of L0 sublevels.
+* `lBaseMaxBytes`: int, Maximum number of bytes for LBase. The base level is the level which L0 is compacted into.
+* `maxConcurrentCompactions`: int, Maximum number of concurrent compactions. (default: 1)
+* `memTableSize`: int, Size of a MemTable in steady state. The actual MemTable size starts at min(256KB, MemTableSize) and doubles for each subsequent MemTable up to MemTableSize (default: 4MB)
+* `memTableStopWritesThreshold`: int, Limit on the number of queued MemTables. (default: 2)
+* `walBytesPerSync`: int, Sets the number of bytes to write to a WAL before calling Sync on it in the background. (default: 0, no background syncing)
+* `walMinSyncSeconds`: int, Sets the minimum duration between syncs of the WAL. (default: 0)
+
+> [!TIP]
+> Start using pebble with only default values and configure tuning items as needed for your needs. For a more complete description of these values, see: `https://pkg.go.dev/github.com/cockroachdb/pebble@vA.B.C#Options` (where `A.B.C` is pebble version from Kubo's `go.mod`).
+
+Using a pebble datastore can be set when initializing kubo `ipfs init --profile pebbleds`.
+
+#### Use of `formatMajorVersion`
+
+[Pebble's `FormatMajorVersion`](https://github.com/cockroachdb/pebble/tree/master?tab=readme-ov-file#format-major-versions) is a constant controlling the format of persisted data. Backwards incompatible changes to durable formats are gated behind new format major versions.
+
+At any point, a database's format major version may be bumped. However, once a database's format major version is increased, previous versions of Pebble will refuse to open the database.
+
+When IPFS is initialized to use the pebbleds datastore (`ipfs init --profile=pebbleds`), the latest pebble database format is configured in the pebble datastore config as `"formatMajorVersion"`.
Setting this in the datastore config prevents automatically upgrading to the latest available version when kubo is upgraded. If a later version becomes available, the kubo daemon prints a startup message to indicate this. The user can then update the config to use the latest format when they are certain a downgrade will not be necessary.
+
+Without the `"formatMajorVersion"` in the pebble datastore config, the database format is automatically upgraded to the latest version. If this happens, then it is possible a downgrade back to the previous version of kubo will not work if new format is not compatible with the pebble datastore in the previous version of kubo.
+
+When installing a new version of kubo when `"formatMajorVersion"` is configured, migration does not upgrade this to the latest available version. This is done because a user may have reasons not to upgrade the pebble database format, and may want to be able to downgrade kubo if something else is not working in the new version. If the configured pebble database format in the old kubo is not supported in the new kubo, then the configured version must be updated and the old kubo run, before installing the new kubo.
+
## badgerds

-Uses [badger](https://github.com/dgraph-io/badger) as a key value store.
+Uses [badger](https://github.com/dgraph-io/badger) as a key-value store.

> [!CAUTION]
> This is based on very old badger 1.x, which has known bugs and is no longer supported by the upstream team.
@@ -46,7 +99,7 @@ Uses [badger](https://github.com/dgraph-io/badger) as a key-value store.

* `syncWrites`: Flush every write to disk before continuing. Setting this to false is safe as kubo will automatically flush writes to disk before and after performing critical operations like pinning. However, you can set this to true to be extra-safe (at the cost of a 2-3x slowdown when adding files).
There is no good reason to set this to false unless you want to manually recover partially written (and unpinned) blocks if kubo crashes half-way through adding a file. +* `truncate`: Truncate the DB if a partially written sector is found (defaults to true). There is no good reason to set this to false unless you want to manually recover partially written (and unpinned) blocks if kubo crashes half-way through a write operation. ```json { diff --git a/docs/debug-guide.md b/docs/debug-guide.md index 9ea8a3bb6..7268ef6bc 100644 --- a/docs/debug-guide.md +++ b/docs/debug-guide.md @@ -15,6 +15,8 @@ This is a document for helping debug Kubo. Please add to it if you can! ### Beginning +> **Note:** Enable more logs by setting `GOLOG_LOG_LEVEL` env variable when troubleshooting. See [go-log documentation](https://github.com/ipfs/go-log#golog_log_level) for configuration options and available log levels. + When you see ipfs doing something (using lots of CPU, memory, or otherwise being weird), the first thing you want to do is gather all the relevant profiling information. @@ -106,6 +108,6 @@ See `tracing/doc.go` for more details. ### Other -If you have any questions, or want us to analyze some weird kubo behaviour, +If you have any questions, or want us to analyze some weird kubo behavior, just let us know, and be sure to include all the profiling information mentioned at the top. 
diff --git a/docs/delegated-routing.md b/docs/delegated-routing.md index 6f15972ed..5e781c147 100644 --- a/docs/delegated-routing.md +++ b/docs/delegated-routing.md @@ -1,4 +1,15 @@ -# New multi-router configuration system +# Delegated Routing Notes + +- Status Date: 2025-12 + +> [!IMPORTANT] +> Most users are best served by setting delegated HTTP router URLs in [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters) and `Routing.Type` to `auto` or `autoclient`, rather than using custom routing with `Routing.Routers` and `Routing.Methods` directly. +> +> The rest of this documentation describes experimental features intended only for researchers and advanced users. + +---- + +# Custom Multi-Router Configuration (Experimental) - Start Date: 2022-08-15 - Related Issues: @@ -6,19 +17,16 @@ - https://github.com/ipfs/kubo/issues/9079 - https://github.com/ipfs/kubo/pull/9877 -## Summary - -Previously we only used the Amino DHT for content routing and content -providing. - -Kubo 0.14 introduced experimental support for [delegated routing](https://github.com/ipfs/kubo/pull/8997), -which then got changed and standardized as [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/). - -Kubo 0.23.0 release added support for [self-hosting Routing V1 HTTP API server](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.23.md#self-hosting-routingv1-endpoint-for-delegated-routing-needs). - -Now we need a better way to add different routers using different protocols -like [Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) or Amino -DHT, and be able to configure them (future routing systems to come) to cover different use cases. +> [!CAUTION] +> **`Routing.Type=custom` with `Routing.Routers` and `Routing.Methods` is EXPERIMENTAL.** +> +> This feature is provided for **research and testing purposes only**. It is **not suitable for production use**. 
+> +> - The configuration format and behavior may change without notice between Kubo releases. +> - Bugs and regressions affecting custom routing may not be prioritized or fixed promptly. +> - HTTP-only routing configurations (without DHT) cannot reliably provide content to the network (👉️ see [Limitations](#limitations) below). +> +> **For production deployments**, use `Routing.Type=auto` (default) or `Routing.Type=autoclient` with [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters). ## Motivation @@ -338,7 +346,7 @@ As test fixtures we can add different use cases here and see how the configurati ~~We need to create a config migration using [fs-repo-migrations](https://github.com/ipfs/fs-repo-migrations). We should remove the `Routing.Type` param and add the configuration specified [previously](#Mimic-previous-dual-DHT-config).~~ -We don't need to create any config migration! To avoid to the users the hassle of understanding how the new routing system works, we are gonna keep the old behavior. We will add the Type `custom` to make available the new Routing system. +We don't need to create any config migration! To spare users the hassle of understanding how the new routing system works, we are going to keep the old behavior. We will add the Type `custom` to make the new Routing system available. ### Security @@ -354,6 +362,29 @@ I got ideas from all of the following links to create this design document: - https://www.notion.so/pl-strflt/Delegated-Routing-Thoughts-very-very-WIP-0543bc51b1bd4d63a061b0f28e195d38 - https://gist.github.com/guseggert/effa027ff4cbadd7f67598efb6704d12 +### Limitations + +#### HTTP-only routing cannot reliably provide content + +Configurations that use only HTTP routers (without any DHT router) are unable to reliably announce content (provider records) to the network. + +This limitation exists because: + +1. 
**No standardized HTTP API for providing**: The [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) spec only defines read operations (`GET /routing/v1/providers/{cid}`). The write operation (`PUT /routing/v1/providers`) was never standardized. + +2. **Legacy experimental API**: The only available HTTP providing mechanism is an undocumented `PUT /routing/v1/providers` request format called `ProvideBitswap`, which is a historical experiment. See [IPIP-526](https://github.com/ipfs/specs/pull/526) for ongoing discussion about formalizing HTTP-based provider announcements. + +3. **Provider system integration**: Kubo's default provider system (`Provide.DHT.SweepEnabled=true` since v0.38) is designed for DHT-based providing. When no DHT is configured, the provider system may silently skip HTTP routers or behave unexpectedly. + +**Workarounds for testing:** + +If you need to test HTTP providing, you can try: + +- Setting `Provide.DHT.SweepEnabled=false` to use the legacy provider system +- Including at least one DHT router in your custom configuration alongside HTTP routers + +These workarounds are not guaranteed to work across Kubo versions and should not be relied upon for production use. + ### Copyright Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). diff --git a/docs/developer-guide.md b/docs/developer-guide.md new file mode 100644 index 000000000..5799b48ca --- /dev/null +++ b/docs/developer-guide.md @@ -0,0 +1,316 @@ +# Developer Guide + +By the end of this guide, you will be able to: + +- Build Kubo from source +- Run the test suites +- Make and verify code changes + +This guide covers the local development workflow. For user documentation, see [docs.ipfs.tech](https://docs.ipfs.tech/). 
+ +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Quick Start](#quick-start) +- [Building](#building) +- [Running Tests](#running-tests) +- [Running the Linter](#running-the-linter) +- [Common Development Tasks](#common-development-tasks) +- [Code Organization](#code-organization) +- [Architecture](#architecture) +- [Troubleshooting](#troubleshooting) +- [Development Dependencies](#development-dependencies) +- [Further Reading](#further-reading) + +## Prerequisites + +Before you begin, ensure you have: + +- **Go** - see `go.mod` for the minimum required version +- **Git** +- **GNU Make** +- **GCC** (optional) - required for CGO (Go's C interop); without it, build with `CGO_ENABLED=0` + +## Quick Start + +```bash +git clone https://github.com/ipfs/kubo.git +cd kubo +make build +./cmd/ipfs/ipfs version +``` + +You should see output like: + +``` +ipfs version 0.34.0-dev +``` + +The binary is built to `cmd/ipfs/ipfs`. To install it system-wide: + +```bash +make install +``` + +This installs the binary to `$GOPATH/bin`. + +## Building + +| Command | Description | +|---------|-------------| +| `make build` | build the `ipfs` binary to `cmd/ipfs/ipfs` | +| `make install` | install to `$GOPATH/bin` | +| `make nofuse` | build without FUSE support | +| `make build CGO_ENABLED=0` | build without CGO (no C compiler needed) | + +For Windows-specific instructions, see [windows.md](windows.md). + +## Running Tests + +Kubo has two types of tests: + +- **Unit tests** - test individual packages in isolation. Fast and don't require a running daemon. +- **End-to-end tests** - spawn real `ipfs` nodes, run actual CLI commands, and test the full system. Slower but catch integration issues. + +Note that `go test ./...` runs both unit and end-to-end tests. Use `make test` to run all tests. CI runs unit and end-to-end tests in separate jobs for faster feedback. 
+ + + +For end-to-end tests, Kubo has two suites: + +- **`test/cli`** - modern Go-based test harness that spawns real `ipfs` nodes and runs actual CLI commands. All new tests should be added here. +- **`test/sharness`** - legacy bash-based tests. We are slowly migrating these to `test/cli`. + +When modifying tests: cosmetic changes to `test/sharness` are fine, but if significant rewrites are needed, remove the outdated sharness test and add a modern one to `test/cli` instead. + +### Before Running Tests + +**Environment requirements**: some legacy tests expect default ports (8080, 5001, 4001) to be free and no mDNS (local network discovery) Kubo service on the LAN. Tests may fail if you have a local Kubo instance running. Before running the full test suite, stop any running `ipfs daemon`. + +Two critical setup steps: + +1. **Rebuild after code changes**: if you modify any `.go` files outside of `test/`, you must run `make build` before running integration tests. + +2. **Set environment variables**: integration tests use the `ipfs` binary from `PATH` and need an isolated `IPFS_PATH`. Run these commands from the repository root: + +```bash +export PATH="$PWD/cmd/ipfs:$PATH" +export IPFS_PATH="$(mktemp -d)" +``` + +### Unit Tests + +```bash +go test ./... +``` + +### CLI Integration Tests (`test/cli`) + +These are Go-based integration tests that invoke the `ipfs` CLI. + +Instead of running the entire test suite, you can run a specific test to get faster feedback during development. + +Run a specific test (recommended during development): + +```bash +go test ./test/cli/... -run TestAdd -v +``` + +Run all CLI tests: + +```bash +go test ./test/cli/... +``` + +Run a specific test: + +```bash +go test ./test/cli/... -run TestAdd +``` + +Run with verbose output: + +```bash +go test ./test/cli/... -v +``` + +**Common error**: "version (16) is lower than repos (17)" means your `PATH` points to an old binary. Check `which ipfs` and rebuild with `make build`. 
+ +### Sharness Tests (`test/sharness`) + +Shell-based integration tests using [sharness](https://github.com/chriscool/sharness) (a portable shell testing framework). + +```bash +cd test/sharness +``` + +Run a specific test: + +```bash +timeout 60s ./t0080-repo.sh +``` + +Run with verbose output (this disables automatic cleanup): + +```bash +./t0080-repo.sh -v +``` + +**Cleanup**: the `-v` flag disables automatic cleanup. Before re-running tests, kill any dangling `ipfs daemon` processes: + +```bash +pkill -f "ipfs daemon" +``` + +### Full Test Suite + +```bash +make test # run all tests +make test_short # run shorter test suite +``` + +## Running the Linter + +Run the linter using the Makefile target (not `golangci-lint` directly): + +```bash +make -O test_go_lint +``` + +## Common Development Tasks + +### Modifying CLI Commands + +After editing help text in `core/commands/`, verify the output width: + +```bash +go test ./test/cli/... -run TestCommandDocsWidth +``` + +### Updating Dependencies + +Use the Makefile target (not `go mod tidy` directly): + +```bash +make mod_tidy +``` + +### Editing the Changelog + +When modifying `docs/changelogs/`: + +- update the Table of Contents when adding sections +- add user-facing changes to the Highlights section (the Changelog section is auto-generated) + +### Running the Daemon + +Always run the daemon with a timeout or shut it down promptly. + +With timeout: + +```bash +timeout 60s ipfs daemon +``` + +Or shut down via API: + +```bash +ipfs shutdown +``` + +For multi-step experiments, store `IPFS_PATH` in a file to ensure consistency. 
+ +## Code Organization + +| Directory | Description | +|-----------|-------------| +| `cmd/ipfs/` | CLI entry point and binary | +| `core/` | core IPFS node implementation | +| `core/commands/` | CLI command definitions | +| `core/coreapi/` | Go API implementation | +| `client/rpc/` | HTTP RPC client | +| `plugin/` | plugin system | +| `repo/` | repository management | +| `test/cli/` | Go-based CLI integration tests | +| `test/sharness/` | legacy shell-based integration tests | +| `docs/` | documentation | + +Key external dependencies: + +- [go-libp2p](https://github.com/libp2p/go-libp2p) - networking stack +- [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht) - distributed hash table +- [boxo](https://github.com/ipfs/boxo) - IPFS SDK (including Bitswap, the data exchange engine) + +For a deep dive into how code flows through Kubo, see [The `Add` command demystified](add-code-flow.md). + +## Architecture + +**Map of Implemented Subsystems** ([editable source](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit)): + + + +**CLI, HTTP-API, Core Diagram**: + +![](./cli-http-api-core-diagram.png) + +## Troubleshooting + +### "version (N) is lower than repos (M)" Error + +This means the `ipfs` binary in your `PATH` is older than expected. + +Check which binary is being used: + +```bash +which ipfs +``` + +Rebuild and verify PATH: + +```bash +make build +export PATH="$PWD/cmd/ipfs:$PATH" +./cmd/ipfs/ipfs version +``` + +### FUSE Issues + +If you don't need FUSE support, build without it: + +```bash +make nofuse +``` + +Or set the `TEST_FUSE=0` environment variable when running tests. + +### Build Fails with "No such file: stdlib.h" + +You're missing a C compiler. Either install GCC or build without CGO: + +```bash +make build CGO_ENABLED=0 +``` + +## Development Dependencies + +If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf). 
+ +## Further Reading + +- [The `Add` command demystified](add-code-flow.md) - deep dive into code flow +- [Configuration reference](config.md) +- [Performance debugging](debug-guide.md) +- [Experimental features](experimental-features.md) +- [Release process](releases.md) +- [Contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) + +## Source Code + +The complete source code is at [github.com/ipfs/kubo](https://github.com/ipfs/kubo). diff --git a/docs/environment-variables.md b/docs/environment-variables.md index f0f6b3f18..cd900de94 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -1,5 +1,34 @@ # Kubo environment variables +- [Variables](#variables) + - [`IPFS_PATH`](#ipfs_path) + - [`IPFS_LOGGING`](#ipfs_logging) + - [`IPFS_LOGGING_FMT`](#ipfs_logging_fmt) + - [`GOLOG_LOG_LEVEL`](#golog_log_level) + - [`GOLOG_LOG_FMT`](#golog_log_fmt) + - [`GOLOG_FILE`](#golog_file) + - [`GOLOG_OUTPUT`](#golog_output) + - [`GOLOG_TRACING_FILE`](#golog_tracing_file) + - [`IPFS_FUSE_DEBUG`](#ipfs_fuse_debug) + - [`YAMUX_DEBUG`](#yamux_debug) + - [`IPFS_FD_MAX`](#ipfs_fd_max) + - [`IPFS_DIST_PATH`](#ipfs_dist_path) + - [`IPFS_NS_MAP`](#ipfs_ns_map) + - [`IPFS_HTTP_ROUTERS`](#ipfs_http_routers) + - [`IPFS_HTTP_ROUTERS_FILTER_PROTOCOLS`](#ipfs_http_routers_filter_protocols) + - [`IPFS_CONTENT_BLOCKING_DISABLE`](#ipfs_content_blocking_disable) + - [`IPFS_WAIT_REPO_LOCK`](#ipfs_wait_repo_lock) + - [`IPFS_TELEMETRY`](#ipfs_telemetry) + - [`LIBP2P_TCP_REUSEPORT`](#libp2p_tcp_reuseport) + - [`LIBP2P_TCP_MUX`](#libp2p_tcp_mux) + - [`LIBP2P_MUX_PREFS`](#libp2p_mux_prefs) + - [`LIBP2P_RCMGR`](#libp2p_rcmgr) + - [`LIBP2P_DEBUG_RCMGR`](#libp2p_debug_rcmgr) + - [`LIBP2P_SWARM_FD_LIMIT`](#libp2p_swarm_fd_limit) +- [Tracing](#tracing) + +# Variables + ## `IPFS_PATH` Sets the location of the IPFS repo (where the config, blocks, etc. 
@@ -63,6 +92,14 @@ The logging format defaults to `color` when the output is a terminal, and `nocol Sets the file to which Kubo logs. By default, Kubo logs to standard error. +## `GOLOG_OUTPUT` + +When stderr and/or stdout options are configured or specified by the `GOLOG_OUTPUT` environment variable, log only to the output(s) specified. For example: + +- `GOLOG_OUTPUT="stderr"` logs only to stderr +- `GOLOG_OUTPUT="stdout"` logs only to stdout +- `GOLOG_OUTPUT="stderr+stdout"` logs to both stderr and stdout + ## `GOLOG_TRACING_FILE` Sets the file to which Kubo sends tracing events. By default, tracing is @@ -116,9 +153,15 @@ $ ipfs resolve -r /ipns/dnslink-test2.example.com ## `IPFS_HTTP_ROUTERS` -Overrides all implicit HTTP routers enabled when `Routing.Type=auto` with -the space-separated list of URLs provided in this variable. -Useful for testing and debugging in offline contexts. +Overrides AutoConf and all other HTTP routers when set. +When `Routing.Type=auto`, this environment variable takes precedence over +both AutoConf-provided endpoints and any manually configured delegated routers. +The value should be a space- or comma-separated list of HTTP routing endpoint URLs. + +This is useful for: +- Testing and debugging in offline contexts +- Overriding AutoConf endpoints temporarily +- Using custom or private HTTP routing services Example: @@ -127,23 +170,70 @@ $ ipfs config Routing.Type auto $ IPFS_HTTP_ROUTERS="http://127.0.0.1:7423" ipfs daemon ``` -The above will replace implicit HTTP routers with single one, allowing for +The above will replace all AutoConf endpoints with a single local one, allowing for inspection/debug of HTTP requests sent by Kubo via `while true ; do nc -l 7423; done` or more advanced tools like [mitmproxy](https://docs.mitmproxy.org/stable/#mitmproxy). +When not set, Kubo uses endpoints from AutoConf (when enabled) or manually configured `Routing.DelegatedRouters`. 
+ +## `IPFS_HTTP_ROUTERS_FILTER_PROTOCOLS` + +Overrides values passed with `filter-protocols` parameter defined in IPIP-484. +Value is space-separated. + +```console +$ IPFS_HTTP_ROUTERS_FILTER_PROTOCOLS="unknown transport-bitswap transport-foo" ipfs daemon +``` + +Default: `config.DefaultHTTPRoutersFilterProtocols` ## `IPFS_CONTENT_BLOCKING_DISABLE` Disables the content-blocking subsystem. No denylists will be watched and no content will be blocked. +## `IPFS_WAIT_REPO_LOCK` + +Specifies the amount of time to wait for the repo lock. Set the value of this variable to a string that can be [parsed](https://pkg.go.dev/time@go1.24.3#ParseDuration) as a golang `time.Duration`. For example: +``` +IPFS_WAIT_REPO_LOCK="15s" +``` + +If the lock cannot be acquired because someone else has the lock, and `IPFS_WAIT_REPO_LOCK` is set to a valid value, then acquiring the lock is retried every second until the lock is acquired or the specified wait time has elapsed. + +## `IPFS_TELEMETRY` + +Controls the behavior of the [telemetry plugin](telemetry.md). Valid values are: + +- `on`: Enables telemetry. +- `off`: Disables telemetry. +- `auto`: Like `on`, but logs an informative message about telemetry and gives user 15 minutes to opt-out before first collection. Used automatically on first run and when `IPFS_TELEMETRY` is not set. + +The mode can also be set in the config file under `Plugins.Plugins.telemetry.Config.Mode`. + +Example: + +```bash +export IPFS_TELEMETRY="off" +``` + ## `LIBP2P_TCP_REUSEPORT` Kubo tries to reuse the same source port for all connections to improve NAT traversal. If this is an issue, you can disable it by setting `LIBP2P_TCP_REUSEPORT` to false. -Default: true +Default: `true` + +## `LIBP2P_TCP_MUX` + +By default Kubo tries to reuse the same listener port for raw TCP and WebSockets transports via experimental `libp2p.ShareTCPListener()` feature introduced in [go-libp2p#2984](https://github.com/libp2p/go-libp2p/pull/2984). 
+If this is an issue, you can disable it by setting `LIBP2P_TCP_MUX` to `false` and use separate ports for each TCP transport. + +> [!CAUTION] +> This configuration option may be removed once `libp2p.ShareTCPListener()` becomes default in go-libp2p. + +Default: `true` ## `LIBP2P_MUX_PREFS` @@ -169,6 +259,14 @@ and outputs it to `rcmgr.json.gz` Default: disabled (not set) +## `LIBP2P_SWARM_FD_LIMIT` + +This variable controls the number of concurrent outbound dials (except dials to relay addresses which have their own limiting logic). + +Reducing it slows down connection ballooning but might affect performance negatively. + +Default: [160](https://github.com/libp2p/go-libp2p/blob/master/p2p/net/swarm/swarm_dial.go#L91) (not set) + # Tracing For tracing configuration, please check: https://github.com/ipfs/boxo/blob/main/docs/tracing.md diff --git a/docs/examples/kubo-as-a-library/go.mod b/docs/examples/kubo-as-a-library/go.mod index 7478ae14d..f59d136d6 100644 --- a/docs/examples/kubo-as-a-library/go.mod +++ b/docs/examples/kubo-as-a-library/go.mod @@ -1,223 +1,232 @@ module github.com/ipfs/kubo/examples/kubo-as-a-library -go 1.22 +go 1.25 // Used to keep this in sync with the current version of kubo. You should remove // this if you copy this example. replace github.com/ipfs/kubo => ./../../.. 
require ( - github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34 + github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c github.com/ipfs/kubo v0.0.0-00010101000000-000000000000 - github.com/libp2p/go-libp2p v0.36.3 - github.com/multiformats/go-multiaddr v0.13.0 + github.com/libp2p/go-libp2p v0.46.0 + github.com/multiformats/go-multiaddr v0.16.1 ) require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect + github.com/DataDog/zstd v1.5.7 // indirect github.com/Jorropo/jsync v1.0.1 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect + github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 // indirect + github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/caddyserver/certmagic v0.23.0 // indirect + github.com/caddyserver/zerossl v0.1.3 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/ceramicnetwork/go-dag-jose v0.1.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/ceramicnetwork/go-dag-jose v0.1.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups v1.1.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect + github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble/v2 v2.1.3 // indirect + github.com/cockroachdb/redact 
v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect github.com/cskr/pubsub v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect - github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/elastic/gosigar v0.14.3 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/flynn/noise v1.1.0 // indirect - github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.4 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect + github.com/gammazero/chanqueue v1.1.1 // indirect + github.com/gammazero/deque v1.2.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.1 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy 
v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/guillaumemichel/reservedpool v0.3.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect - github.com/ipfs-shipyard/nopfs v0.0.12 // indirect - github.com/ipfs-shipyard/nopfs/ipfs v0.13.2-0.20231027223058-cde3b5ba964c // indirect + github.com/ipfs-shipyard/nopfs v0.0.14 // indirect + github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect - github.com/ipfs/go-block-format v0.2.0 // indirect - github.com/ipfs/go-blockservice v0.5.2 // indirect - github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-block-format v0.2.3 // indirect + github.com/ipfs/go-cid v0.6.0 // indirect github.com/ipfs/go-cidutil v0.1.0 // indirect - github.com/ipfs/go-datastore v0.6.0 // indirect - github.com/ipfs/go-ds-badger v0.3.0 // indirect - github.com/ipfs/go-ds-flatfs v0.5.1 // indirect - github.com/ipfs/go-ds-leveldb v0.5.0 // indirect - github.com/ipfs/go-ds-measure v0.2.0 // indirect - github.com/ipfs/go-fs-lock v0.0.7 // indirect - github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect - github.com/ipfs/go-ipfs-delay v0.0.1 // indirect + github.com/ipfs/go-datastore v0.9.0 // indirect + github.com/ipfs/go-ds-badger v0.3.4 // indirect + github.com/ipfs/go-ds-flatfs v0.6.0 // indirect + github.com/ipfs/go-ds-leveldb v0.5.2 // indirect + github.com/ipfs/go-ds-measure v0.2.2 // indirect + 
github.com/ipfs/go-ds-pebble v0.5.8 // indirect + github.com/ipfs/go-dsqueue v0.1.1 // indirect + github.com/ipfs/go-fs-lock v0.1.1 // indirect + github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect - github.com/ipfs/go-ipfs-redirects-file v0.1.1 // indirect - github.com/ipfs/go-ipfs-util v0.0.3 // indirect - github.com/ipfs/go-ipld-cbor v0.1.0 // indirect - github.com/ipfs/go-ipld-format v0.6.0 // indirect + github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect + github.com/ipfs/go-ipld-cbor v0.2.1 // indirect + github.com/ipfs/go-ipld-format v0.6.3 // indirect github.com/ipfs/go-ipld-git v0.1.1 // indirect - github.com/ipfs/go-ipld-legacy v0.2.1 // indirect - github.com/ipfs/go-log v1.0.5 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect - github.com/ipfs/go-merkledag v0.11.0 // indirect - github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/go-peertaskqueue v0.8.1 // indirect - github.com/ipfs/go-unixfsnode v1.9.1 // indirect - github.com/ipfs/go-verifcid v0.0.3 // indirect - github.com/ipld/go-car v0.6.2 // indirect - github.com/ipld/go-car/v2 v2.13.1 // indirect - github.com/ipld/go-codec-dagpb v1.6.0 // indirect + github.com/ipfs/go-ipld-legacy v0.2.2 // indirect + github.com/ipfs/go-log/v2 v2.9.0 // indirect + github.com/ipfs/go-metrics-interface v0.3.0 // indirect + github.com/ipfs/go-peertaskqueue v0.8.2 // indirect + github.com/ipfs/go-test v0.2.3 // indirect + github.com/ipfs/go-unixfsnode v1.10.2 // indirect + github.com/ipld/go-car/v2 v2.16.0 // indirect + github.com/ipld/go-codec-dagpb v1.7.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect + github.com/ipshipyard/p2p-forge v0.7.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/jbenet/goprocess v0.1.4 // indirect - 
github.com/klauspost/compress v1.17.9 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/koron/go-ssdp v0.0.4 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/koron/go-ssdp v0.0.6 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/libdns/libdns v1.0.0-beta.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-doh-resolver v0.4.0 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-doh-resolver v0.5.0 // indirect + github.com/libp2p/go-flow-metrics v0.3.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.26.1 // indirect - github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect - github.com/libp2p/go-libp2p-pubsub v0.11.0 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.36.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect - github.com/libp2p/go-libp2p-record v0.2.0 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.7.4 // indirect + github.com/libp2p/go-libp2p-record v0.3.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-libp2p-xor v0.1.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-nat v0.2.0 // indirect - github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-netroute v0.3.0 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/libp2p/zeroconf/v2 v2.2.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect 
github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.61 // indirect + github.com/mholt/acmez/v3 v3.1.2 // indirect + github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multicodec v0.10.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-multistream v0.5.0 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.19.1 // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect - github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/datachannel v1.5.10 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/ice/v2 v2.3.34 // indirect - github.com/pion/interceptor v0.1.29 // indirect - github.com/pion/logging 
v0.2.2 // indirect - github.com/pion/mdns v0.0.12 // indirect + github.com/pion/dtls/v3 v3.0.6 // indirect + github.com/pion/ice/v4 v4.0.10 // indirect + github.com/pion/interceptor v0.1.40 // indirect + github.com/pion/logging v0.2.3 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.8 // indirect - github.com/pion/sctp v1.8.20 // indirect - github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.20 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.19 // indirect + github.com/pion/sctp v1.8.39 // indirect + github.com/pion/sdp/v3 v3.0.13 // indirect + github.com/pion/srtp/v3 v3.0.6 // indirect github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v3 v3.0.0 // indirect github.com/pion/transport/v2 v2.2.10 // indirect - github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.3.0 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v4 v4.0.2 // indirect + github.com/pion/webrtc/v4 v4.1.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.45.2 // indirect - github.com/quic-go/webtransport-go v0.8.0 // indirect - github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/samber/lo v1.46.0 // indirect + github.com/probe-lab/go-libdht v0.4.0 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.17.0 // 
indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.57.1 // indirect + github.com/quic-go/webtransport-go v0.9.0 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/stretchr/testify v1.9.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect - github.com/whyrusleeping/cbor-gen v0.1.2 // indirect + github.com/whyrusleeping/cbor-gen v0.3.1 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect - github.com/wlynxg/anet v0.0.3 // indirect + github.com/wlynxg/anet v0.0.5 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/zipkin v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/atomic v1.11.0 // indirect - 
go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.22.1 // indirect - go.uber.org/mock v0.4.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect + go.uber.org/dig v1.19.0 // indirect + go.uber.org/fx v1.24.0 // indirect + go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + go.uber.org/zap/exp v0.3.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.23.0 // indirect - golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect + 
golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.39.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum v0.16.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.3.0 // indirect + lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/docs/examples/kubo-as-a-library/go.sum b/docs/examples/kubo-as-a-library/go.sum index fff159908..790250ba1 100644 --- a/docs/examples/kubo-as-a-library/go.sum +++ b/docs/examples/kubo-as-a-library/go.sum @@ -1,9 +1,7 @@ bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510= bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= @@ 
-18,37 +16,35 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= 
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 h1:8XBWWQD+vFF+JqOsm16t0Kab1a7YWV8+GISVEP8AuZ8= +github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -58,12 +54,17 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU= +github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= +github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= +github.com/caddyserver/zerossl v0.1.3/go.mod 
h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/ceramicnetwork/go-dag-jose v0.1.0 h1:yJ/HVlfKpnD3LdYP03AHyTvbm3BpPiz2oZiOeReJRdU= -github.com/ceramicnetwork/go-dag-jose v0.1.0/go.mod h1:qYA1nYt0X8u4XoMAVoOV3upUVKtrxy/I670Dg5F0wjI= +github.com/ceramicnetwork/go-dag-jose v0.1.1 h1:7pObs22egc14vSS3AfCFfS1VmaL4lQUsAK7OGC3PlKk= +github.com/ceramicnetwork/go-dag-jose v0.1.1/go.mod h1:8ptnYwY2Z2y/s5oJnNBn/UCxLg6CpramNJ2ZXF/5aNY= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -71,55 +72,60 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod 
h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble/v2 v2.1.3 h1:irU503OnjRoJBrkZQIJvwv9c4WvpUeOJxhRApojB8D8= +github.com/cockroachdb/pebble/v2 v2.1.3/go.mod h1:B1UgWsyR+L+UvZXNgpxw+WqsUKA8VQ/bb//FXOHghB8= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= 
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= -github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= 
github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= 
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= -github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -128,54 +134,55 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW github.com/facebookgo/atomicfile 
v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= -github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= -github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod 
h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= +github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.2.0 h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU= +github.com/gammazero/deque v1.2.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= 
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -192,11 +199,14 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -204,30 +214,23 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod 
h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 
v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -237,17 +240,12 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw= +github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 
h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= @@ -259,132 +257,103 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ipfs-shipyard/nopfs v0.0.12 h1:mvwaoefDF5VI9jyvgWCmaoTJIJFAfrbyQV5fJz35hlk= -github.com/ipfs-shipyard/nopfs v0.0.12/go.mod h1:mQyd0BElYI2gB/kq/Oue97obP4B3os4eBmgfPZ+hnrE= -github.com/ipfs-shipyard/nopfs/ipfs v0.13.2-0.20231027223058-cde3b5ba964c h1:7UynTbtdlt+w08ggb1UGLGaGjp1mMaZhoTZSctpn5Ak= -github.com/ipfs-shipyard/nopfs/ipfs v0.13.2-0.20231027223058-cde3b5ba964c/go.mod h1:6EekK/jo+TynwSE/ZOiOJd4eEvRXoavEC3vquKtv4yI= +github.com/ipfs-shipyard/nopfs v0.0.14 h1:HFepJt/MxhZ3/GsLZkkAPzIPdNYKaLO1Qb7YmPbWIKk= +github.com/ipfs-shipyard/nopfs v0.0.14/go.mod h1:mQyd0BElYI2gB/kq/Oue97obP4B3os4eBmgfPZ+hnrE= +github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcdHUd7SDsUOY= +github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod 
h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34 h1:/Etgc4IR0OUF+nIoNdqwu12EYuaSMpd7/Nc5wRLd67U= -github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34/go.mod h1:ulu5I6avTmgGmvjuCaBRKwsaOOKjBfQw1EiOOQp8M6E= +github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c h1:mczpALnNzNhmggehO5Ehr9+Q8+NiJyKJfT4EPwi01d0= +github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= -github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= -github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= -github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= -github.com/ipfs/go-blockservice v0.5.2 h1:in9Bc+QcXwd1apOVM7Un9t8tixPKdaHQFdLSUM1Xgk8= -github.com/ipfs/go-blockservice v0.5.2/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk= +github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk= +github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= 
+github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w= +github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= -github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= -github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= -github.com/ipfs/go-ds-flatfs v0.5.1 h1:ZCIO/kQOS/PSh3vcF1H6a8fkRGS7pOfwfPdx4n/KJH4= -github.com/ipfs/go-ds-flatfs v0.5.1/go.mod h1:RWTV7oZD/yZYBKdbVIFXTX2fdY2Tbvl94NsWqmoyAX4= +github.com/ipfs/go-ds-badger v0.3.4 h1:MmqFicftE0KrwMC77WjXTrPuoUxhwyFsjKONSeWrlOo= +github.com/ipfs/go-ds-badger v0.3.4/go.mod h1:HfqsKJcNnIr9ZhZ+rkwS1J5PpaWjJjg6Ipmxd7KPfZ8= +github.com/ipfs/go-ds-flatfs v0.6.0 h1:olAEnDNBK1VMoTRZvfzgo90H5kBP4qIZPpYMtNlBBws= +github.com/ipfs/go-ds-flatfs v0.6.0/go.mod h1:p8a/YhmAFYyuonxDbvuIANlDCgS69uqVv+iH5f8fAxY= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= -github.com/ipfs/go-ds-leveldb v0.5.0 
h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= -github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= -github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= -github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= -github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= -github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= -github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= -github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= -github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= -github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp0x0= +github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo= +github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI= +github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs= +github.com/ipfs/go-ds-pebble v0.5.8 h1:NbAfKQo+m39Nka6gt8PARAyH+VoHtRInB6CFCmT+wqo= +github.com/ipfs/go-ds-pebble v0.5.8/go.mod h1:AJjJTHgads/Fn5+tuWmaDGjGEbks7Wgx82NQ/pwmEhc= +github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc= +github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10= +github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw= +github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0= +github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ= +github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk= github.com/ipfs/go-ipfs-delay 
v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= -github.com/ipfs/go-ipfs-exchange-interface v0.2.1 h1:jMzo2VhLKSHbVe+mHNzYgs95n0+t0Q69GQ5WhRDZV/s= -github.com/ipfs/go-ipfs-exchange-interface v0.2.1/go.mod h1:MUsYn6rKbG6CTtsDp+lKJPmVt3ZrCViNyH3rfPGsZ2E= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= -github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8= -github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk= -github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= -github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= +github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= -github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= -github.com/ipfs/go-ipld-cbor v0.1.0 
h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= -github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= -github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= -github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= +github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E= +github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= +github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8= +github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y= github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI= -github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= -github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= +github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= +github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= -github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= -github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= 
-github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipfs/go-merkledag v0.11.0 h1:DgzwK5hprESOzS4O1t/wi6JDpyVQdvm9Bs59N/jqfBY= -github.com/ipfs/go-merkledag v0.11.0/go.mod h1:Q4f/1ezvBiJV0YCIXvt51W/9/kqJGH4I1LsA7+djsM4= -github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= -github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= -github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= -github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= -github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= -github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= -github.com/ipfs/go-unixfsnode v1.9.1 h1:2cdSIDQCt7emNhlyUqUFQnKo2XvecARoIcurIKFjPD8= -github.com/ipfs/go-unixfsnode v1.9.1/go.mod h1:u8WxhmXzyrq3xfSYkhfx+uI+n91O+0L7KFjq3TS7d6g= -github.com/ipfs/go-verifcid v0.0.3 h1:gmRKccqhWDocCRkC+a59g5QW7uJw5bpX9HWBevXa0zs= -github.com/ipfs/go-verifcid v0.0.3/go.mod h1:gcCtGniVzelKrbk9ooUSX/pM3xlH73fZZJDzQJRvOUw= -github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= -github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= -github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= -github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= -github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= -github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= +github.com/ipfs/go-log/v2 v2.9.0 h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk= +github.com/ipfs/go-log/v2 v2.9.0/go.mod 
h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c= +github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= +github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= +github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= +github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8= +github.com/ipfs/go-unixfsnode v1.10.2/go.mod h1:ImDPTSiKZ+2h4UVdkSDITJHk87bUAp7kX/lgifjRicg= +github.com/ipld/go-car/v2 v2.16.0 h1:LWe0vmN/QcQmUU4tr34W5Nv5mNraW+G6jfN2s+ndBco= +github.com/ipld/go-car/v2 v2.16.0/go.mod h1:RqFGWN9ifcXVmCrTAVnfnxiWZk1+jIx67SYhenlmL34= +github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0= +github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.14.1/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714 h1:cqNk8PEwHnK0vqWln+U/YZhQc9h2NB3KjUjDPZo5Q2s= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714/go.mod 
h1:ZEUdra3CoqRVRYgAX/jAJO9aZGz6SKtKEG628fHHktY= +github.com/ipshipyard/p2p-forge v0.7.0 h1:PQayexxZC1FR2Vx0XOSbmZ6wDPliidS48I+xXWuF+YU= +github.com/ipshipyard/p2p-forge v0.7.0/go.mod h1:i2wg0p7WmHGyo5vYaK9COZBp8BN5Drncfu3WoQNZlQY= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= -github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -395,58 
+364,57 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= -github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= +github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= +github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ= +github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+0S7FQqw= -github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= +github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+skhePFdE= +github.com/libp2p/go-doh-resolver v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-flow-metrics v0.1.0 
h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= -github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= +github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= +github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= +github.com/libp2p/go-libp2p v0.46.0 h1:0T2yvIKpZ3DVYCuPOFxPD1layhRU486pj9rSlGWYnDM= +github.com/libp2p/go-libp2p v0.46.0/go.mod h1:TbIDnpDjBLa7isdgYpbxozIVPBTmM/7qKOJP4SFySrQ= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-kad-dht v0.26.1 h1:AazV3LCImYVkDUGAHx5lIEgZ9iUI2QQKH5GMRQU8uEA= -github.com/libp2p/go-libp2p-kad-dht v0.26.1/go.mod h1:mqRUGJ/+7ziQ3XknU2kKHfsbbgb9xL65DXjPOJwmZF8= +github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8= +github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= -github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= -github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= +github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= +github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod 
h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= -github.com/libp2p/go-libp2p-pubsub v0.11.0 h1:+JvS8Kty0OiyUiN0i8H5JbaCgjnJTRnTHe4rU88dLFc= -github.com/libp2p/go-libp2p-pubsub v0.11.0/go.mod h1:QEb+hEV9WL9wCiUAnpY29FZR6W3zK8qYlaml8R4q6gQ= +github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8= +github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44= github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s= github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE= -github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= -github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= -github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= +github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= +github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA= @@ -454,39 +422,34 @@ github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQ github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= 
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= -github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= -github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= -github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc= +github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= -github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= -github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= +github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= -github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marcopolo/simnet v0.0.1 
h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg= +github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc= +github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= 
+github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -494,17 +457,16 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -519,11 +481,10 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= -github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= -github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net v0.1.1/go.mod 
h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= @@ -531,53 +492,45 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= -github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= +github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= -github.com/multiformats/go-multistream v0.5.0/go.mod 
h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo 
v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= -github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 
h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= @@ -585,112 +538,88 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= -github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= -github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= -github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= 
-github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= -github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= +github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= +github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= +github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= +github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= -github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= -github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= -github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs= -github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.20 
h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg= -github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8= -github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= -github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= -github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= +github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= +github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= +github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= +github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= +github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= +github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= 
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= -github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw= -github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s= -github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= -github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= -github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= +github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= +github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= +github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo= +github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= -github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY= -github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= -github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= -github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= -github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= -github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI10= +github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/samber/lo v1.46.0 h1:w8G+oaCPgz1PoCJztqymCFaKwXt+5cCXn51uPxExFfQ= -github.com/samber/lo v1.46.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= 
-github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/slok/go-http-metrics v0.13.0 h1:lQDyJJx9wKhmbliyUsZ2l6peGnXRHjsjoqPt5VYzcP8= +github.com/slok/go-http-metrics v0.13.0/go.mod h1:HIr7t/HbN2sJaunvnt9wKP9xoBBVZFo1/KiHU3b0w+4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= @@ -698,8 +627,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -720,32 +647,26 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= -github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= 
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= -github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-testmark v0.9.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= @@ -755,8 +676,8 @@ github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboa github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= -github.com/whyrusleeping/cbor-gen v0.1.2 h1:WQFlrPhpcQl+M2/3dP5cvlTLWPVsL6LGBb9jJt6l/cA= 
-github.com/whyrusleeping/cbor-gen v0.1.2/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= +github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -764,15 +685,20 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= +github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= +github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= @@ -780,66 +706,55 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/exporters/zipkin v1.27.0 h1:aXcxb7F6ZDC1o2Z52LDfS2g6M2FB5CrxdR2gzY4QRNs= -go.opentelemetry.io/otel/exporters/zipkin v1.27.0/go.mod h1:+WMURoi4KmVB7ypbFPx3xtZTWen2Ca3lRK9u6DVTO5M= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.17.1 
h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= -go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/exporters/zipkin v1.38.0 
h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0= +go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= +go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/mock v0.5.2 
h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= +go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -852,8 +767,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -862,11 +777,10 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -884,21 +798,17 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 
h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -907,16 +817,16 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -924,16 +834,13 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod 
h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -944,19 +851,16 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -971,24 +875,19 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -996,11 +895,12 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod 
h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1020,17 +920,14 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1048,8 +945,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1060,23 +955,21 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 
h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1086,17 +979,11 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto 
v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1110,13 +997,10 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1126,8 +1010,8 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1137,8 +1021,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1146,10 +1032,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1157,28 +1039,21 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= -lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod 
h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/docs/examples/kubo-as-a-library/main.go b/docs/examples/kubo-as-a-library/main.go index 765e83c6d..8b2181ed7 100644 --- a/docs/examples/kubo-as-a-library/main.go +++ b/docs/examples/kubo-as-a-library/main.go @@ -10,6 +10,7 @@ import ( "path/filepath" "strings" "sync" + "time" "github.com/ipfs/boxo/files" "github.com/ipfs/boxo/path" @@ -58,6 +59,28 @@ func createTempRepo() (string, error) { return "", err } + // Use TCP-only on loopback with random port for reliable local testing. + // This matches what kubo's test harness uses (test/cli/transports_test.go). + // QUIC/UDP transports are avoided because they may be throttled on CI. + cfg.Addresses.Swarm = []string{ + "/ip4/127.0.0.1/tcp/0", + } + + // Explicitly disable non-TCP transports for reliability. + cfg.Swarm.Transports.Network.QUIC = config.False + cfg.Swarm.Transports.Network.Relay = config.False + cfg.Swarm.Transports.Network.WebTransport = config.False + cfg.Swarm.Transports.Network.WebRTCDirect = config.False + cfg.Swarm.Transports.Network.Websocket = config.False + cfg.AutoTLS.Enabled = config.False + + // Disable routing - we don't need DHT for direct peer connections. 
+ // Bitswap works with directly connected peers without needing DHT lookups. + cfg.Routing.Type = config.NewOptionalString("none") + + // Disable bootstrap for this example - we manually connect only the peers we need. + cfg.Bootstrap = []string{} + // When creating the repository, you can define custom settings on the repository, such as enabling experimental // features (See experimental-features.md) or customizing the gateway endpoint. // To do such things, you should modify the variable `cfg`. For example: @@ -96,10 +119,14 @@ func createNode(ctx context.Context, repoPath string) (*core.IpfsNode, error) { // Construct the node nodeOptions := &core.BuildCfg{ - Online: true, - Routing: libp2p.DHTOption, // This option sets the node to be a full DHT node (both fetching and storing DHT Records) - // Routing: libp2p.DHTClientOption, // This option sets the node to be a client DHT node (only fetching records) - Repo: repo, + Online: true, + // For this example, we use NilRouterOption (no routing) since we connect peers directly. + // Bitswap works with directly connected peers without needing DHT lookups. 
+ // In production, you would typically use: + // Routing: libp2p.DHTOption, // Full DHT node (stores and fetches records) + // Routing: libp2p.DHTClientOption, // DHT client (only fetches records) + Routing: libp2p.NilRouterOption, + Repo: repo, } return core.NewNode(ctx, nodeOptions) @@ -192,7 +219,7 @@ func main() { fmt.Println("-- Getting an IPFS node running -- ") - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() // Spawn a local peer using a temporary path, for testing purposes @@ -284,41 +311,31 @@ func main() { fmt.Printf("Got directory back from IPFS (IPFS path: %s) and wrote it to %s\n", cidDirectory.String(), outputPathDirectory) - /// --- Part IV: Getting a file from the IPFS Network + /// --- Part IV: Getting a file from another IPFS node - fmt.Println("\n-- Going to connect to a few nodes in the Network as bootstrappers --") + fmt.Println("\n-- Connecting to nodeA and fetching content via bitswap --") - peerMa := fmt.Sprintf("/ip4/127.0.0.1/udp/4010/p2p/%s", nodeA.Identity.String()) + // Get nodeA's actual listening address dynamically. + // We configured TCP-only on 127.0.0.1 with random port, so this will be a TCP address. + peerAddrs, err := ipfsA.Swarm().LocalAddrs(ctx) + if err != nil { + panic(fmt.Errorf("could not get peer addresses: %s", err)) + } + peerMa := peerAddrs[0].String() + "/p2p/" + nodeA.Identity.String() bootstrapNodes := []string{ - // IPFS Bootstrapper nodes. 
+ // In production, use real bootstrap peers like: // "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", - // "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", - // "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", - // "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", - - // IPFS Cluster Pinning nodes - // "/ip4/138.201.67.219/tcp/4001/p2p/QmUd6zHcbkbcs7SMxwLs48qZVX3vpcM8errYS7xEczwRMA", - // "/ip4/138.201.67.219/udp/4001/quic/p2p/QmUd6zHcbkbcs7SMxwLs48qZVX3vpcM8errYS7xEczwRMA", - // "/ip4/138.201.67.220/tcp/4001/p2p/QmNSYxZAiJHeLdkBg38roksAR9So7Y5eojks1yjEcUtZ7i", - // "/ip4/138.201.67.220/udp/4001/quic/p2p/QmNSYxZAiJHeLdkBg38roksAR9So7Y5eojks1yjEcUtZ7i", - // "/ip4/138.201.68.74/tcp/4001/p2p/QmdnXwLrC8p1ueiq2Qya8joNvk3TVVDAut7PrikmZwubtR", - // "/ip4/138.201.68.74/udp/4001/quic/p2p/QmdnXwLrC8p1ueiq2Qya8joNvk3TVVDAut7PrikmZwubtR", - // "/ip4/94.130.135.167/tcp/4001/p2p/QmUEMvxS2e7iDrereVYc5SWPauXPyNwxcy9BXZrC1QTcHE", - // "/ip4/94.130.135.167/udp/4001/quic/p2p/QmUEMvxS2e7iDrereVYc5SWPauXPyNwxcy9BXZrC1QTcHE", - - // You can add more nodes here, for example, another IPFS node you might have running locally, mine was: - // "/ip4/127.0.0.1/tcp/4010/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L", - // "/ip4/127.0.0.1/udp/4010/quic/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L", + // For this example, we only connect to nodeA which has our test content. 
peerMa, } - go func() { - err := connectToPeers(ctx, ipfsB, bootstrapNodes) - if err != nil { - log.Printf("failed connect to peers: %s", err) - } - }() + fmt.Println("Connecting to peer...") + err = connectToPeers(ctx, ipfsB, bootstrapNodes) + if err != nil { + panic(fmt.Errorf("failed to connect to peers: %s", err)) + } + fmt.Println("Connected to peer") exampleCIDStr := peerCidFile.RootCid().String() diff --git a/docs/examples/kubo-as-a-library/main_test.go b/docs/examples/kubo-as-a-library/main_test.go index ec34d62b1..ecc2a592a 100644 --- a/docs/examples/kubo-as-a-library/main_test.go +++ b/docs/examples/kubo-as-a-library/main_test.go @@ -1,17 +1,39 @@ package main import ( + "bytes" + "io" + "os" "os/exec" "strings" "testing" + "time" ) func TestExample(t *testing.T) { - out, err := exec.Command("go", "run", "main.go").Output() + t.Log("Starting go run main.go...") + start := time.Now() + + cmd := exec.Command("go", "run", "main.go") + cmd.Env = append(os.Environ(), "GOLOG_LOG_LEVEL=error") // reduce libp2p noise + + // Stream output to both test log and capture buffer for verification + // This ensures we see progress even if the process is killed + var buf bytes.Buffer + cmd.Stdout = io.MultiWriter(os.Stdout, &buf) + cmd.Stderr = io.MultiWriter(os.Stderr, &buf) + + err := cmd.Run() + + elapsed := time.Since(start) + t.Logf("Command completed in %v", elapsed) + + out := buf.String() if err != nil { - t.Fatalf("running example (%v)", err) + t.Fatalf("running example (%v):\n%s", err, out) } - if !strings.Contains(string(out), "All done!") { - t.Errorf("example did not run successfully") + + if !strings.Contains(out, "All done!") { + t.Errorf("example did not complete successfully, output:\n%s", out) } } diff --git a/docs/experimental-features.md b/docs/experimental-features.md index eb4f2ff14..fdca90fbe 100644 --- a/docs/experimental-features.md +++ b/docs/experimental-features.md @@ -65,6 +65,14 @@ Experimental. 
### How to enable +> [!WARNING] +> **SECURITY CONSIDERATION** +> +> This feature provides the IPFS [`add` command](https://docs.ipfs.tech/reference/kubo/cli/#ipfs-add) with access to +> the local filesystem. Consequently, any user with access to CLI or the HTTP [`/v0/add` RPC API](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-add) can read +> files from the local filesystem with the same permissions as the Kubo daemon. +> If you enable this, secure your RPC API using [`API.Authorizations`](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations) or custom auth middleware. + Modify your ipfs config: ``` ipfs config --json Experimental.FilestoreEnabled true @@ -79,6 +87,10 @@ filestore instead of copying the files into your local IPFS repo. - [ ] Needs more people to use and report on how well it works. - [ ] Need to address error states and failure conditions + - [ ] cleanup of broken filesystem references (if file is deleted) + - [ ] tests that confirm ability to override preexisting filesystem links (allowing user to fix broken link) + - [ ] support for a single block having more than one sources in filesystem (blocks can be shared by unrelated files, and not be broken when some files are unpinned / gc'd) + - [ ] [other known issues](https://github.com/ipfs/kubo/issues/7161) - [ ] Need to write docs on usage, advantages, disadvantages - [ ] Need to merge utility commands to aid in maintenance and repair of filestore @@ -96,6 +108,14 @@ v0.4.17 ### How to enable +> [!WARNING] +> **SECURITY CONSIDERATION** +> +> This feature provides the IPFS [`add` CLI command](https://docs.ipfs.tech/reference/kubo/cli/#ipfs-add) with access to +> the local filesystem. Consequently, any user with access to the CLI or HTTP [`/v0/add` RPC API](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-add) can read +> files from the local filesystem with the same permissions as the Kubo daemon. 
+> If you enable this, secure your RPC API using [`API.Authorizations`](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations) or custom auth middleware. + Modify your ipfs config: ``` ipfs config --json Experimental.UrlstoreEnabled true @@ -106,6 +126,9 @@ And then add a file at a specific URL using `ipfs urlstore add ` ### Road to being a real feature - [ ] Needs more people to use and report on how well it works. - [ ] Need to address error states and failure conditions + - [ ] cleanup of broken URL+range references (if URL starts returning 404 or error) + - [ ] tests that confirm ability to override preexisting URL+range links (allowing user to fix broken link) + - [ ] support for a single block having more than one URL+range (blocks can be shared by unrelated URLs) - [ ] Need to write docs on usage, advantages, disadvantages - [ ] Need to implement caching - [ ] Need to add metrics to monitor performance @@ -176,9 +199,8 @@ configured, the daemon will fail to start. ## ipfs p2p -Allows tunneling of TCP connections through Libp2p streams. If you've ever used -port forwarding with SSH (the `-L` option in OpenSSH), this feature is quite -similar. +Allows tunneling of TCP connections through libp2p streams, similar to SSH port +forwarding (`ssh -L`). ### State @@ -190,7 +212,12 @@ Experimental, will be stabilized in 0.6.0 ### How to enable -The `p2p` command needs to be enabled in the config: +> [!WARNING] +> **SECURITY CONSIDERATION** +> +> This feature provides CLI and HTTP RPC user with ability to set up port forwarding for all localhost and LAN ports. +> If you enable this and plan to expose CLI or HTTP RPC to other users or machines, +> secure RPC API using [`API.Authorizations`](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations) or custom auth middleware. 
```sh > ipfs config --json Experimental.Libp2pStreamMounting true @@ -198,90 +225,14 @@ The `p2p` command needs to be enabled in the config: ### How to use -**Netcat example:** - -First, pick a protocol name for your application. Think of the protocol name as -a port number, just significantly more user-friendly. In this example, we're -going to use `/x/kickass/1.0`. - -***Setup:*** - -1. A "server" node with peer ID `$SERVER_ID` -2. A "client" node. - -***On the "server" node:*** - -First, start your application and have it listen for TCP connections on -port `$APP_PORT`. - -Then, configure the p2p listener by running: - -```sh -> ipfs p2p listen /x/kickass/1.0 /ip4/127.0.0.1/tcp/$APP_PORT -``` - -This will configure IPFS to forward all incoming `/x/kickass/1.0` streams to -`127.0.0.1:$APP_PORT` (opening a new connection to `127.0.0.1:$APP_PORT` per -incoming stream. - -***On the "client" node:*** - -First, configure the client p2p dialer, so that it forwards all inbound -connections on `127.0.0.1:SOME_PORT` to the server node listening -on `/x/kickass/1.0`. - -```sh -> ipfs p2p forward /x/kickass/1.0 /ip4/127.0.0.1/tcp/$SOME_PORT /p2p/$SERVER_ID -``` - -Next, have your application open a connection to `127.0.0.1:$SOME_PORT`. This -connection will be forwarded to the service running on `127.0.0.1:$APP_PORT` on -the remote machine. You can test it with netcat: - -***On "server" node:*** -```sh -> nc -v -l -p $APP_PORT -``` - -***On "client" node:*** -```sh -> nc -v 127.0.0.1 $SOME_PORT -``` - -You should now see that a connection has been established and be able to -exchange messages between netcat instances. - -(note that depending on your netcat version you may need to drop the `-v` flag) - -**SSH example** - -**Setup:** - -1. A "server" node with peer ID `$SERVER_ID` and running ssh server on the - default port. -2. A "client" node. 
- -_you can get `$SERVER_ID` by running `ipfs id -f "\n"`_ - -***First, on the "server" node:*** - -```sh -ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 -``` - -***Then, on "client" node:*** - -```sh -ipfs p2p forward /x/ssh /ip4/127.0.0.1/tcp/2222 /p2p/$SERVER_ID -``` - -You should now be able to connect to your ssh server through a libp2p connection -with `ssh [user]@127.0.0.1 -p 2222`. - +See [docs/p2p-tunnels.md](p2p-tunnels.md) for usage examples, foreground mode, +and systemd integration. ### Road to being a real feature -- [ ] More documentation +- [x] More documentation +- [x] `ipfs p2p forward` mode +- [ ] Ability to define tunnels via JSON config, similar to [`Peering.Peers`](https://github.com/ipfs/kubo/blob/master/docs/config.md#peeringpeers), see [kubo#5460](https://github.com/ipfs/kubo/issues/5460) ## p2p http proxy @@ -297,6 +248,13 @@ Experimental ### How to enable +> [!WARNING] +> **SECURITY CONSIDERATION** +> +> This feature provides CLI and HTTP RPC user with ability to set up HTTP forwarding for all localhost and LAN ports. +> If you enable this and plan to expose CLI or HTTP RPC to other users or machines, +> secure RPC API using [`API.Authorizations`](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations) or custom auth middleware. + The `p2p` command needs to be enabled in the config: ```sh @@ -361,13 +319,13 @@ We also support the use of protocol names of the form /x/$NAME/http where $NAME ### Road to being a real feature - [ ] Needs p2p streams to graduate from experiments -- [ ] Needs more people to use and report on how well it works / fits use cases +- [ ] Needs more people to use and report on how well it works and fits use cases - [ ] More documentation - [ ] Need better integration with the subdomain gateway feature. 
## FUSE -FUSE makes it possible to mount `/ipfs` and `/ipns` namespaces in your OS, +FUSE makes it possible to mount `/ipfs`, `/ipns` and `/mfs` namespaces in your OS, allowing arbitrary apps access to IPFS using a subset of filesystem abstractions. It is considered EXPERIMENTAL due to limited (and buggy) support on some platforms. @@ -500,27 +458,9 @@ ipfs config --json Swarm.RelayClient.Enabled true ### State -Experimental, disabled by default. +`Experimental.StrategicProviding` was removed in Kubo v0.35. -Replaces the existing provide mechanism with a robust, strategic provider system. Currently enabling this option will provide nothing. - -### How to enable - -Modify your ipfs config: - -``` -ipfs config --json Experimental.StrategicProviding true -``` - -### Road to being a real feature - -- [ ] needs real-world testing -- [ ] needs adoption -- [ ] needs to support all provider subsystem features - - [X] provide nothing - - [ ] provide roots - - [ ] provide all - - [ ] provide strategic +Replaced by [`Provide.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provideenabled) and [`Provide.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy). ## GraphSync @@ -661,3 +601,4 @@ ipfs config --json Experimental.GatewayOverLibp2p true ## Accelerated DHT Client This feature now lives at [`Routing.AcceleratedDHTClient`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient). + diff --git a/docs/file-transfer.md b/docs/file-transfer.md index e61ddc1b3..94d809768 100644 --- a/docs/file-transfer.md +++ b/docs/file-transfer.md @@ -36,7 +36,7 @@ doesn't even know it has to connect to node A. ### Checking for existing connections -The first thing to do is to double check that both nodes are in fact running +The first thing to do is to double-check that both nodes are in fact running and online. To do this, run `ipfs id` on each machine. 
If both nodes show some addresses (like the example below), then your nodes are online. diff --git a/docs/fuse.md b/docs/fuse.md index 7744a0d45..b928b8860 100644 --- a/docs/fuse.md +++ b/docs/fuse.md @@ -2,7 +2,7 @@ **EXPERIMENTAL:** FUSE support is limited, YMMV. -Kubo makes it possible to mount `/ipfs` and `/ipns` namespaces in your OS, +Kubo makes it possible to mount `/ipfs`, `/ipns` and `/mfs` namespaces in your OS, allowing arbitrary apps access to IPFS. ## Install FUSE @@ -16,7 +16,7 @@ to your distribution manual to get things working. Install `fuse` with your favorite package manager: ``` -sudo apt-get install fuse +sudo apt-get install fuse3 ``` On some older Linux distributions, you may need to add yourself to the `fuse` group. @@ -48,20 +48,37 @@ go get github.com/jbenet/go-fuse-version/fuse-version If you run into any problems installing FUSE or mounting IPFS, hop on IRC and speak with us, or if you figure something new out, please add to this document! +#### FreeBSD +```sh +sudo pkg install fusefs-ext2 +``` + +Load the fuse kernel module: +```sh +sudo kldload fusefs +``` + +To load automatically on boot: +```sh +sudo echo fusefs_load="YES" >> /boot/loader.conf +``` + ## Prepare mountpoints -By default ipfs uses `/ipfs` and `/ipns` directories for mounting, this can be -changed in config. You will have to create the `/ipfs` and `/ipns` directories +By default ipfs uses `/ipfs`, `/ipns` and `/mfs` directories for mounting, this can be +changed in config. You will have to create the `/ipfs`, `/ipns` and `/mfs` directories explicitly. Note that modifying root requires sudo permissions. ```sh # make the directories sudo mkdir /ipfs sudo mkdir /ipns +sudo mkdir /mfs # chown them so ipfs can use them without root permissions sudo chown /ipfs sudo chown /ipns +sudo chown /mfs ``` Depending on whether you are using OSX or Linux, follow the proceeding instructions. 
@@ -105,6 +122,30 @@ ipfs config --json Mounts.FuseAllowOther true ipfs daemon --mount ``` +If using FreeBSD, it is necessary to run `ipfs` as root: +```sh +sudo HOME=$HOME ipfs daemon --mount +``` + +## MFS mountpoint + +Kubo v0.35.0 and later supports mounting the MFS (Mutable File System) root as +a FUSE filesystem, enabling manipulation of content-addressed data like regular +files. The CID for any file or directory is retrievable via the `ipfs_cid` +extended attribute. + +```sh +getfattr -n ipfs_cid /mfs/welcome-to-IPFS.jpg +getfattr: Removing leading '/' from absolute path names +# file: mfs/welcome-to-IPFS.jpg +ipfs_cid="QmaeXDdwpUeKQcMy7d5SFBfVB4y7LtREbhm5KizawPsBSH" +``` + +Please note that the operations supported by the MFS FUSE mountpoint are +limited. Since the MFS wasn't designed to store file attributes like ownership +information, permissions and creation date, some applications like `vim` and +`sed` may misbehave due to missing functionality. + ## Troubleshooting #### `Permission denied` or `fusermount: user has no write access to mountpoint` error in Linux @@ -145,6 +186,7 @@ set for user running `ipfs mount` command. ``` sudo umount /ipfs sudo umount /ipns +sudo umount /mfs ``` #### Mounting fails with "error mounting: could not resolve name" diff --git a/docs/gateway.md b/docs/gateway.md index 3a616a158..d51eab4cc 100644 --- a/docs/gateway.md +++ b/docs/gateway.md @@ -6,7 +6,7 @@ they were stored in a traditional web server. [More about Gateways](https://docs.ipfs.tech/concepts/ipfs-gateway/) and [addressing IPFS on the web](https://docs.ipfs.tech/how-to/address-ipfs-on-web/). -Kubo's Gateway implementation follows [ipfs/specs: Specification for HTTP Gateways](https://github.com/ipfs/specs/tree/main/http-gateways#readme). +Kubo's Gateway implementation follows [IPFS Gateway Specifications](https://specs.ipfs.tech/http-gateways/) and is tested with [Gateway Conformance Test Suite](https://github.com/ipfs/gateway-conformance). 
### Local gateway @@ -14,14 +14,21 @@ By default, Kubo nodes run a [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://127.0.0.1:8080/` and a [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://localhost:8080/`. -The path one also implements [trustless gateway spec](https://specs.ipfs.tech/http-gateways/trustless-gateway/) -and supports [trustless responses](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) as opt-in via `Accept` header. +> [!CAUTION] +> **For browsing websites, web apps, and dapps in a browser, use the subdomain +> gateway** (`localhost`). Each content root gets its own +> [web origin](https://developer.mozilla.org/en-US/docs/Web/Security/Same-origin_policy), +> isolating localStorage, cookies, and session data between sites. +> +> **For file retrieval, use the path gateway** (`127.0.0.1`). Path gateways are +> suited for downloading files or fetching [verifiable](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) +> content, but lack origin isolation (all content shares the same origin). Additional listening addresses and gateway behaviors can be set in the [config](#configuration) file. ### Public gateways -Protocol Labs provides a public gateway at +IPFS Foundation [provides public gateways](https://docs.ipfs.tech/concepts/public-utilities/) at `https://ipfs.io` ([path](https://specs.ipfs.tech/http-gateways/path-gateway/)), `https://dweb.link` ([subdomain](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway)), and `https://trustless-gateway.link` ([trustless](https://specs.ipfs.tech/http-gateways/trustless-gateway/) only). 
@@ -41,6 +48,80 @@ The gateway's log level can be changed with this command: > ipfs log level core/server debug ``` +## Running in Production + +When deploying Kubo's gateway in production, be aware of these important considerations: + + +> [!IMPORTANT] +> **Reverse Proxy:** When running Kubo behind a reverse proxy (such as nginx), +> the original `Host` header **must** be forwarded to Kubo for +> [`Gateway.PublicGateways`](config.md#gatewaypublicgateways) to work. +> Kubo uses the `Host` header to match configured hostnames and detect +> subdomain gateway patterns like `{cid}.ipfs.example.org` or DNSLink hostnames. +> +> If the `Host` header is not forwarded correctly, Kubo will not recognize +> the configured gateway hostnames and requests may be handled incorrectly. +> +> If `X-Forwarded-Proto` is not set, redirects over HTTPS will use the wrong protocol +> and DNSLink names will not be inlined for subdomain gateways. +> +> Example: minimal nginx configuration for `example.org` +> +> ```nginx +> server { +> listen 80; +> listen [::]:80; +> +> # IMPORTANT: Include wildcard to match subdomain gateway requests. +> # The dot prefix matches both apex domain and all subdomains. +> server_name .example.org; +> +> location / { +> proxy_pass http://127.0.0.1:8080; +> +> # IMPORTANT: Forward the original Host header to Kubo. +> # Without this, PublicGateways configuration will not work.
+> proxy_set_header Host $host; +> +> # IMPORTANT: X-Forwarded-Proto is required for correct behavior: +> # - Redirects will use https:// URLs when set to "https" +> # - DNSLink names will be inlined for subdomain gateways +> # (e.g., /ipns/en.wikipedia-on-ipfs.org → en-wikipedia--on--ipfs-org.ipns.example.org) +> proxy_set_header X-Forwarded-Proto $scheme; +> proxy_set_header X-Forwarded-Host $host; +> } +> } +> ``` +> +> Common mistakes to avoid: +> +> - **Missing wildcard in `server_name`:** Using only `server_name example.org;` +> will not match subdomain requests like `{cid}.ipfs.example.org`. Always +> include `*.example.org` or use the dot prefix `.example.org`. +> +> - **Wrong `Host` header value:** Using `proxy_set_header Host $proxy_host;` +> sends the backend's hostname (e.g., `127.0.0.1:8080`) instead of the +> original `Host` header. Always use `$host` or `$http_host`. +> +> - **Missing `Host` header entirely:** If `proxy_set_header Host` is not +> specified, nginx defaults to `$proxy_host`, which breaks gateway routing. + +> [!IMPORTANT] +> **Timeouts:** Configure [`Gateway.RetrievalTimeout`](config.md#gatewayretrievaltimeout) +> based on your expected content retrieval times. + +> [!IMPORTANT] +> **Rate Limiting:** Use [`Gateway.MaxConcurrentRequests`](config.md#gatewaymaxconcurrentrequests) +> to protect against traffic spikes. + +> [!IMPORTANT] +> **CDN/Cloudflare:** If using Cloudflare or other CDNs with +> [deserialized responses](config.md#gatewaydeserializedresponses) enabled, review +> [`Gateway.MaxRangeRequestFileSize`](config.md#gatewaymaxrangerequestfilesize) to avoid +> excess bandwidth billing from range request bugs. Cloudflare users may need additional +> protection via [Cloudflare Snippets](https://github.com/ipfs/boxo/issues/856#issuecomment-3523944976). + ## Directories For convenience, the gateway (mostly) acts like a normal web-server when serving @@ -53,7 +134,7 @@ a directory: 2. 
Dynamically build and serve a listing of the contents of the directory. This redirect is skipped if the query string contains a -`go-get=1` parameter. See [PR#3964](https://github.com/ipfs/kubo/pull/3963) +`go-get=1` parameter. See [PR#3963](https://github.com/ipfs/kubo/pull/3963) for details ## Static Websites @@ -107,10 +188,12 @@ This is equivalent of `ipfs block get`. ### `application/vnd.ipld.car` -Returns a [CAR](https://ipld.io/specs/transport/car/) stream for specific DAG and selector. +Returns a [CAR](https://ipld.io/specs/transport/car/) stream for a DAG or a subset of it. -Right now only 'full DAG' implicit selector is implemented. -Support for user-provided IPLD selectors is tracked in https://github.com/ipfs/kubo/issues/8769. +The `dag-scope` parameter controls which blocks are included: `all` (default, entire DAG), +`entity` (logical unit like a file), or `block` (single block). For [UnixFS](https://specs.ipfs.tech/unixfs/) files, +`entity-bytes` enables byte range requests. See [IPIP-402](https://specs.ipfs.tech/ipips/ipip-0402/) +for details. This is a rough equivalent of `ipfs dag export`. diff --git a/docs/implement-api-bindings.md b/docs/implement-api-bindings.md index 3587ac21f..d0273d9e7 100644 --- a/docs/implement-api-bindings.md +++ b/docs/implement-api-bindings.md @@ -39,12 +39,12 @@ function calls. For example: #### CLI API Transport In the commandline, IPFS uses a traditional flag and arg-based mapping, where: -- the first arguments selects the command, as in git - e.g. `ipfs dag get` +- the first arguments select the command, as in git - e.g. `ipfs dag get` - the flags specify options - e.g. `--enc=protobuf -q` - the rest are positional arguments - e.g. `ipfs key rename ` - files are specified by filename, or through stdin -(NOTE: When kubo runs the daemon, the CLI API is actually converted to HTTP +(NOTE: When kubo runs the daemon, the CLI API is converted to HTTP calls. 
otherwise, they execute in the same process) #### HTTP API Transport @@ -87,7 +87,7 @@ Despite all the generalization spoken about above, the IPFS API is actually very simple. You can inspect all the requests made with `nc` and the `--api` option (as of [this PR](https://github.com/ipfs/kubo/pull/1598), or `0.3.8`): -``` +```sh > nc -l 5002 & > ipfs --api /ip4/127.0.0.1/tcp/5002 swarm addrs local --enc=json POST /api/v0/version?enc=json&stream-channels=true HTTP/1.1 @@ -104,7 +104,7 @@ The only hard part is getting the file streaming right. It is (now) fairly easy to stream files to kubo using multipart. Basically, we end up with HTTP requests like this: -``` +```sh > nc -l 5002 & > ipfs --api /ip4/127.0.0.1/tcp/5002 add -r ~/demo/basic/test POST /api/v0/add?encoding=json&progress=true&r=true&stream-channels=true HTTP/1.1 diff --git a/docs/logo/kubo-logo.png b/docs/logo/kubo-logo.png new file mode 100644 index 000000000..c98eadd59 Binary files /dev/null and b/docs/logo/kubo-logo.png differ diff --git a/docs/logo/kubo-logo.svg b/docs/logo/kubo-logo.svg new file mode 100644 index 000000000..7dbd2ec67 --- /dev/null +++ b/docs/logo/kubo-logo.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/metrics.md b/docs/metrics.md new file mode 100644 index 000000000..fe684cbc6 --- /dev/null +++ b/docs/metrics.md @@ -0,0 +1,118 @@ +## Kubo metrics + +By default, a Prometheus endpoint is exposed by Kubo at `http://127.0.0.1:5001/debug/metrics/prometheus`. + +It includes default [Prometheus Go client metrics](https://prometheus.io/docs/guides/go-application/) + Kubo-specific metrics listed below. 
+ +### Table of Contents + +- [DHT RPC](#dht-rpc) + - [Inbound RPC metrics](#inbound-rpc-metrics) + - [Outbound RPC metrics](#outbound-rpc-metrics) +- [Provide](#provide) + - [Legacy Provider](#legacy-provider) + - [DHT Provider](#dht-provider) +- [Gateway (`boxo/gateway`)](#gateway-boxogateway) + - [HTTP metrics](#http-metrics) + - [Blockstore cache metrics](#blockstore-cache-metrics) + - [Backend metrics](#backend-metrics) +- [Generic HTTP Servers](#generic-http-servers) + - [Core HTTP metrics](#core-http-metrics-ipfs_http_) + - [HTTP Server metrics](#http-server-metrics-http_server_) +- [OpenTelemetry Metadata](#opentelemetry-metadata) + +> [!WARNING] +> This documentation is incomplete. For an up-to-date list of metrics available at daemon startup, see [test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile](https://github.com/ipfs/kubo/blob/master/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile). +> +> Additional metrics may appear during runtime as some components (like boxo/gateway) register metrics only after their first event occurs (e.g., HTTP request/response). 
+ +## DHT RPC + +Metrics from `go-libp2p-kad-dht` for DHT RPC operations: + +### Inbound RPC metrics + +- `rpc_inbound_messages_total` - Counter: total messages received per RPC +- `rpc_inbound_message_errors_total` - Counter: total errors for received messages +- `rpc_inbound_bytes_[bucket|sum|count]` - Histogram: distribution of received bytes per RPC +- `rpc_inbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for inbound RPCs + +### Outbound RPC metrics + +- `rpc_outbound_messages_total` - Counter: total messages sent per RPC +- `rpc_outbound_message_errors_total` - Counter: total errors for sent messages +- `rpc_outbound_requests_total` - Counter: total requests sent +- `rpc_outbound_request_errors_total` - Counter: total errors for sent requests +- `rpc_outbound_bytes_[bucket|sum|count]` - Histogram: distribution of sent bytes per RPC +- `rpc_outbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for outbound RPCs + +## Provide + +### Legacy Provider + +Metrics for the legacy provider system when `Provide.DHT.SweepEnabled=false`: + +- `provider_reprovider_provide_count` - Counter: total successful provide operations since node startup +- `provider_reprovider_reprovide_count` - Counter: total reprovide sweep operations since node startup + +### DHT Provider + +Metrics for the DHT provider system when `Provide.DHT.SweepEnabled=true`: + +- `provider_provides_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`) + +> [!NOTE] +> These metrics are exposed by [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/). You can enable debug logging for DHT provider activity with `GOLOG_LOG_LEVEL=dht/provider=debug`. + +## Gateway (`boxo/gateway`) + +> [!TIP] +> These metrics are limited to [IPFS Gateway](https://specs.ipfs.tech/http-gateways/) endpoints. 
For general HTTP metrics across all endpoints, consider using a reverse proxy. + +Gateway metrics appear after the first HTTP request is processed: + +### HTTP metrics + +- `ipfs_http_gw_responses_total{code}` - Counter: total HTTP responses by status code +- `ipfs_http_gw_retrieval_timeouts_total{code,truncated}` - Counter: requests that timed out during content retrieval +- `ipfs_http_gw_concurrent_requests` - Gauge: number of requests currently being processed + +### Blockstore cache metrics + +- `ipfs_http_blockstore_cache_hit` - Counter: global block cache hits +- `ipfs_http_blockstore_cache_requests` - Counter: global block cache requests + +### Backend metrics + +- `ipfs_gw_backend_api_call_duration_seconds_[bucket|sum|count]{backend_method}` - Histogram: time spent in IPFSBackend API calls + +## Generic HTTP Servers + +> [!TIP] +> The metrics below are not very useful and exist mostly for historical reasons. If you need non-gateway HTTP metrics, it's better to put a reverse proxy in front of Kubo and use its metrics. 
+ +### Core HTTP metrics (`ipfs_http_*`) + +Prometheus metrics for the HTTP API exposed at port 5001: + +- `ipfs_http_requests_total{method,code,handler}` - Counter: total HTTP requests (Legacy - new metrics are provided by boxo/gateway for gateway traffic) +- `ipfs_http_request_duration_seconds[_sum|_count]{handler}` - Summary: request processing duration +- `ipfs_http_request_size_bytes[_sum|_count]{handler}` - Summary: request body sizes +- `ipfs_http_response_size_bytes[_sum|_count]{handler}` - Summary: response body sizes + +### HTTP Server metrics (`http_server_*`) + +Additional HTTP instrumentation for all handlers (Gateway, API commands, etc.): + +- `http_server_request_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of request body sizes +- `http_server_request_duration_seconds_[bucket|count|sum]` - Histogram: distribution of request processing times +- `http_server_response_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of response body sizes + +These metrics are automatically added to Gateway handlers, Hostname Gateway, Libp2p Gateway, and API command handlers. + +## OpenTelemetry Metadata + +Kubo uses Prometheus for metrics collection for historical reasons, but OpenTelemetry metrics are automatically exposed through the same Prometheus endpoint. These metadata metrics provide context about the instrumentation: + +- `otel_scope_info` - Information about instrumentation libraries producing metrics +- `target_info` - Service metadata including version and instance information \ No newline at end of file diff --git a/docs/p2p-tunnels.md b/docs/p2p-tunnels.md new file mode 100644 index 000000000..9f3c310d8 --- /dev/null +++ b/docs/p2p-tunnels.md @@ -0,0 +1,214 @@ +# P2P Tunnels + +Kubo supports tunneling TCP connections through libp2p streams, similar to SSH +port forwarding (`ssh -L`). This allows exposing local services to remote peers +and forwarding remote services to local ports. 
+ +- [Why P2P Tunnels?](#why-p2p-tunnels) +- [Quick Start](#quick-start) +- [Background Mode](#background-mode) +- [Foreground Mode](#foreground-mode) + - [systemd Integration](#systemd-integration) +- [Security Considerations](#security-considerations) +- [Troubleshooting](#troubleshooting) + +## Why P2P Tunnels? + +Unlike traditional SSH tunnels, libp2p-based tunnels have the following advantages: + +- **No public IP or open ports**: The server does not need a static IP address + or port forwarding configured on the router. Connectivity to peers behind NAT + is facilitated by [Direct Connection Upgrade through Relay (DCUtR)](https://github.com/libp2p/specs/blob/master/relay/DCUtR.md), + which enables NAT hole-punching. + +- **No DNS or IP address management**: All you need is the server's PeerID and + an agreed-upon protocol name (e.g., `/x/ssh`). Kubo handles peer discovery + and routing via the [Amino DHT](https://specs.ipfs.tech/routing/kad-dht/). + +- **Simplified firewall rules**: Since connections are established through + libp2p's existing swarm connections, no additional firewall configuration is + needed beyond what Kubo already requires. + +This makes p2p tunnels useful for connecting to machines on home networks, +behind corporate firewalls, or in environments where traditional port forwarding +is not available. + +## Quick Start + +Enable the experimental feature: + +```console +$ ipfs config --json Experimental.Libp2pStreamMounting true +``` + +Test with netcat (`nc`) - no services required: + +**On the server:** + +```console +$ ipfs p2p listen /x/test /ip4/127.0.0.1/tcp/9999 +$ nc -l -p 9999 +``` + +**On the client:** + +Replace `$SERVER_ID` with the server's peer ID (get it with `ipfs id -f "\n"` +on the server). + +```console +$ ipfs p2p forward /x/test /ip4/127.0.0.1/tcp/9998 /p2p/$SERVER_ID +$ nc 127.0.0.1 9998 +``` + +Type in either terminal and the text appears in the other. Use Ctrl+C to exit.
+ +## Background Mode + +By default, `ipfs p2p listen` and `ipfs p2p forward` register the tunnel with +the daemon and return immediately. The tunnel persists until explicitly closed +with `ipfs p2p close` or the daemon shuts down. + +This example exposes a local SSH server (listening on `localhost:22`) to a +remote peer. The same pattern works for any TCP service. + +**On the server** (the machine running SSH): + +Register a p2p listener that forwards incoming connections to the local SSH +server. The protocol name `/x/ssh` is an arbitrary identifier that both peers +must agree on (the `/x/` prefix is required for custom protocols). + +```console +$ ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 +``` + +**On the client:** + +Create a local port (`2222`) that tunnels through libp2p to the server's SSH +service. + +```console +$ ipfs p2p forward /x/ssh /ip4/127.0.0.1/tcp/2222 /p2p/$SERVER_ID +``` + +Now connect to SSH through the tunnel: + +```console +$ ssh user@127.0.0.1 -p 2222 +``` + +**Other services:** To tunnel a different service, change the port and protocol +name. For example, to expose a web server on port 8080, use `/x/mywebapp` and +`/ip4/127.0.0.1/tcp/8080`. + +## Foreground Mode + +Use `--foreground` (`-f`) to block until interrupted. The tunnel is +automatically removed when the command exits: + +```console +$ ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 --foreground +Listening on /x/ssh, forwarding to /ip4/127.0.0.1/tcp/22, waiting for interrupt... +^C +Received interrupt, removing listener for /x/ssh +``` + +The listener/forwarder is automatically removed when: + +- The command receives Ctrl+C or SIGTERM +- `ipfs p2p close` is called +- The daemon shuts down + +This mode is useful for systemd services and scripts that need cleanup on exit. + +### systemd Integration + +The `--foreground` flag enables clean integration with systemd. 
The examples +below show how to run `ipfs p2p listen` as a user service that starts +automatically when the IPFS daemon is ready. + +Ensure IPFS daemon runs as a systemd user service. See +[misc/README.md](https://github.com/ipfs/kubo/blob/master/misc/README.md#systemd) +for setup instructions and where to place unit files. + +#### P2P listener with path-based activation + +Use a `.path` unit to wait for the daemon's RPC API to be ready before starting +the p2p listener. + +**`ipfs-p2p-tunnel.path`**: + +```systemd +[Unit] +Description=Monitor for IPFS daemon startup +After=ipfs.service +Requires=ipfs.service + +[Path] +PathExists=%h/.ipfs/api +Unit=ipfs-p2p-tunnel.service + +[Install] +WantedBy=default.target +``` + +The `%h` specifier expands to the user's home directory. If you use a custom +`IPFS_PATH`, adjust accordingly. + +**`ipfs-p2p-tunnel.service`**: + +```systemd +[Unit] +Description=IPFS p2p tunnel +Requires=ipfs.service + +[Service] +ExecStart=ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 -f +Restart=on-failure +RestartSec=10 + +[Install] +WantedBy=default.target +``` + +#### Enabling the services + +```console +$ systemctl --user enable ipfs.service +$ systemctl --user enable ipfs-p2p-tunnel.path +$ systemctl --user start ipfs.service +``` + +The path unit monitors `~/.ipfs/api` and starts `ipfs-p2p-tunnel.service` +once the file exists. + +## Security Considerations + +> [!WARNING] +> This feature provides CLI and HTTP RPC users with the ability to set up port +> forwarding for localhost and LAN ports. If you enable this and plan to expose +> CLI or HTTP RPC to other users or machines, secure the RPC API using +> [`API.Authorizations`](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations) +> or custom auth middleware. + +## Troubleshooting + +### Foreground listener stops when terminal closes + +When using `--foreground`, the listener stops if the terminal closes. 
For +persistent foreground listeners, use a systemd service, `nohup`, `tmux`, or +`screen`. Without `--foreground`, the listener persists in the daemon regardless +of terminal state. + +### Connection refused errors + +Verify: + +1. The experimental feature is enabled: `ipfs config Experimental.Libp2pStreamMounting` +2. The listener is active: `ipfs p2p ls` +3. Both peers can connect: `ipfs swarm connect /p2p/$PEER_ID` + +### Persistent tunnel configuration + +There is currently no way to define tunnels in the Kubo JSON config file. Use +`--foreground` mode with a systemd service for persistent tunnels. Support for +configuring tunnels via JSON config may be added in the future (see [kubo#5460](https://github.com/ipfs/kubo/issues/5460) - PRs welcome!). diff --git a/docs/plugins.md b/docs/plugins.md index 86cfe1c51..8a388a533 100644 --- a/docs/plugins.md +++ b/docs/plugins.md @@ -117,6 +117,7 @@ Example: | [flatfs](https://github.com/ipfs/kubo/tree/master/plugin/plugins/flatfs) | Datastore | x | A stable filesystem-based datastore. | | [levelds](https://github.com/ipfs/kubo/tree/master/plugin/plugins/levelds) | Datastore | x | A stable, flexible datastore backend. | | [jaeger](https://github.com/ipfs/go-jaeger-plugin) | Tracing | | An opentracing backend. | +| [telemetry](https://github.com/ipfs/kubo/tree/master/plugin/plugins/telemetry) | Telemetry | x | Collects anonymized usage data for Kubo development. | * **Preloaded** plugins are built into the Kubo binary and do not need to be installed separately. At the moment, all in-tree plugins are preloaded. diff --git a/docs/provide-stats.md b/docs/provide-stats.md new file mode 100644 index 000000000..4d0e7031e --- /dev/null +++ b/docs/provide-stats.md @@ -0,0 +1,293 @@ +# Provide Stats + +The `ipfs provide stat` command gives you statistics about your local provide +system. This file provides a detailed explanation of the metrics reported by +this command. 
+ +## Understanding the Metrics + +The statistics are organized into three types of measurements: + +### Per-worker rates + +Metrics like "CIDs reprovided/min/worker" measure the throughput of a single +worker processing one region. To estimate total system throughput, multiply by +the number of active workers of that type (see [Workers stats](#workers-stats)). + +Example: If "CIDs reprovided/min/worker" shows 100 and you have 10 active +periodic workers, your total reprovide throughput is approximately 1,000 +CIDs/min. + +### Per-region averages + +Metrics like "Avg CIDs/reprovide" measure properties of the work units (keyspace +regions). These represent the average size or characteristics of a region, not a +rate. Do NOT multiply these by worker count. + +Example: "Avg CIDs/reprovide: 250,000" means each region contains an average of +250,000 CIDs that get reprovided together as a batch. + +### System totals + +Metrics like "Total CIDs provided" are cumulative counts since node startup. +These aggregate all work across all workers over time. + +## Connectivity + +### Status + +Current connectivity status (`online`, `disconnected`, or `offline`) and when +it last changed (see [provide connectivity +status](./config.md#providedhtofflinedelay)). + +## Queues + +### Provide queue + +Number of CIDs waiting for initial provide, and the number of keyspace regions +they're grouped into. + +### Reprovide queue + +Number of regions with overdue reprovides. These regions missed their scheduled +reprovide time and will be processed as soon as possible. If decreasing, the +node is recovering from downtime. If increasing, either the node is offline or +the provide system needs more workers (see +[`Provide.DHT.MaxWorkers`](./config.md#providedhtmaxworkers) +and +[`Provide.DHT.DedicatedPeriodicWorkers`](./config.md#providedhtdedicatedperiodicworkers)). + +## Schedule + +### CIDs scheduled + +Total CIDs scheduled for reprovide. 
+ +### Regions scheduled + +Number of keyspace regions scheduled for reprovide. Each CID is mapped to a +specific region, and all CIDs within the same region are reprovided together as +a batch for efficient processing. + +### Avg prefix length + +Average length of binary prefixes identifying the scheduled regions. Each +keyspace region is identified by a binary prefix, and this shows the average +prefix length across all regions in the schedule. Longer prefixes indicate the +keyspace is divided into more regions (because there are more DHT servers in the +swarm to distribute records across). + +### Next region prefix + +Keyspace prefix of the next region to be reprovided. + +### Next region reprovide + +When the next region is scheduled to be reprovided. + +## Timings + +### Uptime + +How long the provide system has been running since Kubo started, along with the +start timestamp. + +### Current time offset + +Elapsed time in the current reprovide cycle, showing cycle progress (e.g., '11h' +means 11 hours into a 22-hour cycle, roughly halfway through). + +### Cycle started + +When the current reprovide cycle began. + +### Reprovide interval + +How often each CID is reprovided (the complete cycle duration). + +## Network + +### Avg record holders + +Average number of provider records successfully sent for each CID to distinct +DHT servers. In practice, this is often lower than the [replication +factor](#replication-factor) due to unreachable peers or timeouts. Matching the +replication factor would indicate all DHT servers are reachable. + +Note: this counts successful sends; some DHT servers may have gone offline +afterward, so actual availability may be lower. + +### Peers swept + +Number of DHT servers to which we tried to send provider records in the last +reprovide cycle (sweep). Excludes peers contacted during initial provides or +DHT lookups. 
+ +### Full keyspace coverage + +Whether provider records were sent to all DHT servers in the swarm during the +last reprovide cycle. If true, [peers swept](#peers-swept) approximates the +total DHT swarm size over the last [reprovide interval](#reprovide-interval). + +### Reachable peers + +Number and percentage of peers to which we successfully sent all provider +records assigned to them during the last reprovide cycle. + +### Avg region size + +Average number of DHT servers per keyspace region. + +### Replication factor + +Target number of DHT servers to receive each provider record. + +## Operations + +### Ongoing provides + +Number of CIDs and regions currently being provided for the first time. More +CIDs than regions indicates efficient batching. Each region provide uses a +[burst +worker](./config.md#providedhtdedicatedburstworkers). + +### Ongoing reprovides + +Number of CIDs and regions currently being reprovided. Each region reprovide +uses a [periodic +worker](./config.md#providedhtdedicatedperiodicworkers). + +### Total CIDs provided + +Total number of provide operations since node startup (includes both provides +and reprovides). + +### Total records provided + +Total provider records successfully sent to DHT servers since startup (includes +reprovides). + +### Total provide errors + +Number of failed region provide/reprovide operations since startup. Failed +regions are automatically retried unless the node is offline. + +### CIDs provided/min/worker + +Average rate of initial provides per minute per worker during the last +reprovide cycle (excludes reprovides). Each worker handles one keyspace region +at a time, providing all CIDs in that region. This measures the throughput of a +single worker only. + +To estimate total system provide throughput, multiply by the number of active +burst workers shown in [Workers stats](#workers-stats) (Burst > Active). + +Note: This rate only counts active time when initial provides are being +processed. 
If workers are idle, actual throughput may be lower. + +### CIDs reprovided/min/worker + +Average rate of reprovides per minute per worker during the last reprovide +cycle (excludes initial provides). Each worker handles one keyspace region at a +time, reproviding all CIDs in that region. This measures the throughput of a +single worker only. + +To estimate total system reprovide throughput, multiply by the number of active +periodic workers shown in [Workers stats](#workers-stats) (Periodic > Active). + +Example: If this shows 100 CIDs/min and you have 10 active periodic workers, +your total reprovide throughput is approximately 1,000 CIDs/min. + +Note: This rate only counts active time when regions are being reprovided. If +workers are idle due to network issues or queue exhaustion, actual throughput +may be lower. + +### Region reprovide duration + +Average time to reprovide all CIDs in a region during the last cycle. + +### Avg CIDs/reprovide + +Average number of CIDs per region during the last reprovide cycle. + +This measures the average size of a region (how many CIDs are batched together), +not a throughput rate. Do NOT multiply this by worker count. + +Combined with [Region reprovide duration](#region-reprovide-duration), this +helps estimate per-worker throughput: dividing Avg CIDs/reprovide by Region +reprovide duration gives CIDs/min/worker. + +### Regions reprovided (last cycle) + +Number of regions reprovided in the last cycle. + +> [!NOTE] +> (⚠️ 0.39 limitation) If this shows 1 region while using +> [`Routing.AcceleratedDHTClient`](./config.md#routingaccelerateddhtclient), sweep mode lost +> efficiency gains. Consider disabling the accelerated client. See [caveat 4](./config.md#routingaccelerateddhtclient). + +## Workers + +### Active workers + +Number of workers currently processing provide or reprovide operations. + +### Free workers + +Number of idle workers not reserved for periodic or burst tasks. 
+ +### Workers stats + +Breakdown of worker status by type (periodic for scheduled reprovides, burst for +initial provides). For each type: + +- **Active**: Currently processing operations (use this count when calculating total throughput from per-worker rates) +- **Dedicated**: Reserved for this type +- **Available**: Idle dedicated workers + [free workers](#free-workers) +- **Queued**: 0 or 1 (workers acquired only when needed) + +The number of active workers determines your total system throughput. For +example, if you have 10 active periodic workers, multiply +[CIDs reprovided/min/worker](#cids-reprovidedminworker) by 10 to estimate total +reprovide throughput. + +See [provide queue](#provide-queue) and [reprovide queue](#reprovide-queue) for +regions waiting to be processed. + +### Max connections/worker + +Maximum concurrent DHT server connections per worker when sending provider +records for a region. + +## Capacity Planning + +### Estimating if your system can keep up with the reprovide schedule + +To check if your provide system has sufficient capacity: + +1. Calculate required throughput: + - Required CIDs/min = [CIDs scheduled](#cids-scheduled) / ([Reprovide interval](#reprovide-interval) in minutes) + - Example: 67M CIDs / (22 hours × 60 min) = 50,758 CIDs/min needed + +2. Calculate actual throughput: + - Actual CIDs/min = [CIDs reprovided/min/worker](#cids-reprovidedminworker) × Active periodic workers + - Example: 100 CIDs/min/worker × 256 active workers = 25,600 CIDs/min + +3. 
Compare: + - If actual < required: System is underprovisioned, increase [MaxWorkers](./config.md#providedhtmaxworkers) or [DedicatedPeriodicWorkers](./config.md#providedhtdedicatedperiodicworkers) + - If actual > required: System has excess capacity + - If [Reprovide queue](#reprovide-queue) is growing: System is falling behind + +### Understanding worker utilization + +- High active workers with growing reprovide queue: Need more workers or network connectivity is limiting throughput +- Low active workers with non-empty reprovide queue: Workers may be waiting for network or DHT operations +- Check [Reachable peers](#reachable-peers) to diagnose network connectivity issues +- (⚠️ 0.39 limitation) If [Regions scheduled](#regions-scheduled) shows 1 while using + [`Routing.AcceleratedDHTClient`](./config.md#routingaccelerateddhtclient), consider disabling + the accelerated client to restore sweep efficiency. See [caveat 4](./config.md#routingaccelerateddhtclient). + +## See Also + +- [Provide configuration reference](./config.md#provide) +- [Provide metrics for Prometheus](./metrics.md#provide) diff --git a/docs/releases.md b/docs/releases.md index d42feea7b..718c2da93 100644 --- a/docs/releases.md +++ b/docs/releases.md @@ -20,9 +20,9 @@ ## Release Philosophy -`kubo` aims to have release every six weeks, two releases per quarter. During these 6 week releases, we go through 4 different stages that gives us the opportunity to test the new version against our test environments (unit, interop, integration), QA in our current production environment, IPFS apps (e.g. Desktop and WebUI) and with our community and _early testers_[1] that have IPFS running in production. +`kubo` aims to have a release every six weeks, two releases per quarter. During these 6 week releases, we go through 4 different stages that allow us to test the new version against our test environments (unit, interop, integration), QA in our current production environment, IPFS apps (e.g. 
Desktop and WebUI) and with our community and _early testers_[1] that have IPFS running in production. -We might expand the six week release schedule in case of: +We might expand the six-week release schedule in case of: - No new updates to be added - In case of a large community event that takes the core team availability away (e.g. IPFS Conf, Dev Meetings, IPFS Camp, etc.) @@ -59,7 +59,7 @@ Test the release in as many non-production environments as possible. This is rel ### Stage 3 - Community Prod Testing -At this stage, we consider the release to be "production ready" and will ask the community and our early testers to (partially) deploy the release to their production infrastructure. +At this stage, we consider the release to be "production-ready" and will ask the community and our early testers to (partially) deploy the release to their production infrastructure. **Goals:** @@ -69,7 +69,7 @@ At this stage, we consider the release to be "production ready" and will ask the ### Stage 4 - Release -At this stage, the release is "battle hardened" and ready for wide deployment. +At this stage, the release is "battle-hardened" and ready for wide deployment. ## Release Cycle diff --git a/docs/telemetry.md b/docs/telemetry.md new file mode 100644 index 000000000..5b053ed34 --- /dev/null +++ b/docs/telemetry.md @@ -0,0 +1,149 @@ +# Telemetry Plugin Documentation + +The **Telemetry plugin** is a feature in Kubo that collects **anonymized usage data** to help the development team better understand how the software is used, identify areas for improvement, and guide future feature development. + +This data is not personally identifiable and is used solely for the purpose of improving the Kubo project. + +--- + +## 🛡️ How to Control Telemetry + +The behavior of the Telemetry plugin is controlled via the environment variable [`IPFS_TELEMETRY`](environment-variables.md#ipfs_telemetry) and optionally via the `Plugins.Plugins.telemetry.Config.Mode` in the IPFS config file. 
+ +### Available Modes + +| Mode | Description | +|----------|-----------------------------------------------------------------------------| +| `on` | **Default**. Telemetry is enabled. Data is sent periodically. | +| `off` | Telemetry is disabled. No data is sent. Any existing telemetry UUID file is removed. | +| `auto` | Like `on`, but logs an informative message about the telemetry and gives user 15 minutes to opt-out before first collection. This mode is automatically used on the first run when `IPFS_TELEMETRY` is not set and telemetry UUID is not found (not generated yet). The informative message is only shown once. | + +You can set the mode in your environment: + +```bash +export IPFS_TELEMETRY="off" +``` + +Or in your IPFS config file: + +```json +{ + "Plugins": { + "Plugins": { + "telemetry": { + "Config": { + "Mode": "off" + } + } + } + } +} +``` + +--- + +## 📦 What Data is Collected? + +The telemetry plugin collects the following anonymized data: + +### General Information + +- **UUID**: Anonymous identifier for this node +- **Agent version**: Kubo version string +- **Private network**: Whether running in a private IPFS network +- **Repository size**: Categorized into privacy-preserving buckets (1GB, 5GB, 10GB, 100GB, 500GB, 1TB, 10TB, >10TB) +- **Uptime**: Categorized into privacy-preserving buckets (1d, 2d, 3d, 7d, 14d, 30d, >30d) + +### Routing & Discovery + +- **Custom bootstrap peers**: Whether custom `Bootstrap` peers are configured +- **Routing type**: The `Routing.Type` configured for the node +- **Accelerated DHT client**: Whether `Routing.AcceleratedDHTClient` is enabled +- **Delegated routing count**: Number of `Routing.DelegatedRouters` configured +- **AutoConf enabled**: Whether `AutoConf.Enabled` is set +- **Custom AutoConf URL**: Whether custom `AutoConf.URL` is configured +- **mDNS**: Whether `Discovery.MDNS.Enabled` is set + +### Content Providing + +- **Provide and Reprovide strategy**: The `Provide.Strategy` configured +- **Sweep-based 
provider**: Whether `Provide.DHT.SweepEnabled` is set +- **Custom Interval**: Whether custom `Provide.DHT.Interval` is configured +- **Custom MaxWorkers**: Whether custom `Provide.DHT.MaxWorkers` is configured + +### Network Configuration + +- **AutoNAT service mode**: The `AutoNAT.ServiceMode` configured +- **AutoNAT reachability**: Current reachability status determined by AutoNAT +- **Hole punching**: Whether `Swarm.EnableHolePunching` is enabled +- **Circuit relay addresses**: Whether the node advertises circuit relay addresses +- **Public IPv4 addresses**: Whether the node has public IPv4 addresses +- **Public IPv6 addresses**: Whether the node has public IPv6 addresses +- **AutoWSS**: Whether `AutoTLS.AutoWSS` is enabled +- **Custom domain suffix**: Whether custom `AutoTLS.DomainSuffix` is configured + +### Platform Information + +- **Operating system**: The OS the node is running on +- **CPU architecture**: The architecture the node is running on +- **Container detection**: Whether the node is running inside a container +- **VM detection**: Whether the node is running inside a virtual machine + +### Code Reference + +Data is organized in the `LogEvent` struct at [`plugin/plugins/telemetry/telemetry.go`](https://github.com/ipfs/kubo/blob/master/plugin/plugins/telemetry/telemetry.go). This struct is the authoritative source of truth for all telemetry data, including privacy-preserving buckets for repository size and uptime. Note that this documentation may not always be up-to-date - refer to the code for the current implementation. + +--- + +## 🧑‍🤝‍🧑 Privacy and Anonymization + +All data collected is: +- **Anonymized**: No personally identifiable information (PII) is sent. +- **Optional**: Users can choose to opt out at any time. +- **Secure**: Data is sent over HTTPS to a trusted endpoint. + +The telemetry UUID is stored in the IPFS repo folder and is used to identify the node across runs, but it does not contain any personal information. 
When you opt out, this UUID file is automatically removed to ensure complete privacy. + +--- + +## 📦 Contributing to the Project + +By enabling telemetry, you are helping the Kubo team improve the software for the entire community. The data is used to: + +- Prioritize feature development +- Identify performance bottlenecks +- Improve user experience + +You can always disable telemetry at any time if you change your mind. + +--- + +## 🧪 Testing Telemetry + +If you're testing telemetry locally, you can change the endpoint by setting the `Endpoint` field in the config: + +```json +{ + "Plugins": { + "Plugins": { + "telemetry": { + "Config": { + "Mode": "on", + "Endpoint": "http://localhost:8080" + } + } + } + } +} +``` + +This allows you to capture and inspect telemetry data locally. + +--- + +## 📦 Further Reading + +For more information, see: +- [IPFS Environment Variables](environment-variables.md) +- [IPFS Plugins](plugins.md) +- [IPFS Configuration](config.md) diff --git a/fuse/ipns/ipns_test.go index ece386bf7..bbd5cbc98 100644 --- a/fuse/ipns/ipns_test.go +++ b/fuse/ipns/ipns_test.go @@ -1,5 +1,4 @@ //go:build !nofuse && !openbsd && !netbsd && !plan9 -// +build !nofuse,!openbsd,!netbsd,!plan9 package ipns diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index 23704cabd..44085e526 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -1,5 +1,4 @@ //go:build !nofuse && !openbsd && !netbsd && !plan9 -// +build !nofuse,!openbsd,!netbsd,!plan9 // package fuse/ipns implements a fuse filesystem that interfaces // with ipns, the naming system for ipfs. 
@@ -16,13 +15,14 @@ import ( dag "github.com/ipfs/boxo/ipld/merkledag" ft "github.com/ipfs/boxo/ipld/unixfs" + "github.com/ipfs/boxo/namesys" "github.com/ipfs/boxo/path" fuse "bazil.org/fuse" fs "bazil.org/fuse/fs" mfs "github.com/ipfs/boxo/mfs" cid "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" iface "github.com/ipfs/kubo/core/coreiface" options "github.com/ipfs/kubo/core/coreiface/options" ) @@ -95,7 +95,7 @@ func loadRoot(ctx context.Context, ipfs iface.CoreAPI, key iface.Key) (*mfs.Root node, err := ipfs.ResolveNode(ctx, key.Path()) switch err { case nil: - case iface.ErrResolveFailed: + case namesys.ErrResolveFailed: node = ft.EmptyDirNode() default: log.Errorf("looking up %s: %s", key.Path(), err) @@ -107,7 +107,10 @@ func loadRoot(ctx context.Context, ipfs iface.CoreAPI, key iface.Key) (*mfs.Root return nil, nil, dag.ErrNotProtobuf } - root, err := mfs.NewRoot(ctx, ipfs.Dag(), pbnode, ipnsPubFunc(ipfs, key)) + // We have no access to provider.System from the CoreAPI. The Routing + // part offers Provide through the router so it may be slow/risky + // to give that here to MFS. Therefore we leave as nil. + root, err := mfs.NewRoot(ctx, ipfs.Dag(), pbnode, ipnsPubFunc(ipfs, key), nil) if err != nil { return nil, nil, err } @@ -525,13 +528,6 @@ func (d *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir return nil } -func min(a, b int) int { - if a < b { - return a - } - return b -} - // to check that out Node implements all the interfaces we want. 
type ipnsRoot interface { fs.Node diff --git a/fuse/ipns/link_unix.go b/fuse/ipns/link_unix.go index da810c8f9..f95894b12 100644 --- a/fuse/ipns/link_unix.go +++ b/fuse/ipns/link_unix.go @@ -1,5 +1,4 @@ //go:build !nofuse && !openbsd && !netbsd && !plan9 -// +build !nofuse,!openbsd,!netbsd,!plan9 package ipns diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go index 34a8eef51..da3a6ac0b 100644 --- a/fuse/ipns/mount_unix.go +++ b/fuse/ipns/mount_unix.go @@ -1,6 +1,4 @@ //go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse -// +build linux darwin freebsd netbsd openbsd -// +build !nofuse package ipns @@ -29,5 +27,5 @@ func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { return nil, err } - return mount.NewMount(ipfs.Process, fsys, ipnsmp, allowOther) + return mount.NewMount(fsys, ipnsmp, allowOther) } diff --git a/fuse/mfs/mfs_test.go b/fuse/mfs/mfs_test.go new file mode 100644 index 000000000..a441246c7 --- /dev/null +++ b/fuse/mfs/mfs_test.go @@ -0,0 +1,341 @@ +//go:build !nofuse && !openbsd && !netbsd && !plan9 + +package mfs + +import ( + "bytes" + "context" + "crypto/rand" + "errors" + iofs "io/fs" + "os" + "slices" + "strconv" + "testing" + "time" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + "bazil.org/fuse/fs/fstestutil" + "github.com/ipfs/kubo/core" + "github.com/ipfs/kubo/core/node" + "github.com/libp2p/go-libp2p-testing/ci" +) + +// Create an Ipfs.Node, a filesystem and a mount point. +func setUp(t *testing.T, ipfs *core.IpfsNode) (fs.FS, *fstestutil.Mount) { + if ci.NoFuse() { + t.Skip("Skipping FUSE tests") + } + + if ipfs == nil { + var err error + ipfs, err = core.NewNode(context.Background(), &node.BuildCfg{}) + if err != nil { + t.Fatal(err) + } + } + + fs := NewFileSystem(ipfs) + mnt, err := fstestutil.MountedT(t, fs, nil) + if err == fuse.ErrOSXFUSENotFound { + t.Skip(err) + } + if err != nil { + t.Fatal(err) + } + + return fs, mnt +} + +// Test reading and writing a file. 
+func TestReadWrite(t *testing.T) { + _, mnt := setUp(t, nil) + defer mnt.Close() + + path := mnt.Dir + "/testrw" + content := make([]byte, 8196) + _, err := rand.Read(content) + if err != nil { + t.Fatal(err) + } + + t.Run("write", func(t *testing.T) { + f, err := os.Create(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + _, err = f.Write(content) + if err != nil { + t.Fatal(err) + } + }) + t.Run("read", func(t *testing.T) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + buf := make([]byte, 8196) + l, err := f.Read(buf) + if err != nil { + t.Fatal(err) + } + if bytes.Equal(content, buf[:l]) != true { + t.Fatal("read and write not equal") + } + }) +} + +// Test creating a directory. +func TestMkdir(t *testing.T) { + _, mnt := setUp(t, nil) + defer mnt.Close() + + path := mnt.Dir + "/foo/bar/baz/qux/quux" + + t.Run("write", func(t *testing.T) { + err := os.MkdirAll(path, iofs.ModeDir) + if err != nil { + t.Fatal(err) + } + }) + t.Run("read", func(t *testing.T) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + if !stat.IsDir() { + t.Fatal("not dir") + } + }) +} + +// Test file persistence across mounts. 
+func TestPersistence(t *testing.T) { + ipfs, err := core.NewNode(context.Background(), &node.BuildCfg{}) + if err != nil { + t.Fatal(err) + } + + content := make([]byte, 8196) + _, err = rand.Read(content) + if err != nil { + t.Fatal(err) + } + + t.Run("write", func(t *testing.T) { + _, mnt := setUp(t, ipfs) + defer mnt.Close() + path := mnt.Dir + "/testpersistence" + + f, err := os.Create(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + _, err = f.Write(content) + if err != nil { + t.Fatal(err) + } + }) + t.Run("read", func(t *testing.T) { + _, mnt := setUp(t, ipfs) + defer mnt.Close() + path := mnt.Dir + "/testpersistence" + + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + buf := make([]byte, 8196) + l, err := f.Read(buf) + if err != nil { + t.Fatal(err) + } + if bytes.Equal(content, buf[:l]) != true { + t.Fatal("read and write not equal") + } + }) +} + +// Test getting the file attributes. +func TestAttr(t *testing.T) { + _, mnt := setUp(t, nil) + defer mnt.Close() + + path := mnt.Dir + "/testattr" + content := make([]byte, 8196) + _, err := rand.Read(content) + if err != nil { + t.Fatal(err) + } + + t.Run("write", func(t *testing.T) { + f, err := os.Create(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + _, err = f.Write(content) + if err != nil { + t.Fatal(err) + } + }) + t.Run("read", func(t *testing.T) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.IsDir() { + t.Fatal("file is a directory") + } + + if fi.ModTime().After(time.Now()) { + t.Fatal("future modtime") + } + if time.Since(fi.ModTime()) > time.Second { + t.Fatal("past modtime") + } + + if fi.Name() != "testattr" { + t.Fatal("invalid filename") + } + + if fi.Size() != 8196 { + t.Fatal("invalid size") + } + }) +} + +// Test concurrent access to the filesystem. 
+func TestConcurrentRW(t *testing.T) { + _, mnt := setUp(t, nil) + defer mnt.Close() + + files := 5 + fileWorkers := 5 + + path := mnt.Dir + "/testconcurrent" + content := make([][]byte, files) + + for i := range content { + content[i] = make([]byte, 8196) + _, err := rand.Read(content[i]) + if err != nil { + t.Fatal(err) + } + } + + t.Run("write", func(t *testing.T) { + errs := make(chan (error), 1) + for i := 0; i < files; i++ { + go func() { + var err error + defer func() { errs <- err }() + + f, err := os.Create(path + strconv.Itoa(i)) + if err != nil { + return + } + defer f.Close() + + _, err = f.Write(content[i]) + if err != nil { + return + } + }() + } + for i := 0; i < files; i++ { + err := <-errs + if err != nil { + t.Fatal(err) + } + } + }) + t.Run("read", func(t *testing.T) { + errs := make(chan (error), 1) + for i := 0; i < files*fileWorkers; i++ { + go func() { + var err error + defer func() { errs <- err }() + + f, err := os.Open(path + strconv.Itoa(i/fileWorkers)) + if err != nil { + return + } + defer f.Close() + + buf := make([]byte, 8196) + l, err := f.Read(buf) + if err != nil { + return + } + if bytes.Equal(content[i/fileWorkers], buf[:l]) != true { + err = errors.New("read and write not equal") + return + } + }() + } + for i := 0; i < files; i++ { + err := <-errs + if err != nil { + t.Fatal(err) + } + } + }) +} + +// Test ipfs_cid extended attribute +func TestMFSRootXattr(t *testing.T) { + ipfs, err := core.NewNode(context.Background(), &node.BuildCfg{}) + if err != nil { + t.Fatal(err) + } + + fs, mnt := setUp(t, ipfs) + defer mnt.Close() + + node, err := fs.Root() + if err != nil { + t.Fatal(err) + } + + root := node.(*Dir) + + listReq := fuse.ListxattrRequest{} + listRes := fuse.ListxattrResponse{} + err = root.Listxattr(context.Background(), &listReq, &listRes) + if err != nil { + t.Fatal(err) + } + if slices.Compare(listRes.Xattr, []byte("ipfs_cid\x00")) != 0 { + t.Fatal("list xattr returns invalid value") + } + + getReq := 
fuse.GetxattrRequest{ + Name: "ipfs_cid", + } + getRes := fuse.GetxattrResponse{} + err = root.Getxattr(context.Background(), &getReq, &getRes) + if err != nil { + t.Fatal(err) + } + + ipldNode, err := ipfs.FilesRoot.GetDirectory().GetNode() + if err != nil { + t.Fatal(err) + } + + if slices.Compare(getRes.Xattr, []byte(ipldNode.Cid().String())) != 0 { + t.Fatal("xattr cid not equal to mfs root cid") + } +} diff --git a/fuse/mfs/mfs_unix.go b/fuse/mfs/mfs_unix.go new file mode 100644 index 000000000..99ca5fe52 --- /dev/null +++ b/fuse/mfs/mfs_unix.go @@ -0,0 +1,412 @@ +//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse + +package mfs + +import ( + "context" + "io" + "os" + "sync" + "syscall" + "time" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + + dag "github.com/ipfs/boxo/ipld/merkledag" + ft "github.com/ipfs/boxo/ipld/unixfs" + "github.com/ipfs/boxo/mfs" + "github.com/ipfs/kubo/core" +) + +const ( + ipfsCIDXattr = "ipfs_cid" + mfsDirMode = os.ModeDir | 0755 + mfsFileMode = 0644 + blockSize = 512 + dirSize = 8 +) + +// FUSE filesystem mounted at /mfs. +type FileSystem struct { + root Dir +} + +// Get filesystem root. +func (fs *FileSystem) Root() (fs.Node, error) { + return &fs.root, nil +} + +// FUSE Adapter for MFS directories. +type Dir struct { + mfsDir *mfs.Directory +} + +// Directory attributes (stat). +func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Mode = mfsDirMode + attr.Size = dirSize * blockSize + attr.Blocks = dirSize + return nil +} + +// Access files in a directory. 
+func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) { + mfsNode, err := dir.mfsDir.Child(req.Name) + switch err { + case os.ErrNotExist: + return nil, syscall.Errno(syscall.ENOENT) + case nil: + default: + return nil, err + } + + switch mfsNode.Type() { + case mfs.TDir: + result := Dir{ + mfsDir: mfsNode.(*mfs.Directory), + } + return &result, nil + case mfs.TFile: + result := File{ + mfsFile: mfsNode.(*mfs.File), + } + return &result, nil + } + + return nil, syscall.Errno(syscall.ENOENT) +} + +// List (ls) MFS directory. +func (dir *Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + var res []fuse.Dirent + nodes, err := dir.mfsDir.List(ctx) + if err != nil { + return nil, err + } + + for _, node := range nodes { + nodeType := fuse.DT_File + if node.Type == 1 { + nodeType = fuse.DT_Dir + } + res = append(res, fuse.Dirent{ + Type: nodeType, + Name: node.Name, + }) + } + return res, nil +} + +// Mkdir (mkdir) in MFS. +func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { + mfsDir, err := dir.mfsDir.Mkdir(req.Name) + if err != nil { + return nil, err + } + return &Dir{ + mfsDir: mfsDir, + }, nil +} + +// Remove (rm/rmdir) an MFS file. +func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { + // Check for empty directory. + if req.Dir { + targetNode, err := dir.mfsDir.Child(req.Name) + if err != nil { + return err + } + target := targetNode.(*mfs.Directory) + + children, err := target.ListNames(ctx) + if err != nil { + return err + } + if len(children) > 0 { + return os.ErrExist + } + } + err := dir.mfsDir.Unlink(req.Name) + if err != nil { + return err + } + return dir.mfsDir.Flush() +} + +// Move (mv) an MFS file. 
+func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { + file, err := dir.mfsDir.Child(req.OldName) + if err != nil { + return err + } + node, err := file.GetNode() + if err != nil { + return err + } + targetDir := newDir.(*Dir) + + // Remove file if exists + err = targetDir.mfsDir.Unlink(req.NewName) + if err != nil && err != os.ErrNotExist { + return err + } + + err = targetDir.mfsDir.AddChild(req.NewName, node) + if err != nil { + return err + } + + err = dir.mfsDir.Unlink(req.OldName) + if err != nil { + return err + } + + return dir.mfsDir.Flush() +} + +// Create (touch) an MFS file. +func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { + node := dag.NodeWithData(ft.FilePBData(nil, 0)) + if err := node.SetCidBuilder(dir.mfsDir.GetCidBuilder()); err != nil { + return nil, nil, err + } + + if err := dir.mfsDir.AddChild(req.Name, node); err != nil { + return nil, nil, err + } + + if err := dir.mfsDir.Flush(); err != nil { + return nil, nil, err + } + + mfsNode, err := dir.mfsDir.Child(req.Name) + if err != nil { + return nil, nil, err + } + if err := mfsNode.SetModTime(time.Now()); err != nil { + return nil, nil, err + } + + mfsFile := mfsNode.(*mfs.File) + + file := File{ + mfsFile: mfsFile, + } + + // Read access flags and create a handler. + accessMode := req.Flags & fuse.OpenAccessModeMask + flags := mfs.Flags{ + Read: accessMode == fuse.OpenReadOnly || accessMode == fuse.OpenReadWrite, + Write: accessMode == fuse.OpenWriteOnly || accessMode == fuse.OpenReadWrite, + Sync: req.Flags|fuse.OpenSync > 0, + } + + fd, err := mfsFile.Open(flags) + if err != nil { + return nil, nil, err + } + handler := FileHandler{ + mfsFD: fd, + } + + return &file, &handler, nil +} + +// List dir xattr. 
+func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + resp.Append(ipfsCIDXattr) + return nil +} + +// Get dir xattr. +func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + switch req.Name { + case ipfsCIDXattr: + node, err := dir.mfsDir.GetNode() + if err != nil { + return err + } + resp.Xattr = []byte(node.Cid().String()) + return nil + default: + return fuse.ErrNoXattr + } +} + +// FUSE adapter for MFS files. +type File struct { + mfsFile *mfs.File +} + +// File attributes. +func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { + size, _ := file.mfsFile.Size() + + attr.Size = uint64(size) + if size%blockSize == 0 { + attr.Blocks = uint64(size / blockSize) + } else { + attr.Blocks = uint64(size/blockSize + 1) + } + + mtime, _ := file.mfsFile.ModTime() + attr.Mtime = mtime + + attr.Mode = mfsFileMode + return nil +} + +// Open an MFS file. +func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { + accessMode := req.Flags & fuse.OpenAccessModeMask + flags := mfs.Flags{ + Read: accessMode == fuse.OpenReadOnly || accessMode == fuse.OpenReadWrite, + Write: accessMode == fuse.OpenWriteOnly || accessMode == fuse.OpenReadWrite, + Sync: req.Flags|fuse.OpenSync > 0, + } + fd, err := file.mfsFile.Open(flags) + if err != nil { + return nil, err + } + + if flags.Write { + if err := file.mfsFile.SetModTime(time.Now()); err != nil { + return nil, err + } + } + + return &FileHandler{ + mfsFD: fd, + }, nil +} + +// Sync the file's contents to MFS. +func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { + return file.mfsFile.Sync() +} + +// List file xattr. +func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + resp.Append(ipfsCIDXattr) + return nil +} + +// Get file xattr. 
+func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + switch req.Name { + case ipfsCIDXattr: + node, err := file.mfsFile.GetNode() + if err != nil { + return err + } + resp.Xattr = []byte(node.Cid().String()) + return nil + default: + return fuse.ErrNoXattr + } +} + +// Wrapper for MFS's file descriptor that conforms to the FUSE fs.Handler +// interface. +type FileHandler struct { + mfsFD mfs.FileDescriptor + mu sync.Mutex +} + +// Read an opened MFS file. +func (fh *FileHandler) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { + fh.mu.Lock() + defer fh.mu.Unlock() + + _, err := fh.mfsFD.Seek(req.Offset, io.SeekStart) + if err != nil { + return err + } + + buf := make([]byte, req.Size) + l, err := fh.mfsFD.Read(buf) + + resp.Data = buf[:l] + + switch err { + case nil, io.EOF, io.ErrUnexpectedEOF: + return nil + default: + return err + } +} + +// Write writes to an opened MFS file. +func (fh *FileHandler) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { + fh.mu.Lock() + defer fh.mu.Unlock() + + l, err := fh.mfsFD.WriteAt(req.Data, req.Offset) + if err != nil { + return err + } + resp.Size = l + + return nil +} + +// Flushes the file's buffer. +func (fh *FileHandler) Flush(ctx context.Context, req *fuse.FlushRequest) error { + fh.mu.Lock() + defer fh.mu.Unlock() + + return fh.mfsFD.Flush() +} + +// Closes the file. +func (fh *FileHandler) Release(ctx context.Context, req *fuse.ReleaseRequest) error { + fh.mu.Lock() + defer fh.mu.Unlock() + + return fh.mfsFD.Close() +} + +// Create new filesystem. +func NewFileSystem(ipfs *core.IpfsNode) fs.FS { + return &FileSystem{ + root: Dir{ + mfsDir: ipfs.FilesRoot.GetDirectory(), + }, + } +} + +// Check that our structs implement all the interfaces we want. 
+type mfsDir interface { + fs.Node + fs.NodeGetxattrer + fs.NodeListxattrer + fs.HandleReadDirAller + fs.NodeRequestLookuper + fs.NodeMkdirer + fs.NodeRenamer + fs.NodeRemover + fs.NodeCreater +} + +var _ mfsDir = (*Dir)(nil) + +type mfsFile interface { + fs.Node + fs.NodeGetxattrer + fs.NodeListxattrer + fs.NodeOpener + fs.NodeFsyncer +} + +var _ mfsFile = (*File)(nil) + +type mfsHandler interface { + fs.Handle + fs.HandleReader + fs.HandleWriter + fs.HandleFlusher + fs.HandleReleaser +} + +var _ mfsHandler = (*FileHandler)(nil) diff --git a/fuse/mfs/mount_unix.go b/fuse/mfs/mount_unix.go new file mode 100644 index 000000000..92e0845bc --- /dev/null +++ b/fuse/mfs/mount_unix.go @@ -0,0 +1,19 @@ +//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse + +package mfs + +import ( + core "github.com/ipfs/kubo/core" + mount "github.com/ipfs/kubo/fuse/mount" +) + +// Mount mounts MFS at a given location, and returns a mount.Mount instance. +func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { + cfg, err := ipfs.Repo.Config() + if err != nil { + return nil, err + } + allowOther := cfg.Mounts.FuseAllowOther + fsys := NewFileSystem(ipfs) + return mount.NewMount(fsys, mountpoint, allowOther) +} diff --git a/fuse/mount/fuse.go b/fuse/mount/fuse.go index 02d733b89..313c4af6a 100644 --- a/fuse/mount/fuse.go +++ b/fuse/mount/fuse.go @@ -1,5 +1,4 @@ //go:build !nofuse && !windows && !openbsd && !netbsd && !plan9 -// +build !nofuse,!windows,!openbsd,!netbsd,!plan9 package mount @@ -11,7 +10,6 @@ import ( "bazil.org/fuse" "bazil.org/fuse/fs" - "github.com/jbenet/goprocess" ) var ErrNotMounted = errors.New("not mounted") @@ -25,12 +23,12 @@ type mount struct { active bool activeLock *sync.RWMutex - proc goprocess.Process + unmountOnce sync.Once } // Mount mounts a fuse fs.FS at a given location, and returns a Mount instance. -// parent is a ContextGroup to bind the mount's ContextGroup to. 
-func NewMount(p goprocess.Process, fsys fs.FS, mountpoint string, allowOther bool) (Mount, error) { +// The returned Mount is detached via Unmount, which performs the teardown at most once. +func NewMount(fsys fs.FS, mountpoint string, allowOther bool) (Mount, error) { var conn *fuse.Conn var err error @@ -54,12 +52,10 @@ func NewMount(p goprocess.Process, fsys fs.FS, mountpoint string, allowOther boo filesys: fsys, active: false, activeLock: &sync.RWMutex{}, - proc: goprocess.WithParent(p), // link it to parent. } - m.proc.SetTeardown(m.unmount) // launch the mounting process. - if err := m.mount(); err != nil { + if err = m.mount(); err != nil { _ = m.Unmount() // just in case. return nil, err } @@ -135,10 +131,6 @@ func (m *mount) unmount() error { return nil } -func (m *mount) Process() goprocess.Process { - return m.proc -} - func (m *mount) MountPoint() string { return m.mpoint } @@ -148,8 +140,12 @@ func (m *mount) Unmount() error { return ErrNotMounted } - // call Process Close(), which calls unmount() exactly once. - return m.proc.Close() + var err error + m.unmountOnce.Do(func() { + err = m.unmount() + }) + + return err } func (m *mount) IsActive() bool { diff --git a/fuse/mount/mount.go b/fuse/mount/mount.go index a52374dd8..ca10405fe 100644 --- a/fuse/mount/mount.go +++ b/fuse/mount/mount.go @@ -8,8 +8,7 @@ import ( "runtime" "time" - logging "github.com/ipfs/go-log" - goprocess "github.com/jbenet/goprocess" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("mount") @@ -26,10 +25,6 @@ type Mount interface { // Checks if the mount is still active. IsActive() bool - - // Process returns the mount's Process to be able to link it - // to other processes. Unmount upon closing. - Process() goprocess.Process } // ForceUnmount attempts to forcibly unmount a given mount. 
diff --git a/fuse/node/mount_darwin.go b/fuse/node/mount_darwin.go index 4d2446ecd..57fbe4d90 100644 --- a/fuse/node/mount_darwin.go +++ b/fuse/node/mount_darwin.go @@ -1,5 +1,4 @@ -//go:build !nofuse -// +build !nofuse +//go:build !nofuse && darwin package node @@ -29,7 +28,7 @@ const dontCheckOSXFUSEConfigKey = "DontCheckOSXFUSE" const fuseVersionPkg = "github.com/jbenet/go-fuse-version/fuse-version" // errStrFuseRequired is returned when we're sure the user does not have fuse. -var errStrFuseRequired = `OSXFUSE not found. +const errStrFuseRequired = `OSXFUSE not found. OSXFUSE is required to mount, please install it. NOTE: Version 2.7.2 or higher required; prior versions are known to kernel panic! diff --git a/fuse/node/mount_nofuse.go b/fuse/node/mount_nofuse.go index e6f512f8e..026f002ff 100644 --- a/fuse/node/mount_nofuse.go +++ b/fuse/node/mount_nofuse.go @@ -1,5 +1,4 @@ //go:build !windows && nofuse -// +build !windows,nofuse package node @@ -9,6 +8,10 @@ import ( core "github.com/ipfs/kubo/core" ) -func Mount(node *core.IpfsNode, fsdir, nsdir string) error { +func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { return errors.New("not compiled in") } + +func Unmount(node *core.IpfsNode) { + return +} diff --git a/fuse/node/mount_notsupp.go b/fuse/node/mount_notsupp.go index e9762a3e4..d5f0d2cbe 100644 --- a/fuse/node/mount_notsupp.go +++ b/fuse/node/mount_notsupp.go @@ -1,5 +1,4 @@ //go:build (!nofuse && openbsd) || (!nofuse && netbsd) || (!nofuse && plan9) -// +build !nofuse,openbsd !nofuse,netbsd !nofuse,plan9 package node @@ -9,6 +8,10 @@ import ( core "github.com/ipfs/kubo/core" ) -func Mount(node *core.IpfsNode, fsdir, nsdir string) error { +func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { return errors.New("FUSE not supported on OpenBSD or NetBSD. 
See #5334 (https://github.com/ipfs/kubo/issues/5334).") } + +func Unmount(node *core.IpfsNode) { + return +} diff --git a/fuse/node/mount_test.go b/fuse/node/mount_test.go index 178fddcf6..b296e7e95 100644 --- a/fuse/node/mount_test.go +++ b/fuse/node/mount_test.go @@ -1,5 +1,4 @@ //go:build !openbsd && !nofuse && !netbsd && !plan9 -// +build !openbsd,!nofuse,!netbsd,!plan9 package node @@ -56,10 +55,12 @@ func TestExternalUnmount(t *testing.T) { ipfsDir := dir + "/ipfs" ipnsDir := dir + "/ipns" + mfsDir := dir + "/mfs" mkdir(t, ipfsDir) mkdir(t, ipnsDir) + mkdir(t, mfsDir) - err = Mount(node, ipfsDir, ipnsDir) + err = Mount(node, ipfsDir, ipnsDir, mfsDir) if err != nil { if strings.Contains(err.Error(), "unable to check fuse version") || err == fuse.ErrOSXFUSENotFound { t.Skip(err) diff --git a/fuse/node/mount_unix.go b/fuse/node/mount_unix.go index 1e509a243..6864e363b 100644 --- a/fuse/node/mount_unix.go +++ b/fuse/node/mount_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !openbsd && !netbsd && !plan9 && !nofuse -// +build !windows,!openbsd,!netbsd,!plan9,!nofuse package node @@ -11,10 +10,11 @@ import ( core "github.com/ipfs/kubo/core" ipns "github.com/ipfs/kubo/fuse/ipns" + mfs "github.com/ipfs/kubo/fuse/mfs" mount "github.com/ipfs/kubo/fuse/mount" rofs "github.com/ipfs/kubo/fuse/readonly" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("node") @@ -31,27 +31,41 @@ var platformFuseChecks = func(*core.IpfsNode) error { return nil } -func Mount(node *core.IpfsNode, fsdir, nsdir string) error { +func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { // check if we already have live mounts. // if the user said "Mount", then there must be something wrong. // so, close them and try again. 
- if node.Mounts.Ipfs != nil && node.Mounts.Ipfs.IsActive() { - // best effort - _ = node.Mounts.Ipfs.Unmount() - } - if node.Mounts.Ipns != nil && node.Mounts.Ipns.IsActive() { - // best effort - _ = node.Mounts.Ipns.Unmount() - } + Unmount(node) if err := platformFuseChecks(node); err != nil { return err } - return doMount(node, fsdir, nsdir) + return doMount(node, fsdir, nsdir, mfsdir) } -func doMount(node *core.IpfsNode, fsdir, nsdir string) error { +func Unmount(node *core.IpfsNode) { + if node.Mounts.Ipfs != nil && node.Mounts.Ipfs.IsActive() { + // best effort + if err := node.Mounts.Ipfs.Unmount(); err != nil { + log.Errorf("error unmounting IPFS: %s", err) + } + } + if node.Mounts.Ipns != nil && node.Mounts.Ipns.IsActive() { + // best effort + if err := node.Mounts.Ipns.Unmount(); err != nil { + log.Errorf("error unmounting IPNS: %s", err) + } + } + if node.Mounts.Mfs != nil && node.Mounts.Mfs.IsActive() { + // best effort + if err := node.Mounts.Mfs.Unmount(); err != nil { + log.Errorf("error unmounting MFS: %s", err) + } + } +} + +func doMount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { fmtFuseErr := func(err error, mountpoint string) error { s := err.Error() if strings.Contains(s, fuseNoDirectory) { @@ -67,8 +81,8 @@ func doMount(node *core.IpfsNode, fsdir, nsdir string) error { } // this sync stuff is so that both can be mounted simultaneously. 
- var fsmount, nsmount mount.Mount - var err1, err2 error + var fsmount, nsmount, mfmount mount.Mount + var err1, err2, err3 error var wg sync.WaitGroup @@ -86,32 +100,49 @@ func doMount(node *core.IpfsNode, fsdir, nsdir string) error { }() } + wg.Add(1) + go func() { + defer wg.Done() + mfmount, err3 = mfs.Mount(node, mfsdir) + }() + wg.Wait() if err1 != nil { - log.Errorf("error mounting: %s", err1) + log.Errorf("error mounting IPFS %s: %s", fsdir, err1) } if err2 != nil { - log.Errorf("error mounting: %s", err2) + log.Errorf("error mounting IPNS %s for IPFS %s: %s", nsdir, fsdir, err2) } - if err1 != nil || err2 != nil { + if err3 != nil { + log.Errorf("error mounting MFS %s: %s", mfsdir, err3) + } + + if err1 != nil || err2 != nil || err3 != nil { if fsmount != nil { _ = fsmount.Unmount() } if nsmount != nil { _ = nsmount.Unmount() } + if mfmount != nil { + _ = mfmount.Unmount() + } if err1 != nil { return fmtFuseErr(err1, fsdir) } - return fmtFuseErr(err2, nsdir) + if err2 != nil { + return fmtFuseErr(err2, nsdir) + } + return fmtFuseErr(err3, mfsdir) } - // setup node state, so that it can be cancelled + // setup node state, so that it can be canceled node.Mounts.Ipfs = fsmount node.Mounts.Ipns = nsmount + node.Mounts.Mfs = mfmount return nil } diff --git a/fuse/node/mount_windows.go b/fuse/node/mount_windows.go index 33393f99a..9f22fe59e 100644 --- a/fuse/node/mount_windows.go +++ b/fuse/node/mount_windows.go @@ -4,8 +4,14 @@ import ( "github.com/ipfs/kubo/core" ) -func Mount(node *core.IpfsNode, fsdir, nsdir string) error { +func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { // TODO // currently a no-op, but we don't want to return an error return nil } + +func Unmount(node *core.IpfsNode) { + // TODO + // currently a no-op + return +} diff --git a/fuse/readonly/ipfs_test.go b/fuse/readonly/ipfs_test.go index 6d667843c..8e7d6b34d 100644 --- a/fuse/readonly/ipfs_test.go +++ b/fuse/readonly/ipfs_test.go @@ -1,5 +1,4 @@ //go:build !nofuse && 
!openbsd && !netbsd && !plan9 -// +build !nofuse,!openbsd,!netbsd,!plan9 package readonly @@ -150,7 +149,10 @@ func TestIpfsStressRead(t *testing.T) { // Now make a bunch of dirs for i := 0; i < ndiriter; i++ { - db := uio.NewDirectory(nd.DAG) + db, err := uio.NewDirectory(nd.DAG) + if err != nil { + t.Fatal(err) + } for j := 0; j < 1+rand.Intn(10); j++ { name := fmt.Sprintf("child%d", j) @@ -184,7 +186,7 @@ func TestIpfsStressRead(t *testing.T) { defer wg.Done() for i := 0; i < 2000; i++ { - item, err := path.NewPath(paths[rand.Intn(len(paths))]) + item, err := path.NewPath("/ipfs/" + paths[rand.Intn(len(paths))]) if err != nil { errs <- err continue @@ -245,8 +247,11 @@ func TestIpfsBasicDirRead(t *testing.T) { fi, data := randObj(t, nd, 10000) // Make a directory and put that file in it - db := uio.NewDirectory(nd.DAG) - err := db.AddChild(nd.Context(), "actual", fi) + db, err := uio.NewDirectory(nd.DAG) + if err != nil { + t.Fatal(err) + } + err = db.AddChild(nd.Context(), "actual", fi) if err != nil { t.Fatal(err) } diff --git a/fuse/readonly/mount_unix.go b/fuse/readonly/mount_unix.go index 19be37abe..33565acd2 100644 --- a/fuse/readonly/mount_unix.go +++ b/fuse/readonly/mount_unix.go @@ -1,6 +1,4 @@ //go:build (linux || darwin || freebsd) && !nofuse -// +build linux darwin freebsd -// +build !nofuse package readonly @@ -17,5 +15,5 @@ func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { } allowOther := cfg.Mounts.FuseAllowOther fsys := NewFileSystem(ipfs) - return mount.NewMount(ipfs.Process, fsys, mountpoint, allowOther) + return mount.NewMount(fsys, mountpoint, allowOther) } diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index 32be8b123..c04262840 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -1,6 +1,4 @@ //go:build (linux || darwin || freebsd) && !nofuse -// +build linux darwin freebsd -// +build !nofuse package readonly @@ -19,7 +17,7 @@ import ( 
"github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" core "github.com/ipfs/kubo/core" ipldprime "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" diff --git a/gc/gc.go b/gc/gc.go index 51df59e54..ac3f3d08f 100644 --- a/gc/gc.go +++ b/gc/gc.go @@ -16,7 +16,7 @@ import ( cid "github.com/ipfs/go-cid" dstore "github.com/ipfs/go-datastore" ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("gc") @@ -81,7 +81,7 @@ func GC(ctx context.Context, bs bstore.GCBlockstore, dstor dstore.Datastore, pn return } - keychan, err := bs.AllKeysChan(ctx) + keychain, err := bs.AllKeysChan(ctx) if err != nil { select { case output <- Result{Error: err}: @@ -96,11 +96,11 @@ func GC(ctx context.Context, bs bstore.GCBlockstore, dstor dstore.Datastore, pn loop: for ctx.Err() == nil { // select may not notice that we're "done". select { - case k, ok := <-keychan: + case k, ok := <-keychain: if !ok { break loop } - // NOTE: assumes that all CIDs returned by the keychan are _raw_ CIDv1 CIDs. + // NOTE: assumes that all CIDs returned by the keychain are _raw_ CIDv1 CIDs. // This means we keep the block as long as we want it somewhere (CIDv1, CIDv0, Raw, other...). if !gcs.Has(k) { err := bs.DeleteBlock(ctx, k) @@ -165,7 +165,7 @@ func Descendants(ctx context.Context, getLinks dag.GetLinks, set *cid.Set, roots } verboseCidError := func(err error) error { - if strings.Contains(err.Error(), verifcid.ErrBelowMinimumHashLength.Error()) || + if strings.Contains(err.Error(), verifcid.ErrDigestTooSmall.Error()) || strings.Contains(err.Error(), verifcid.ErrPossiblyInsecureHashFunction.Error()) { err = fmt.Errorf("\"%s\"\nPlease run 'ipfs pin verify'"+ // nolint " to list insecure hashes. 
If you want to read them,"+ diff --git a/go.mod b/go.mod index e188c1968..cba15d0a5 100644 --- a/go.mod +++ b/go.mod @@ -1,254 +1,273 @@ module github.com/ipfs/kubo +go 1.25 + require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc contrib.go.opencensus.io/exporter/prometheus v0.4.2 - github.com/benbjohnson/clock v1.3.5 + github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 github.com/blang/semver/v4 v4.0.0 + github.com/caddyserver/certmagic v0.23.0 github.com/cenkalti/backoff/v4 v4.3.0 - github.com/ceramicnetwork/go-dag-jose v0.1.0 + github.com/ceramicnetwork/go-dag-jose v0.1.1 github.com/cheggaaa/pb v1.0.29 + github.com/cockroachdb/pebble/v2 v2.1.3 github.com/coreos/go-systemd/v22 v22.5.0 github.com/dustin/go-humanize v1.0.1 github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302 github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 - github.com/fsnotify/fsnotify v1.6.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/google/uuid v1.6.0 - github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-version v1.6.0 - github.com/ipfs-shipyard/nopfs v0.0.12 - github.com/ipfs-shipyard/nopfs/ipfs v0.13.2-0.20231027223058-cde3b5ba964c - github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34 - github.com/ipfs/go-block-format v0.2.0 - github.com/ipfs/go-cid v0.4.1 + github.com/hashicorp/go-version v1.7.0 + github.com/ipfs-shipyard/nopfs v0.0.14 + github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 + github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c + github.com/ipfs/go-block-format v0.2.3 + github.com/ipfs/go-cid v0.6.0 github.com/ipfs/go-cidutil v0.1.0 - github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-datastore v0.9.0 github.com/ipfs/go-detect-race v0.0.1 - github.com/ipfs/go-ds-badger v0.3.0 - github.com/ipfs/go-ds-flatfs v0.5.1 - github.com/ipfs/go-ds-leveldb v0.5.0 - github.com/ipfs/go-ds-measure v0.2.0 - github.com/ipfs/go-fs-lock v0.0.7 - github.com/ipfs/go-ipfs-cmds v0.13.0 - github.com/ipfs/go-ipld-cbor v0.1.0 
- github.com/ipfs/go-ipld-format v0.6.0 + github.com/ipfs/go-ds-badger v0.3.4 + github.com/ipfs/go-ds-flatfs v0.6.0 + github.com/ipfs/go-ds-leveldb v0.5.2 + github.com/ipfs/go-ds-measure v0.2.2 + github.com/ipfs/go-ds-pebble v0.5.8 + github.com/ipfs/go-fs-lock v0.1.1 + github.com/ipfs/go-ipfs-cmds v0.15.0 + github.com/ipfs/go-ipld-cbor v0.2.1 + github.com/ipfs/go-ipld-format v0.6.3 github.com/ipfs/go-ipld-git v0.1.1 - github.com/ipfs/go-ipld-legacy v0.2.1 - github.com/ipfs/go-log v1.0.5 - github.com/ipfs/go-log/v2 v2.5.1 - github.com/ipfs/go-metrics-interface v0.0.1 - github.com/ipfs/go-metrics-prometheus v0.0.2 - github.com/ipfs/go-test v0.0.4 - github.com/ipfs/go-unixfsnode v1.9.1 - github.com/ipld/go-car v0.6.2 - github.com/ipld/go-car/v2 v2.13.1 - github.com/ipld/go-codec-dagpb v1.6.0 + github.com/ipfs/go-ipld-legacy v0.2.2 + github.com/ipfs/go-log/v2 v2.9.0 + github.com/ipfs/go-metrics-interface v0.3.0 + github.com/ipfs/go-metrics-prometheus v0.1.0 + github.com/ipfs/go-test v0.2.3 + github.com/ipfs/go-unixfsnode v1.10.2 + github.com/ipld/go-car/v2 v2.16.0 + github.com/ipld/go-codec-dagpb v1.7.0 github.com/ipld/go-ipld-prime v0.21.0 + github.com/ipshipyard/p2p-forge v0.7.0 github.com/jbenet/go-temp-err-catcher v0.1.0 - github.com/jbenet/goprocess v0.1.4 github.com/julienschmidt/httprouter v1.3.0 - github.com/libp2p/go-doh-resolver v0.4.0 - github.com/libp2p/go-libp2p v0.36.3 + github.com/libp2p/go-doh-resolver v0.5.0 + github.com/libp2p/go-libp2p v0.46.0 github.com/libp2p/go-libp2p-http v0.5.0 - github.com/libp2p/go-libp2p-kad-dht v0.26.1 - github.com/libp2p/go-libp2p-kbucket v0.6.3 - github.com/libp2p/go-libp2p-pubsub v0.11.0 + github.com/libp2p/go-libp2p-kad-dht v0.36.0 + github.com/libp2p/go-libp2p-kbucket v0.8.0 + github.com/libp2p/go-libp2p-pubsub v0.14.2 github.com/libp2p/go-libp2p-pubsub-router v0.6.0 - github.com/libp2p/go-libp2p-record v0.2.0 - github.com/libp2p/go-libp2p-routing-helpers v0.7.4 + github.com/libp2p/go-libp2p-record v0.3.1 + 
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 github.com/libp2p/go-libp2p-testing v0.12.0 - github.com/libp2p/go-socket-activation v0.1.0 - github.com/mitchellh/go-homedir v1.1.0 - github.com/multiformats/go-multiaddr v0.13.0 - github.com/multiformats/go-multiaddr-dns v0.3.1 + github.com/libp2p/go-socket-activation v0.1.1 + github.com/miekg/dns v1.1.68 + github.com/multiformats/go-multiaddr v0.16.1 + github.com/multiformats/go-multiaddr-dns v0.4.1 github.com/multiformats/go-multibase v0.2.0 - github.com/multiformats/go-multicodec v0.9.0 + github.com/multiformats/go-multicodec v0.10.0 github.com/multiformats/go-multihash v0.2.3 github.com/opentracing/opentracing-go v1.2.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 - github.com/prometheus/client_golang v1.19.1 - github.com/stretchr/testify v1.9.0 - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tidwall/gjson v1.14.4 + github.com/probe-lab/go-libdht v0.4.0 + github.com/prometheus/client_golang v1.23.2 + github.com/stretchr/testify v1.11.1 + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d + github.com/tidwall/gjson v1.16.0 github.com/tidwall/sjson v1.2.5 github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 go.opencensus.io v0.24.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/sdk v1.27.0 - go.opentelemetry.io/otel/trace v1.28.0 - go.uber.org/dig v1.17.1 - go.uber.org/fx v1.22.1 - go.uber.org/multierr v1.11.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/exporters/prometheus v0.56.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + go.uber.org/dig 
v1.19.0 + go.uber.org/fx v1.24.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.25.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/mod v0.19.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.24.0 - google.golang.org/protobuf v1.34.2 + golang.org/x/crypto v0.45.0 + golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 + golang.org/x/mod v0.30.0 + golang.org/x/sync v0.18.0 + golang.org/x/sys v0.38.0 + google.golang.org/protobuf v1.36.10 ) require ( github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect + github.com/DataDog/zstd v1.5.7 // indirect github.com/Jorropo/jsync v1.0.1 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect + github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 // indirect + github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/caddyserver/zerossl v0.1.3 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups v1.1.0 // indirect - github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect + github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect github.com/cskr/pubsub 
v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/ristretto v0.0.2 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/elastic/gosigar v0.14.3 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/flynn/noise v1.1.0 // indirect - github.com/francoispqt/gojay v1.2.13 // indirect - github.com/gabriel-vasile/mimetype v1.4.4 // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect + github.com/gammazero/chanqueue v1.1.1 // indirect + github.com/gammazero/deque v1.2.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/gorilla/mux v1.8.1 // 
indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/guillaumemichel/reservedpool v0.3.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect - github.com/ipfs/go-blockservice v0.5.2 // indirect - github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect - github.com/ipfs/go-ipfs-delay v0.0.1 // indirect + github.com/ipfs/go-dsqueue v0.1.1 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect - github.com/ipfs/go-ipfs-redirects-file v0.1.1 // indirect - github.com/ipfs/go-ipfs-util v0.0.3 // indirect - github.com/ipfs/go-merkledag v0.11.0 // indirect - github.com/ipfs/go-peertaskqueue v0.8.1 // indirect - github.com/ipfs/go-verifcid v0.0.3 // indirect + github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect + github.com/ipfs/go-peertaskqueue v0.8.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect - github.com/klauspost/compress v1.17.9 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/koron/go-ssdp v0.0.4 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/koron/go-ssdp v0.0.6 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/libdns/libdns v1.0.0-beta.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.3.0 // indirect 
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect github.com/libp2p/go-libp2p-xor v0.1.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-nat v0.2.0 // indirect - github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-netroute v0.3.0 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/libp2p/zeroconf/v2 v2.2.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect - github.com/miekg/dns v1.1.61 // indirect + github.com/mholt/acmez/v3 v3.1.2 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multistream v0.5.0 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.19.1 // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/onsi/gomega v1.36.3 // indirect 
github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect - github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/datachannel v1.5.10 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/ice/v2 v2.3.34 // indirect - github.com/pion/interceptor v0.1.29 // indirect - github.com/pion/logging v0.2.2 // indirect - github.com/pion/mdns v0.0.12 // indirect + github.com/pion/dtls/v3 v3.0.6 // indirect + github.com/pion/ice/v4 v4.0.10 // indirect + github.com/pion/interceptor v0.1.40 // indirect + github.com/pion/logging v0.2.3 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.8 // indirect - github.com/pion/sctp v1.8.20 // indirect - github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.20 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.19 // indirect + github.com/pion/sctp v1.8.39 // indirect + github.com/pion/sdp/v3 v3.0.13 // indirect + github.com/pion/srtp/v3 v3.0.6 // indirect github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v3 v3.0.0 // indirect github.com/pion/transport/v2 v2.2.10 // indirect - github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.3.0 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v4 v4.0.2 // indirect + github.com/pion/webrtc/v4 v4.1.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/prometheus/statsd_exporter v0.22.7 // indirect - github.com/quic-go/qpack v0.4.0 // 
indirect - github.com/quic-go/quic-go v0.45.2 // indirect - github.com/quic-go/webtransport-go v0.8.0 // indirect - github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/rs/cors v1.10.1 // indirect - github.com/samber/lo v1.46.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/statsd_exporter v0.27.1 // indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.57.1 // indirect + github.com/quic-go/webtransport-go v0.9.0 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/slok/go-http-metrics v0.13.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/texttheater/golang-levenshtein v1.0.1 // indirect github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/pretty v1.2.1 // indirect github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect - github.com/whyrusleeping/cbor-gen v0.1.2 // indirect + github.com/whyrusleeping/cbor-gen v0.3.1 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/wlynxg/anet v0.0.3 // indirect + github.com/wlynxg/anet v0.0.5 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/propagators/aws v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/ot v1.21.1 // 
indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/zipkin v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/mock v0.4.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect + go.uber.org/mock v0.5.2 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap/exp v0.3.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.23.0 // indirect - golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/grpc v1.64.0 // indirect - gopkg.in/square/go-jose.v2 v2.5.1 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/telemetry 
v0.0.0-20251111182119-bc8e575c7b54 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.39.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum v0.16.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.3.0 // indirect + lukechampine.com/blake3 v1.4.1 // indirect ) - -go 1.22 diff --git a/go.sum b/go.sum index 246eef96b..b5833a54a 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,7 @@ bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510= bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= @@ -36,21 +34,24 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod 
h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 h1:8XBWWQD+vFF+JqOsm16t0Kab1a7YWV8+GISVEP8AuZ8= +github.com/RaduBerinde/axisds 
v0.0.0-20250419182453-5135a0650657/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -58,14 +59,13 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= github.com/alexbrainman/goissue34681 
v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -74,7 +74,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -84,12 +83,17 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket 
v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU= +github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= +github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= +github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/ceramicnetwork/go-dag-jose v0.1.0 h1:yJ/HVlfKpnD3LdYP03AHyTvbm3BpPiz2oZiOeReJRdU= -github.com/ceramicnetwork/go-dag-jose v0.1.0/go.mod h1:qYA1nYt0X8u4XoMAVoOV3upUVKtrxy/I670Dg5F0wjI= +github.com/ceramicnetwork/go-dag-jose v0.1.1 h1:7pObs22egc14vSS3AfCFfS1VmaL4lQUsAK7OGC3PlKk= +github.com/ceramicnetwork/go-dag-jose v0.1.1/go.mod h1:8ptnYwY2Z2y/s5oJnNBn/UCxLg6CpramNJ2ZXF/5aNY= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -101,55 +105,61 @@ github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuP github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble/v2 
v2.1.3 h1:irU503OnjRoJBrkZQIJvwv9c4WvpUeOJxhRApojB8D8= +github.com/cockroachdb/pebble/v2 v2.1.3/go.mod h1:B1UgWsyR+L+UvZXNgpxw+WqsUKA8VQ/bb//FXOHghB8= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
-github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= -github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= 
+github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/gosigar v0.14.3 
h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= -github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302 h1:QV0ZrfBLpFc2KDk+a4LJefDczXnonRwrYrQJY/9L4dA= github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302/go.mod h1:qBlWZqWeVx9BjvqBsnC/8RUlAYpIFmPvgROcw0n1scE= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -158,32 +168,43 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod 
h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= -github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= -github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= +github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.2.0 
h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU= +github.com/gammazero/deque v1.2.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -193,21 +214,18 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -220,7 +238,6 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -247,8 +264,9 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -258,16 +276,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= @@ -280,16 +294,12 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -299,19 +309,14 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache 
v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw= +github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= @@ -323,132 +328,102 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ipfs-shipyard/nopfs v0.0.12 h1:mvwaoefDF5VI9jyvgWCmaoTJIJFAfrbyQV5fJz35hlk= -github.com/ipfs-shipyard/nopfs v0.0.12/go.mod h1:mQyd0BElYI2gB/kq/Oue97obP4B3os4eBmgfPZ+hnrE= -github.com/ipfs-shipyard/nopfs/ipfs v0.13.2-0.20231027223058-cde3b5ba964c h1:7UynTbtdlt+w08ggb1UGLGaGjp1mMaZhoTZSctpn5Ak= -github.com/ipfs-shipyard/nopfs/ipfs v0.13.2-0.20231027223058-cde3b5ba964c/go.mod h1:6EekK/jo+TynwSE/ZOiOJd4eEvRXoavEC3vquKtv4yI= +github.com/ipfs-shipyard/nopfs v0.0.14 h1:HFepJt/MxhZ3/GsLZkkAPzIPdNYKaLO1Qb7YmPbWIKk= +github.com/ipfs-shipyard/nopfs v0.0.14/go.mod h1:mQyd0BElYI2gB/kq/Oue97obP4B3os4eBmgfPZ+hnrE= +github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcdHUd7SDsUOY= +github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34 h1:/Etgc4IR0OUF+nIoNdqwu12EYuaSMpd7/Nc5wRLd67U= -github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34/go.mod h1:ulu5I6avTmgGmvjuCaBRKwsaOOKjBfQw1EiOOQp8M6E= +github.com/ipfs/boxo 
v0.35.3-0.20251202220026-0842ad274a0c h1:mczpALnNzNhmggehO5Ehr9+Q8+NiJyKJfT4EPwi01d0= +github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= -github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= -github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= -github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= -github.com/ipfs/go-blockservice v0.5.2 h1:in9Bc+QcXwd1apOVM7Un9t8tixPKdaHQFdLSUM1Xgk8= -github.com/ipfs/go-blockservice v0.5.2/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk= +github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk= +github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= +github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.1.0/go.mod 
h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w= +github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= -github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= -github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= -github.com/ipfs/go-ds-flatfs v0.5.1 h1:ZCIO/kQOS/PSh3vcF1H6a8fkRGS7pOfwfPdx4n/KJH4= -github.com/ipfs/go-ds-flatfs v0.5.1/go.mod h1:RWTV7oZD/yZYBKdbVIFXTX2fdY2Tbvl94NsWqmoyAX4= +github.com/ipfs/go-ds-badger v0.3.4 h1:MmqFicftE0KrwMC77WjXTrPuoUxhwyFsjKONSeWrlOo= +github.com/ipfs/go-ds-badger v0.3.4/go.mod h1:HfqsKJcNnIr9ZhZ+rkwS1J5PpaWjJjg6Ipmxd7KPfZ8= +github.com/ipfs/go-ds-flatfs v0.6.0 h1:olAEnDNBK1VMoTRZvfzgo90H5kBP4qIZPpYMtNlBBws= +github.com/ipfs/go-ds-flatfs v0.6.0/go.mod h1:p8a/YhmAFYyuonxDbvuIANlDCgS69uqVv+iH5f8fAxY= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= -github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= -github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= -github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= -github.com/ipfs/go-ds-measure v0.2.0/go.mod 
h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= -github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= -github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= -github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= -github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= -github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= -github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-cmds v0.13.0 h1:+WVHZMrQNkPqwAQdrSFGbJgHpOc8H2G8eszNxnvoCQA= -github.com/ipfs/go-ipfs-cmds v0.13.0/go.mod h1:GYqjGSt6u9k9tyxIDT7M0ROWeB2raPGH94uuVnpWgY0= +github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp0x0= +github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo= +github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI= +github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs= +github.com/ipfs/go-ds-pebble v0.5.8 h1:NbAfKQo+m39Nka6gt8PARAyH+VoHtRInB6CFCmT+wqo= +github.com/ipfs/go-ds-pebble v0.5.8/go.mod h1:AJjJTHgads/Fn5+tuWmaDGjGEbks7Wgx82NQ/pwmEhc= +github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc= +github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10= +github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw= +github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0= +github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ= +github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 
h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= -github.com/ipfs/go-ipfs-exchange-interface v0.2.1 h1:jMzo2VhLKSHbVe+mHNzYgs95n0+t0Q69GQ5WhRDZV/s= -github.com/ipfs/go-ipfs-exchange-interface v0.2.1/go.mod h1:MUsYn6rKbG6CTtsDp+lKJPmVt3ZrCViNyH3rfPGsZ2E= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= -github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8= -github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk= -github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= -github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= +github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= -github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= -github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= -github.com/ipfs/go-ipld-cbor v0.1.0/go.mod 
h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= -github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= -github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= +github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E= +github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= +github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8= +github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y= github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI= -github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= -github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= +github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= +github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= -github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= -github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= -github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= 
-github.com/ipfs/go-merkledag v0.11.0 h1:DgzwK5hprESOzS4O1t/wi6JDpyVQdvm9Bs59N/jqfBY= -github.com/ipfs/go-merkledag v0.11.0/go.mod h1:Q4f/1ezvBiJV0YCIXvt51W/9/kqJGH4I1LsA7+djsM4= -github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= -github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= -github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= -github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= -github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= -github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= -github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= -github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= -github.com/ipfs/go-unixfsnode v1.9.1 h1:2cdSIDQCt7emNhlyUqUFQnKo2XvecARoIcurIKFjPD8= -github.com/ipfs/go-unixfsnode v1.9.1/go.mod h1:u8WxhmXzyrq3xfSYkhfx+uI+n91O+0L7KFjq3TS7d6g= -github.com/ipfs/go-verifcid v0.0.3 h1:gmRKccqhWDocCRkC+a59g5QW7uJw5bpX9HWBevXa0zs= -github.com/ipfs/go-verifcid v0.0.3/go.mod h1:gcCtGniVzelKrbk9ooUSX/pM3xlH73fZZJDzQJRvOUw= -github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= -github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= -github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= -github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= -github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= -github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= +github.com/ipfs/go-log/v2 v2.9.0 
h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk= +github.com/ipfs/go-log/v2 v2.9.0/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c= +github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= +github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= +github.com/ipfs/go-metrics-prometheus v0.1.0 h1:bApWOHkrH3VTBHzTHrZSfq4n4weOZDzZFxUXv+HyKcA= +github.com/ipfs/go-metrics-prometheus v0.1.0/go.mod h1:2GtL525C/4yxtvSXpRJ4dnE45mCX9AS0XRa03vHx7G0= +github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= +github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8= +github.com/ipfs/go-unixfsnode v1.10.2/go.mod h1:ImDPTSiKZ+2h4UVdkSDITJHk87bUAp7kX/lgifjRicg= +github.com/ipld/go-car/v2 v2.16.0 h1:LWe0vmN/QcQmUU4tr34W5Nv5mNraW+G6jfN2s+ndBco= +github.com/ipld/go-car/v2 v2.16.0/go.mod h1:RqFGWN9ifcXVmCrTAVnfnxiWZk1+jIx67SYhenlmL34= +github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0= +github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.14.1/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= -github.com/ipld/go-ipld-prime/storage/bsadapter 
v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714 h1:cqNk8PEwHnK0vqWln+U/YZhQc9h2NB3KjUjDPZo5Q2s= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714/go.mod h1:ZEUdra3CoqRVRYgAX/jAJO9aZGz6SKtKEG628fHHktY= +github.com/ipshipyard/p2p-forge v0.7.0 h1:PQayexxZC1FR2Vx0XOSbmZ6wDPliidS48I+xXWuF+YU= +github.com/ipshipyard/p2p-forge v0.7.0/go.mod h1:i2wg0p7WmHGyo5vYaK9COZBp8BN5Drncfu3WoQNZlQY= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= -github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -470,42 +445,43 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= -github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= +github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= +github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ= +github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+0S7FQqw= -github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= +github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+skhePFdE= +github.com/libp2p/go-doh-resolver 
v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= -github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= +github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= +github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= +github.com/libp2p/go-libp2p v0.46.0 h1:0T2yvIKpZ3DVYCuPOFxPD1layhRU486pj9rSlGWYnDM= +github.com/libp2p/go-libp2p v0.46.0/go.mod h1:TbIDnpDjBLa7isdgYpbxozIVPBTmM/7qKOJP4SFySrQ= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= @@ -514,20 +490,20 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc= github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg= -github.com/libp2p/go-libp2p-kad-dht v0.26.1 h1:AazV3LCImYVkDUGAHx5lIEgZ9iUI2QQKH5GMRQU8uEA= -github.com/libp2p/go-libp2p-kad-dht v0.26.1/go.mod h1:mqRUGJ/+7ziQ3XknU2kKHfsbbgb9xL65DXjPOJwmZF8= +github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8= +github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod 
h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= -github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= -github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= +github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= +github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= -github.com/libp2p/go-libp2p-pubsub v0.11.0 h1:+JvS8Kty0OiyUiN0i8H5JbaCgjnJTRnTHe4rU88dLFc= -github.com/libp2p/go-libp2p-pubsub v0.11.0/go.mod h1:QEb+hEV9WL9wCiUAnpY29FZR6W3zK8qYlaml8R4q6gQ= +github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8= +github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44= github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s= github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE= -github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= -github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= -github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= +github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= +github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod 
h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA= @@ -535,50 +511,47 @@ github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQ github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= -github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= -github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= -github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc= +github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= -github.com/libp2p/go-socket-activation v0.1.0 h1:OImQPhtbGlCNaF/KSTl6pBBy+chA5eBt5i9uMJNtEdY= -github.com/libp2p/go-socket-activation v0.1.0/go.mod h1:gzda2dNkMG5Ti2OfWNNwW0FDIbj0g/aJJU320FcLfhk= -github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= -github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/go-socket-activation v0.1.1 
h1:wkLBj6RqKffjt7BI794ewoSt241UV52NKYvIbpzhn4Q= +github.com/libp2p/go-socket-activation v0.1.1/go.mod h1:NBfVUPXTRL/FU6UmSOM+1O7/vJkpS523sQiriw0Qln8= +github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= +github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= -github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg= +github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty 
v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc= +github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= 
-github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -586,13 +559,14 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -614,12 +588,10 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= -github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= -github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= -github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net 
v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= @@ -627,55 +599,47 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= -github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= +github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= -github.com/multiformats/go-multistream v0.5.0/go.mod 
h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod 
h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= -github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= 
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= @@ -683,143 +647,121 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= -github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= -github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 
v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= -github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= -github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= +github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= +github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= +github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= +github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= -github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= -github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= -github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= 
-github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs= -github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.20 h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg= -github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8= -github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= -github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= -github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= +github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= +github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= +github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= +github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= +github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= +github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= 
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= -github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw= -github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s= -github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= -github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= -github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= +github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= +github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= +github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo= +github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.23.2 
h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs 
v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= -github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= -github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY= -github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= -github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= -github.com/quic-go/webtransport-go 
v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= -github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= -github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/prometheus/statsd_exporter v0.27.1 h1:tcRJOmwlA83HPfWzosAgr2+zEN5XDFv+M2mn/uYkn5Y= +github.com/prometheus/statsd_exporter v0.27.1/go.mod h1:vA6ryDfsN7py/3JApEst6nLTJboq66XsNcJGNmC88NQ= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI10= +github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod 
h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/samber/lo v1.46.0 h1:w8G+oaCPgz1PoCJztqymCFaKwXt+5cCXn51uPxExFfQ= -github.com/samber/lo v1.46.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= 
-github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/slok/go-http-metrics v0.13.0 h1:lQDyJJx9wKhmbliyUsZ2l6peGnXRHjsjoqPt5VYzcP8= 
+github.com/slok/go-http-metrics v0.13.0/go.mod h1:HIr7t/HbN2sJaunvnt9wKP9xoBBVZFo1/KiHU3b0w+4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= @@ -827,8 +769,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -849,45 +789,40 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqajr6t1lOv8GyGE2U= github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8= github.com/tidwall/gjson v1.14.2/go.mod 
h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= -github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg= +github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= -github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/viant/assertly 
v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= -github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-testmark v0.9.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= @@ -897,8 +832,8 @@ github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboa github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= -github.com/whyrusleeping/cbor-gen v0.1.2 h1:WQFlrPhpcQl+M2/3dP5cvlTLWPVsL6LGBb9jJt6l/cA= -github.com/whyrusleeping/cbor-gen v0.1.2/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= +github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -908,17 +843,22 @@ 
github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 h1:ctS9An github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1/go.mod h1:tKH72zYNt/exx6/5IQO6L9LoQ0rEjd5SbbWaDTs9Zso= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= +github.com/zeebo/pcg v1.0.1 
h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= +github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= @@ -928,8 +868,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 h1:cXTYcMjY0dsYokAuo8LbNBQxpF8VgTHdiHJJ1zlIXl4= go.opentelemetry.io/contrib/propagators/autoprop v0.46.1/go.mod h1:WZxgny1/6+j67B1s72PLJ4bGjidoWFzSmLNfJKVt2bo= go.opentelemetry.io/contrib/propagators/aws v1.21.1 h1:uQIQIDWb0gzyvon2ICnghpLAf9w7ADOCUiIiwCQgR2o= @@ -940,64 +882,53 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 h1:f4beMGDKiVzg9IcX7/VuWV go.opentelemetry.io/contrib/propagators/jaeger v1.21.1/go.mod h1:U9jhkEl8d1LL+QXY7q3kneJWJugiN3kZJV2OWz3hkBY= go.opentelemetry.io/contrib/propagators/ot v1.21.1 
h1:3TN5vkXjKYWp0YdMcnUEC/A+pBPvqz9V3nCS2xmcurk= go.opentelemetry.io/contrib/propagators/ot v1.21.1/go.mod h1:oy0MYCbS/b3cqUDW37wBWtlwBIsutngS++Lklpgh+fc= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/exporters/zipkin v1.27.0 h1:aXcxb7F6ZDC1o2Z52LDfS2g6M2FB5CrxdR2gzY4QRNs= -go.opentelemetry.io/otel/exporters/zipkin v1.27.0/go.mod h1:+WMURoi4KmVB7ypbFPx3xtZTWen2Ca3lRK9u6DVTO5M= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod 
h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= -go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/prometheus v0.56.0 
h1:GnCIi0QyG0yy2MrJLzVrIM7laaJstj//flf1zEJCG+E= +go.opentelemetry.io/otel/exporters/prometheus v0.56.0/go.mod h1:JQcVZtbIIPM+7SWBB+T6FK+xunlyidwLp++fN0sUaOk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0= +go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= +go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.4.0 
h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= +go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= -golang.org/x/build 
v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1010,8 +941,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1022,11 +953,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1045,23 +975,18 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1084,17 +1009,17 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -1102,20 +1027,17 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= 
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1130,20 +1052,17 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1162,9 +1081,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1174,40 +1091,38 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1216,8 +1131,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1229,18 +1144,15 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1258,8 +1170,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1286,23 +1196,21 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= 
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1320,18 +1228,12 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1360,13 +1262,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod 
h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1380,8 +1279,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1396,8 +1295,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= 
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1406,9 +1305,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1417,16 +1313,12 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1434,13 +1326,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= -lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= 
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/misc/README.md b/misc/README.md index 28511d3fc..ea683519b 100644 --- a/misc/README.md +++ b/misc/README.md @@ -39,6 +39,12 @@ To run this in your user session, save it as `~/.config/systemd/user/ipfs.servic ``` Read more about `--user` services here: [wiki.archlinux.org:Systemd ](https://wiki.archlinux.org/index.php/Systemd/User#Automatic_start-up_of_systemd_user_instances) +#### P2P tunnel services + +For running `ipfs p2p listen` or `ipfs p2p forward` as systemd services, +see [docs/p2p-tunnels.md](../docs/p2p-tunnels.md) for examples using the +`--foreground` flag and path-based activation. + ### initd - Here is a full-featured sample service file: https://github.com/dylanPowers/ipfs-linux-service/blob/master/init.d/ipfs diff --git a/misc/fsutil/fsutil.go b/misc/fsutil/fsutil.go new file mode 100644 index 000000000..6773ec12f --- /dev/null +++ b/misc/fsutil/fsutil.go @@ -0,0 +1,82 @@ +package fsutil + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" +) + +// DirWritable checks if a directory is writable. If the directory does +// not exist it is created with writable permission. +func DirWritable(dir string) error { + if dir == "" { + return errors.New("directory not specified") + } + + var err error + dir, err = ExpandHome(dir) + if err != nil { + return err + } + + fi, err := os.Stat(dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // Directory does not exist, so create it. 
+ err = os.Mkdir(dir, 0775) + if err == nil { + return nil + } + } + if errors.Is(err, fs.ErrPermission) { + err = fs.ErrPermission + } + return fmt.Errorf("directory not writable: %s: %w", dir, err) + } + if !fi.IsDir() { + return fmt.Errorf("not a directory: %s", dir) + } + + // Directory exists, check that a file can be written. + file, err := os.CreateTemp(dir, "writetest") + if err != nil { + if errors.Is(err, fs.ErrPermission) { + err = fs.ErrPermission + } + return fmt.Errorf("directory not writable: %s: %w", dir, err) + } + file.Close() + return os.Remove(file.Name()) +} + +// ExpandHome expands the path to include the home directory if the path is +// prefixed with `~`. If it isn't prefixed with `~`, the path is returned +// as-is. +func ExpandHome(path string) (string, error) { + if path == "" { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := os.UserHomeDir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// FileExists return true if the file exists +func FileExists(filename string) bool { + _, err := os.Lstat(filename) + return !errors.Is(err, os.ErrNotExist) +} diff --git a/misc/fsutil/fsutil_test.go b/misc/fsutil/fsutil_test.go new file mode 100644 index 000000000..72834ac10 --- /dev/null +++ b/misc/fsutil/fsutil_test.go @@ -0,0 +1,92 @@ +package fsutil_test + +import ( + "io/fs" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/ipfs/kubo/misc/fsutil" + "github.com/stretchr/testify/require" +) + +func TestDirWritable(t *testing.T) { + err := fsutil.DirWritable("") + require.Error(t, err) + + err = fsutil.DirWritable("~nosuchuser/tmp") + require.Error(t, err) + + tmpDir := t.TempDir() + + wrDir := filepath.Join(tmpDir, "readwrite") + err = fsutil.DirWritable(wrDir) + require.NoError(t, err) + + // Check that DirWritable created 
directory. + fi, err := os.Stat(wrDir) + require.NoError(t, err) + require.True(t, fi.IsDir()) + + err = fsutil.DirWritable(wrDir) + require.NoError(t, err) + + // If running on Windows, skip read-only directory tests. + if runtime.GOOS == "windows" { + t.SkipNow() + } + + roDir := filepath.Join(tmpDir, "readonly") + require.NoError(t, os.Mkdir(roDir, 0500)) + err = fsutil.DirWritable(roDir) + require.ErrorIs(t, err, fs.ErrPermission) + + roChild := filepath.Join(roDir, "child") + err = fsutil.DirWritable(roChild) + require.ErrorIs(t, err, fs.ErrPermission) +} + +func TestFileExists(t *testing.T) { + fileName := filepath.Join(t.TempDir(), "somefile") + require.False(t, fsutil.FileExists(fileName)) + + file, err := os.Create(fileName) + require.NoError(t, err) + file.Close() + + require.True(t, fsutil.FileExists(fileName)) +} + +func TestExpandHome(t *testing.T) { + dir, err := fsutil.ExpandHome("") + require.NoError(t, err) + require.Equal(t, "", dir) + + origDir := filepath.Join("somedir", "somesub") + dir, err = fsutil.ExpandHome(origDir) + require.NoError(t, err) + require.Equal(t, origDir, dir) + + _, err = fsutil.ExpandHome(filepath.FromSlash("~nosuchuser/somedir")) + require.Error(t, err) + + homeEnv := "HOME" + if runtime.GOOS == "windows" { + homeEnv = "USERPROFILE" + } + origHome := os.Getenv(homeEnv) + defer os.Setenv(homeEnv, origHome) + homeDir := filepath.Join(t.TempDir(), "testhome") + os.Setenv(homeEnv, homeDir) + + const subDir = "mytmp" + origDir = filepath.Join("~", subDir) + dir, err = fsutil.ExpandHome(origDir) + require.NoError(t, err) + require.Equal(t, filepath.Join(homeDir, subDir), dir) + + os.Unsetenv(homeEnv) + _, err = fsutil.ExpandHome(origDir) + require.Error(t, err) +} diff --git a/mk/golang.mk b/mk/golang.mk index 3b32a65f9..53bf5fca2 100644 --- a/mk/golang.mk +++ b/mk/golang.mk @@ -1,5 +1,4 @@ # golang utilities -GO_MIN_VERSION = 1.18 export GO111MODULE=on @@ -26,10 +25,10 @@ TEST_GO := TEST_GO_BUILD := CHECK_GO := 
-go-pkg-name=$(shell $(GOCC) list $(go-tags) github.com/ipfs/kubo/$(1)) +go-pkg-name=$(shell GOFLAGS=-buildvcs=false $(GOCC) list $(go-tags) github.com/ipfs/kubo/$(1)) go-main-name=$(notdir $(call go-pkg-name,$(1)))$(?exe) go-curr-pkg-tgt=$(d)/$(call go-main-name,$(d)) -go-pkgs=$(shell $(GOCC) list github.com/ipfs/kubo/...) +go-pkgs=$(shell GOFLAGS=-buildvcs=false $(GOCC) list github.com/ipfs/kubo/...) go-tags=$(if $(GOTAGS), -tags="$(call join-with,$(space),$(GOTAGS))") go-flags-with-tags=$(GOFLAGS)$(go-tags) @@ -42,44 +41,57 @@ define go-build $(GOCC) build $(go-flags-with-tags) -o "$@" "$(1)" endef -define go-try-build -$(GOCC) build $(go-flags-with-tags) -o /dev/null "$(call go-pkg-name,$<)" -endef +# Only disable colors when running in CI (non-interactive terminal) +GOTESTSUM_NOCOLOR := $(if $(CI),--no-color,) -test_go_test: $$(DEPS_GO) - $(GOCC) test $(go-flags-with-tags) $(GOTFLAGS) ./... -.PHONY: test_go_test +# Packages excluded from coverage (test code and examples are not production code) +COVERPKG_EXCLUDE := /(test|docs/examples)/ -test_go_build: $$(TEST_GO_BUILD) +# Packages excluded from unit tests: coverage exclusions + client/rpc (tested by test_cli) +UNIT_EXCLUDE := /(test|docs/examples)/|/client/rpc$$ -test_go_short: GOTFLAGS += -test.short -test_go_short: test_go_test -.PHONY: test_go_short +# Unit tests with coverage +# Produces JSON for CI reporting and coverage profile for Codecov +test_unit: test/bin/gotestsum $$(DEPS_GO) + mkdir -p test/unit coverage + rm -f test/unit/gotest.json coverage/unit_tests.coverprofile + gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/unit/gotest.json -- $(go-flags-with-tags) $(GOTFLAGS) -covermode=atomic -coverprofile=coverage/unit_tests.coverprofile -coverpkg=$$($(GOCC) list $(go-tags) ./... | grep -vE '$(COVERPKG_EXCLUDE)' | tr '\n' ',' | sed 's/,$$//') $$($(GOCC) list $(go-tags) ./... 
| grep -vE '$(UNIT_EXCLUDE)') +.PHONY: test_unit -test_go_race: GOTFLAGS += -race -test_go_race: test_go_test -.PHONY: test_go_race +# CLI/integration tests (requires built binary in PATH) +# Includes test/cli, test/integration, and client/rpc +# Produces JSON for CI reporting +# Override TEST_CLI_TIMEOUT for local development: make test_cli TEST_CLI_TIMEOUT=5m +TEST_CLI_TIMEOUT ?= 10m +test_cli: cmd/ipfs/ipfs test/bin/gotestsum $$(DEPS_GO) + mkdir -p test/cli + rm -f test/cli/cli-tests.json + PATH="$(CURDIR)/cmd/ipfs:$(CURDIR)/test/bin:$$PATH" gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/cli/cli-tests.json -- -v -timeout=$(TEST_CLI_TIMEOUT) ./test/cli/... ./test/integration/... ./client/rpc/... +.PHONY: test_cli -test_go_expensive: test_go_test test_go_build -.PHONY: test_go_expensive -TEST_GO += test_go_expensive +# Example tests (docs/examples/kubo-as-a-library) +# Tests against both published and current kubo versions +# Uses timeout to ensure CI gets output before job-level timeout kills everything +TEST_EXAMPLES_TIMEOUT ?= 2m +test_examples: + cd docs/examples/kubo-as-a-library && go test -v -timeout=$(TEST_EXAMPLES_TIMEOUT) ./... && cp go.mod go.mod.bak && cp go.sum go.sum.bak && (go mod edit -replace github.com/ipfs/kubo=./../../.. && go mod tidy && go test -v -timeout=$(TEST_EXAMPLES_TIMEOUT) ./...; ret=$$?; mv go.mod.bak go.mod; mv go.sum.bak go.sum; exit $$ret) +.PHONY: test_examples +# Build kubo for all platforms from .github/build-platforms.yml +test_go_build: + bin/test-go-build-platforms +.PHONY: test_go_build + +# Check Go source formatting test_go_fmt: bin/test-go-fmt .PHONY: test_go_fmt -TEST_GO += test_go_fmt +# Run golangci-lint (used by CI) test_go_lint: test/bin/golangci-lint golangci-lint run --timeout=3m ./... 
.PHONY: test_go_lint -test_go: $(TEST_GO) - -check_go_version: - @$(GOCC) version - bin/check_go_version $(GO_MIN_VERSION) -.PHONY: check_go_version -DEPS_GO += check_go_version - +TEST_GO := test_go_fmt test_unit test_cli test_examples TEST += $(TEST_GO) -TEST_SHORT += test_go_fmt test_go_short +TEST_SHORT += test_go_fmt test_unit diff --git a/mk/util.mk b/mk/util.mk index 2ce48583f..3eb9f76d0 100644 --- a/mk/util.mk +++ b/mk/util.mk @@ -9,26 +9,9 @@ else PATH_SEP :=: endif -SUPPORTED_PLATFORMS += windows-386 -SUPPORTED_PLATFORMS += windows-amd64 - -SUPPORTED_PLATFORMS += linux-arm -SUPPORTED_PLATFORMS += linux-arm64 -SUPPORTED_PLATFORMS += linux-386 -SUPPORTED_PLATFORMS += linux-amd64 - -SUPPORTED_PLATFORMS += darwin-amd64 -ifeq ($(shell bin/check_go_version "1.16.0" 2>/dev/null; echo $$?),0) -SUPPORTED_PLATFORMS += darwin-arm64 -endif -SUPPORTED_PLATFORMS += freebsd-386 -SUPPORTED_PLATFORMS += freebsd-amd64 - -SUPPORTED_PLATFORMS += openbsd-386 -SUPPORTED_PLATFORMS += openbsd-amd64 - -SUPPORTED_PLATFORMS += netbsd-386 -SUPPORTED_PLATFORMS += netbsd-amd64 +# Platforms are now defined in .github/build-platforms.yml +# The cmd/ipfs-try-build target is deprecated in favor of GitHub Actions +# Use 'make supported' to see the list of platforms space:=$() $() comma:=, diff --git a/p2p/listener.go b/p2p/listener.go index f5942ffa0..823f68e81 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -20,6 +20,10 @@ type Listener interface { // close closes the listener. Does not affect child streams close() + + // Done returns a channel that is closed when the listener is closed. + // This allows callers to detect when a listener has been removed. + Done() <-chan struct{} } // Listeners manages a group of Listener implementations, @@ -73,15 +77,13 @@ func (r *Listeners) Register(l Listener) error { return nil } +// Close removes and closes all listeners for which matchFunc returns true. +// Returns the number of listeners closed. 
func (r *Listeners) Close(matchFunc func(listener Listener) bool) int { - todo := make([]Listener, 0) + var todo []Listener r.Lock() for _, l := range r.Listeners { - if !matchFunc(l) { - continue - } - - if _, ok := r.Listeners[l.key()]; ok { + if matchFunc(l) { delete(r.Listeners, l.key()) todo = append(todo, l) } diff --git a/p2p/local.go b/p2p/local.go index 98028c5d4..31f70e5fc 100644 --- a/p2p/local.go +++ b/p2p/local.go @@ -23,6 +23,7 @@ type localListener struct { peer peer.ID listener manet.Listener + done chan struct{} } // ForwardLocal creates new P2P stream to a remote listener. @@ -32,6 +33,7 @@ func (p2p *P2P) ForwardLocal(ctx context.Context, peer peer.ID, proto protocol.I p2p: p2p, proto: proto, peer: peer, + done: make(chan struct{}), } maListener, err := manet.Listen(bindAddr) @@ -98,6 +100,11 @@ func (l *localListener) setupStream(local manet.Conn) { func (l *localListener) close() { l.listener.Close() + close(l.done) +} + +func (l *localListener) Done() <-chan struct{} { + return l.done } func (l *localListener) Protocol() protocol.ID { diff --git a/p2p/p2p.go b/p2p/p2p.go index 1d0989421..1d14dfb80 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -1,7 +1,7 @@ package p2p import ( - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" p2phost "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" pstore "github.com/libp2p/go-libp2p/core/peerstore" diff --git a/p2p/remote.go b/p2p/remote.go index b867cb313..fb7b7ccba 100644 --- a/p2p/remote.go +++ b/p2p/remote.go @@ -25,6 +25,8 @@ type remoteListener struct { // reportRemote if set to true makes the handler send '\n' // to target before any data is forwarded reportRemote bool + + done chan struct{} } // ForwardRemote creates new p2p listener. 
@@ -36,6 +38,7 @@ func (p2p *P2P) ForwardRemote(ctx context.Context, proto protocol.ID, addr ma.Mu addr: addr, reportRemote: reportRemote, + done: make(chan struct{}), } if err := p2p.ListenersP2P.Register(listener); err != nil { @@ -99,7 +102,13 @@ func (l *remoteListener) TargetAddress() ma.Multiaddr { return l.addr } -func (l *remoteListener) close() {} +func (l *remoteListener) close() { + close(l.done) +} + +func (l *remoteListener) Done() <-chan struct{} { + return l.done +} func (l *remoteListener) key() protocol.ID { return l.proto diff --git a/plugin/loader/load_nocgo.go b/plugin/loader/load_nocgo.go index 9de31a9eb..3e0f39337 100644 --- a/plugin/loader/load_nocgo.go +++ b/plugin/loader/load_nocgo.go @@ -1,7 +1,4 @@ //go:build !cgo && !noplugin && (linux || darwin || freebsd) -// +build !cgo -// +build !noplugin -// +build linux darwin freebsd package loader diff --git a/plugin/loader/load_noplugin.go b/plugin/loader/load_noplugin.go index fc56b16a0..dddeac91d 100644 --- a/plugin/loader/load_noplugin.go +++ b/plugin/loader/load_noplugin.go @@ -1,5 +1,4 @@ //go:build noplugin -// +build noplugin package loader diff --git a/plugin/loader/load_unix.go b/plugin/loader/load_unix.go index 4a5dccb40..05af30197 100644 --- a/plugin/loader/load_unix.go +++ b/plugin/loader/load_unix.go @@ -1,7 +1,4 @@ //go:build cgo && !noplugin && (linux || darwin || freebsd) -// +build cgo -// +build !noplugin -// +build linux darwin freebsd package loader diff --git a/plugin/loader/loader.go b/plugin/loader/loader.go index 4ea3ac226..624907614 100644 --- a/plugin/loader/loader.go +++ b/plugin/loader/loader.go @@ -18,7 +18,7 @@ import ( plugin "github.com/ipfs/kubo/plugin" fsrepo "github.com/ipfs/kubo/repo/fsrepo" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" opentracing "github.com/opentracing/opentracing-go" ) diff --git a/plugin/loader/preload.go b/plugin/loader/preload.go index 2ad84e594..eb1bd5a6e 100644 --- a/plugin/loader/preload.go +++ 
b/plugin/loader/preload.go @@ -8,7 +8,9 @@ import ( pluginipldgit "github.com/ipfs/kubo/plugin/plugins/git" pluginlevelds "github.com/ipfs/kubo/plugin/plugins/levelds" pluginnopfs "github.com/ipfs/kubo/plugin/plugins/nopfs" + pluginpebbleds "github.com/ipfs/kubo/plugin/plugins/pebbleds" pluginpeerlog "github.com/ipfs/kubo/plugin/plugins/peerlog" + plugintelemetry "github.com/ipfs/kubo/plugin/plugins/telemetry" ) // DO NOT EDIT THIS FILE @@ -21,7 +23,9 @@ func init() { Preload(pluginbadgerds.Plugins...) Preload(pluginflatfs.Plugins...) Preload(pluginlevelds.Plugins...) + Preload(pluginpebbleds.Plugins...) Preload(pluginpeerlog.Plugins...) Preload(pluginfxtest.Plugins...) Preload(pluginnopfs.Plugins...) + Preload(plugintelemetry.Plugins...) } diff --git a/plugin/loader/preload_list b/plugin/loader/preload_list index 462a3f393..80e5b9cc9 100644 --- a/plugin/loader/preload_list +++ b/plugin/loader/preload_list @@ -9,6 +9,8 @@ iplddagjose github.com/ipfs/kubo/plugin/plugins/dagjose * badgerds github.com/ipfs/kubo/plugin/plugins/badgerds * flatfs github.com/ipfs/kubo/plugin/plugins/flatfs * levelds github.com/ipfs/kubo/plugin/plugins/levelds * +pebbleds github.com/ipfs/kubo/plugin/plugins/pebbleds * peerlog github.com/ipfs/kubo/plugin/plugins/peerlog * fxtest github.com/ipfs/kubo/plugin/plugins/fxtest * -nopfs github.com/ipfs/kubo/plugin/plugins/nopfs * \ No newline at end of file +nopfs github.com/ipfs/kubo/plugin/plugins/nopfs * +telemetry github.com/ipfs/kubo/plugin/plugins/telemetry * diff --git a/plugin/plugins/badgerds/badgerds.go b/plugin/plugins/badgerds/badgerds.go index 5f5781f8f..2410f196c 100644 --- a/plugin/plugins/badgerds/badgerds.go +++ b/plugin/plugins/badgerds/badgerds.go @@ -108,6 +108,7 @@ func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec { } func (c *datastoreConfig) Create(path string) (repo.Datastore, error) { + fmt.Fprintln(os.Stderr, "⚠️ badgerds is based on badger 1.x, which has known bugs and is no longer supported by the upstream team. 
Please switch to a newer datastore such as pebbleds or flatfs.") p := c.path if !filepath.IsAbs(p) { p = filepath.Join(path, p) diff --git a/plugin/plugins/flatfs/flatfs.go b/plugin/plugins/flatfs/flatfs.go index 1a23dfcca..944f912ae 100644 --- a/plugin/plugins/flatfs/flatfs.go +++ b/plugin/plugins/flatfs/flatfs.go @@ -42,7 +42,7 @@ type datastoreConfig struct { syncField bool } -// BadgerdsDatastoreConfig returns a configuration stub for a badger datastore +// DatastoreConfigParser returns a configuration stub for a flatfs datastore // from the given parameters. func (*flatfsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap { return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) { diff --git a/plugin/plugins/fxtest/fxtest.go b/plugin/plugins/fxtest/fxtest.go index 175dc6ec6..4205e3eb8 100644 --- a/plugin/plugins/fxtest/fxtest.go +++ b/plugin/plugins/fxtest/fxtest.go @@ -3,7 +3,7 @@ package fxtest import ( "os" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/ipfs/kubo/core" "github.com/ipfs/kubo/plugin" "go.uber.org/fx" diff --git a/plugin/plugins/levelds/levelds.go b/plugin/plugins/levelds/levelds.go index b08872de6..78331730e 100644 --- a/plugin/plugins/levelds/levelds.go +++ b/plugin/plugins/levelds/levelds.go @@ -42,7 +42,7 @@ type datastoreConfig struct { compression ldbopts.Compression } -// BadgerdsDatastoreConfig returns a configuration stub for a badger datastore +// DatastoreConfigParser returns a configuration stub for a badger datastore // from the given parameters. 
func (*leveldsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
 	return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) {
diff --git a/plugin/plugins/pebbleds/pebbleds.go b/plugin/plugins/pebbleds/pebbleds.go
new file mode 100644
index 000000000..141eff74b
--- /dev/null
+++ b/plugin/plugins/pebbleds/pebbleds.go
@@ -0,0 +1,195 @@
+package pebbleds
+
+import (
+	"fmt"
+	"path/filepath"
+	"time"
+
+	"github.com/cockroachdb/pebble/v2"
+	pebbleds "github.com/ipfs/go-ds-pebble"
+	"github.com/ipfs/kubo/misc/fsutil"
+	"github.com/ipfs/kubo/plugin"
+	"github.com/ipfs/kubo/repo"
+	"github.com/ipfs/kubo/repo/fsrepo"
+)
+
+// Plugins is the exported list of plugins that will be loaded.
+var Plugins = []plugin.Plugin{
+	&pebbledsPlugin{},
+}
+
+type pebbledsPlugin struct{}
+
+var _ plugin.PluginDatastore = (*pebbledsPlugin)(nil)
+
+func (*pebbledsPlugin) Name() string {
+	return "ds-pebble"
+}
+
+func (*pebbledsPlugin) Version() string {
+	return "0.1.0"
+}
+
+func (*pebbledsPlugin) Init(_ *plugin.Environment) error {
+	return nil
+}
+
+func (*pebbledsPlugin) DatastoreTypeName() string {
+	return "pebbleds"
+}
+
+type datastoreConfig struct {
+	path      string
+	cacheSize int64
+
+	// Documentation of these values: https://pkg.go.dev/github.com/cockroachdb/pebble/v2#Options
+	pebbleOpts *pebble.Options
+}
+
+// DatastoreConfigParser returns a configuration stub for a pebble datastore
+// from the given parameters.
+func (*pebbledsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap { + return func(params map[string]any) (fsrepo.DatastoreConfig, error) { + var c datastoreConfig + var ok bool + + c.path, ok = params["path"].(string) + if !ok { + return nil, fmt.Errorf("'path' field is missing or not string") + } + + cacheSize, err := getConfigInt("cacheSize", params) + if err != nil { + return nil, err + } + c.cacheSize = int64(cacheSize) + + bytesPerSync, err := getConfigInt("bytesPerSync", params) + if err != nil { + return nil, err + } + disableWAL, err := getConfigBool("disableWAL", params) + if err != nil { + return nil, err + } + fmv, err := getConfigInt("formatMajorVersion", params) + if err != nil { + return nil, err + } + formatMajorVersion := pebble.FormatMajorVersion(fmv) + l0CompactionThreshold, err := getConfigInt("l0CompactionThreshold", params) + if err != nil { + return nil, err + } + l0StopWritesThreshold, err := getConfigInt("l0StopWritesThreshold", params) + if err != nil { + return nil, err + } + lBaseMaxBytes, err := getConfigInt("lBaseMaxBytes", params) + if err != nil { + return nil, err + } + maxConcurrentCompactions, err := getConfigInt("maxConcurrentCompactions", params) + if err != nil { + return nil, err + } + memTableSize, err := getConfigInt("memTableSize", params) + if err != nil { + return nil, err + } + memTableStopWritesThreshold, err := getConfigInt("memTableStopWritesThreshold", params) + if err != nil { + return nil, err + } + walBytesPerSync, err := getConfigInt("walBytesPerSync", params) + if err != nil { + return nil, err + } + walMinSyncSec, err := getConfigInt("walMinSyncIntervalSeconds", params) + if err != nil { + return nil, err + } + + if formatMajorVersion == 0 { + // Pebble DB format not configured. Automatically ratchet the + // database to the latest format. This may prevent downgrade. 
+ formatMajorVersion = pebble.FormatNewest + } else if formatMajorVersion < pebble.FormatNewest { + // Pebble DB format is configured, but is not the latest. + fmt.Println("⚠️ A newer pebble db format is available.") + fmt.Println(" To upgrade, set the following in the pebble datastore config:") + fmt.Println(" \"formatMajorVersion\":", int(pebble.FormatNewest)) + } + + if bytesPerSync != 0 || disableWAL || formatMajorVersion != 0 || l0CompactionThreshold != 0 || l0StopWritesThreshold != 0 || lBaseMaxBytes != 0 || maxConcurrentCompactions != 0 || memTableSize != 0 || memTableStopWritesThreshold != 0 || walBytesPerSync != 0 || walMinSyncSec != 0 { + c.pebbleOpts = &pebble.Options{ + BytesPerSync: bytesPerSync, + DisableWAL: disableWAL, + FormatMajorVersion: formatMajorVersion, + L0CompactionThreshold: l0CompactionThreshold, + L0StopWritesThreshold: l0StopWritesThreshold, + LBaseMaxBytes: int64(lBaseMaxBytes), + MemTableSize: uint64(memTableSize), + MemTableStopWritesThreshold: memTableStopWritesThreshold, + WALBytesPerSync: walBytesPerSync, + } + if maxConcurrentCompactions != 0 { + c.pebbleOpts.CompactionConcurrencyRange = func() (int, int) { return 1, maxConcurrentCompactions } + } + if walMinSyncSec != 0 { + c.pebbleOpts.WALMinSyncInterval = func() time.Duration { return time.Duration(walMinSyncSec) * time.Second } + } + } + + return &c, nil + } +} + +func getConfigBool(name string, params map[string]any) (bool, error) { + val, ok := params[name] + if ok { + bval, ok := val.(bool) + if !ok { + return false, fmt.Errorf("%q field was not a bool", name) + } + return bval, nil + } + return false, nil +} + +func getConfigInt(name string, params map[string]any) (int, error) { + val, ok := params[name] + if ok { + // TODO: see why val may be an int or a float64. 
+ ival, ok := val.(int) + if !ok { + fval, ok := val.(float64) + if !ok { + return 0, fmt.Errorf("%q field was not an integer or a float64", name) + } + return int(fval), nil + } + return ival, nil + } + return 0, nil +} + +func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec { + return map[string]interface{}{ + "type": "pebbleds", + "path": c.path, + } +} + +func (c *datastoreConfig) Create(path string) (repo.Datastore, error) { + p := c.path + if !filepath.IsAbs(p) { + p = filepath.Join(path, p) + } + + if err := fsutil.DirWritable(p); err != nil { + return nil, err + } + + return pebbleds.NewDatastore(p, pebbleds.WithCacheSize(c.cacheSize), pebbleds.WithPebbleOpts(c.pebbleOpts)) +} diff --git a/plugin/plugins/peerlog/peerlog.go b/plugin/plugins/peerlog/peerlog.go index d55a7f0b9..822a636c8 100644 --- a/plugin/plugins/peerlog/peerlog.go +++ b/plugin/plugins/peerlog/peerlog.go @@ -5,7 +5,7 @@ import ( "sync/atomic" "time" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" core "github.com/ipfs/kubo/core" plugin "github.com/ipfs/kubo/plugin" event "github.com/libp2p/go-libp2p/core/event" @@ -40,7 +40,7 @@ type plEvent struct { // // Usage: // -// GOLOG_FILE=~/peer.log IPFS_LOGGING_FMT=json ipfs daemon +// GOLOG_FILE=~/peer.log GOLOG_LOG_FMT=json ipfs daemon // // Output: // @@ -186,7 +186,7 @@ func (pl *peerLogPlugin) Start(node *core.IpfsNode) error { return nil } - // Ensure logs from this plugin get printed regardless of global IPFS_LOGGING value + // Ensure logs from this plugin get printed regardless of global GOLOG_LOG_LEVEL value if err := logging.SetLogLevel("plugin/peerlog", "info"); err != nil { return fmt.Errorf("failed to set log level: %w", err) } diff --git a/plugin/plugins/telemetry/telemetry.go b/plugin/plugins/telemetry/telemetry.go new file mode 100644 index 000000000..054cd6601 --- /dev/null +++ b/plugin/plugins/telemetry/telemetry.go @@ -0,0 +1,660 @@ +package telemetry + +import ( + "bytes" + "context" + "encoding/json" + 
"fmt" + "net/http" + "os" + "path" + "runtime" + "slices" + "strings" + "sync" + "time" + + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + ipfs "github.com/ipfs/kubo" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core" + "github.com/ipfs/kubo/core/corerepo" + "github.com/ipfs/kubo/plugin" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/pnet" + multiaddr "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" +) + +var log = logging.Logger("telemetry") + +// Caching for virtualization detection - these values never change during process lifetime +var ( + containerDetectionOnce sync.Once + vmDetectionOnce sync.Once + isContainerCached bool + isVMCached bool +) + +const ( + modeEnvVar = "IPFS_TELEMETRY" + uuidFilename = "telemetry_uuid" + endpoint = "https://telemetry.ipshipyard.dev" + sendDelay = 15 * time.Minute // delay before first telemetry collection after daemon start + sendInterval = 24 * time.Hour // interval between telemetry collections after the first one + httpTimeout = 30 * time.Second // timeout for telemetry HTTP requests +) + +type pluginMode int + +const ( + modeAuto pluginMode = iota + modeOn + modeOff +) + +// repoSizeBuckets defines size thresholds for categorizing repository sizes. +// Each value represents the upper limit of a bucket in bytes (except the last) +var repoSizeBuckets = []uint64{ + 1 << 30, // 1 GB + 5 << 30, // 5 GB + 10 << 30, // 10 GB + 100 << 30, // 100 GB + 500 << 30, // 500 GB + 1 << 40, // 1 TB + 10 << 40, // 10 TB + 11 << 40, // + anything more than 10TB falls here. +} + +var uptimeBuckets = []time.Duration{ + 1 * 24 * time.Hour, + 2 * 24 * time.Hour, + 3 * 24 * time.Hour, + 7 * 24 * time.Hour, + 14 * 24 * time.Hour, + 30 * 24 * time.Hour, + 31 * 24 * time.Hour, // + anything more than 30 days falls here. +} + +// A LogEvent is the object sent to the telemetry endpoint. 
+// See https://github.com/ipfs/kubo/blob/master/docs/telemetry.md for details. +type LogEvent struct { + UUID string `json:"uuid"` + + AgentVersion string `json:"agent_version"` + + PrivateNetwork bool `json:"private_network"` + + BootstrappersCustom bool `json:"bootstrappers_custom"` + + RepoSizeBucket uint64 `json:"repo_size_bucket"` + + UptimeBucket time.Duration `json:"uptime_bucket"` + + ReproviderStrategy string `json:"reprovider_strategy"` + ProvideDHTSweepEnabled bool `json:"provide_dht_sweep_enabled"` + ProvideDHTIntervalCustom bool `json:"provide_dht_interval_custom"` + ProvideDHTMaxWorkersCustom bool `json:"provide_dht_max_workers_custom"` + + RoutingType string `json:"routing_type"` + RoutingAcceleratedDHTClient bool `json:"routing_accelerated_dht_client"` + RoutingDelegatedCount int `json:"routing_delegated_count"` + + AutoNATServiceMode string `json:"autonat_service_mode"` + AutoNATReachability string `json:"autonat_reachability"` + + AutoConf bool `json:"autoconf"` + AutoConfCustom bool `json:"autoconf_custom"` + + SwarmEnableHolePunching bool `json:"swarm_enable_hole_punching"` + SwarmCircuitAddresses bool `json:"swarm_circuit_addresses"` + SwarmIPv4PublicAddresses bool `json:"swarm_ipv4_public_addresses"` + SwarmIPv6PublicAddresses bool `json:"swarm_ipv6_public_addresses"` + + AutoTLSAutoWSS bool `json:"auto_tls_auto_wss"` + AutoTLSDomainSuffixCustom bool `json:"auto_tls_domain_suffix_custom"` + + DiscoveryMDNSEnabled bool `json:"discovery_mdns_enabled"` + + PlatformOS string `json:"platform_os"` + PlatformArch string `json:"platform_arch"` + PlatformContainerized bool `json:"platform_containerized"` + PlatformVM bool `json:"platform_vm"` +} + +var Plugins = []plugin.Plugin{ + &telemetryPlugin{}, +} + +type telemetryPlugin struct { + uuidFilename string + mode pluginMode + endpoint string + runOnce bool // test-only flag: when true, sends telemetry immediately without delay + sendDelay time.Duration + + node *core.IpfsNode + config *config.Config 
+ event *LogEvent + startTime time.Time +} + +func (p *telemetryPlugin) Name() string { + return "telemetry" +} + +func (p *telemetryPlugin) Version() string { + return "0.0.1" +} + +func readFromConfig(cfg interface{}, key string) string { + if cfg == nil { + return "" + } + + pcfg, ok := cfg.(map[string]interface{}) + if !ok { + return "" + } + + val, ok := pcfg[key].(string) + if !ok { + return "" + } + return val +} + +func (p *telemetryPlugin) Init(env *plugin.Environment) error { + // logging.SetLogLevel("telemetry", "DEBUG") + log.Debug("telemetry plugin Init()") + p.event = &LogEvent{} + p.startTime = time.Now() + + repoPath := env.Repo + p.uuidFilename = path.Join(repoPath, uuidFilename) + + v := os.Getenv(modeEnvVar) + if v != "" { + log.Debug("mode set from env-var") + } else if pmode := readFromConfig(env.Config, "Mode"); pmode != "" { + v = pmode + log.Debug("mode set from config") + } + + // read "Delay" from the config. Parse as duration. Set p.sendDelay to it + // or set default. 
+ if delayStr := readFromConfig(env.Config, "Delay"); delayStr != "" { + delay, err := time.ParseDuration(delayStr) + if err != nil { + log.Debug("sendDelay set from default") + p.sendDelay = sendDelay + } else { + log.Debug("sendDelay set from config") + p.sendDelay = delay + } + } else { + log.Debug("sendDelay set from default") + p.sendDelay = sendDelay + } + + p.endpoint = endpoint + if ep := readFromConfig(env.Config, "Endpoint"); ep != "" { + log.Debug("endpoint set from config", ep) + p.endpoint = ep + } + + switch v { + case "off": + p.mode = modeOff + log.Debug("telemetry disabled via opt-out") + // Remove UUID file if it exists when user opts out + if _, err := os.Stat(p.uuidFilename); err == nil { + if err := os.Remove(p.uuidFilename); err != nil { + log.Debugf("failed to remove telemetry UUID file: %s", err) + } else { + log.Debug("removed existing telemetry UUID file due to opt-out") + } + } + return nil + case "auto": + p.mode = modeAuto + default: + p.mode = modeOn + } + log.Debug("telemetry mode: ", p.mode) + return nil +} + +func (p *telemetryPlugin) loadUUID() error { + // Generate or read our UUID from disk + b, err := os.ReadFile(p.uuidFilename) + if err != nil { + if !os.IsNotExist(err) { + log.Errorf("error reading telemetry uuid from disk: %s", err) + return err + } + uid, err := uuid.NewRandom() + if err != nil { + log.Errorf("cannot generate telemetry uuid: %s", err) + return err + } + p.event.UUID = uid.String() + p.mode = modeAuto + log.Debugf("new telemetry UUID %s. 
Mode set to Auto", uid) + + // Write the UUID to disk + if err := os.WriteFile(p.uuidFilename, []byte(p.event.UUID), 0600); err != nil { + log.Errorf("cannot write telemetry uuid: %s", err) + return err + } + return nil + } + + v := string(b) + v = strings.TrimSpace(v) + uid, err := uuid.Parse(v) + if err != nil { + log.Errorf("cannot parse telemetry uuid: %s", err) + return err + } + log.Debugf("uuid read from disk %s", uid) + p.event.UUID = uid.String() + return nil +} + +func (p *telemetryPlugin) hasDefaultBootstrapPeers() bool { + // With autoconf, default bootstrap is represented as ["auto"] + currentPeers := p.config.Bootstrap + return len(currentPeers) == 1 && currentPeers[0] == "auto" +} + +func (p *telemetryPlugin) showInfo() { + fmt.Printf(` + +ℹ️ Anonymous telemetry will be enabled in %s + +Kubo will collect anonymous usage data to help improve the software: +• What: Feature usage and configuration (no personal data) + Use GOLOG_LOG_LEVEL="telemetry=debug" to inspect collected data +• When: First collection in %s, then every 24h +• How: HTTP POST to %s + Anonymous ID: %s + +No data sent yet. To opt-out before collection starts: +• Set environment: %s=off +• Or run: ipfs config Plugins.Plugins.telemetry.Config.Mode off +• Then restart daemon + +This message is shown only once. +Learn more: https://github.com/ipfs/kubo/blob/master/docs/telemetry.md + + +`, p.sendDelay, p.sendDelay, endpoint, p.event.UUID, modeEnvVar) +} + +// Start finishes telemetry initialization once the IpfsNode is ready, +// collects telemetry data and sends it to the endpoint. +func (p *telemetryPlugin) Start(n *core.IpfsNode) error { + // We should not be crashing the daemon due to problems with telemetry + // so this is always going to return nil and panics are going to be + // handled. 
+	defer func() {
+		if r := recover(); r != nil {
+			log.Errorf("telemetry plugin panicked: %v", r)
+		}
+	}()
+
+	p.node = n
+	cfg, err := n.Repo.Config()
+	if err != nil {
+		log.Errorf("error getting the repo.Config: %s", err)
+		return nil
+	}
+	p.config = cfg
+	if p.mode == modeOff {
+		log.Debug("telemetry collection skipped: opted out")
+		return nil
+	}
+
+	if !n.IsDaemon || !n.IsOnline {
+		log.Debugf("skipping telemetry. Daemon: %t. Online: %t", n.IsDaemon, n.IsOnline)
+		return nil
+	}
+
+	// loadUUID might switch to modeAuto when generating a new uuid
+	if err := p.loadUUID(); err != nil {
+		p.mode = modeOff
+		return nil
+	}
+
+	if p.mode == modeAuto {
+		p.showInfo()
+	}
+
+	// runOnce is only used in tests to send telemetry immediately.
+	// In production, this is always false, ensuring users get the 15-minute delay.
+	if p.runOnce {
+		p.prepareEvent()
+		return p.sendTelemetry()
+	}
+
+	go func() {
+		timer := time.NewTimer(p.sendDelay)
+		for range timer.C {
+			p.prepareEvent()
+			if err := p.sendTelemetry(); err != nil {
+				log.Warnf("telemetry submission failed: %s (will retry in %s)", err, sendInterval)
+			}
+			timer.Reset(sendInterval)
+		}
+	}()
+
+	return nil
+}
+
+func (p *telemetryPlugin) prepareEvent() {
+	p.collectBasicInfo()
+	p.collectRoutingInfo()
+	p.collectProvideInfo()
+	p.collectAutoNATInfo()
+	p.collectAutoConfInfo()
+	p.collectSwarmInfo()
+	p.collectAutoTLSInfo()
+	p.collectDiscoveryInfo()
+	p.collectPlatformInfo()
+}
+
+func (p *telemetryPlugin) collectBasicInfo() {
+	p.event.AgentVersion = ipfs.GetUserAgentVersion()
+
+	privNet := false
+	if pnet.ForcePrivateNetwork {
+		privNet = true
+	} else if key, _ := p.node.Repo.SwarmKey(); key != nil {
+		privNet = true
+	}
+	p.event.PrivateNetwork = privNet
+
+	p.event.BootstrappersCustom = !p.hasDefaultBootstrapPeers()
+
+	repoSizeBucket := repoSizeBuckets[len(repoSizeBuckets)-1]
+	sizeStat, err := corerepo.RepoSize(context.Background(), p.node)
+	if err == nil {
+		for _, b := range repoSizeBuckets {
+			if
sizeStat.RepoSize > b { + continue + } + repoSizeBucket = b + break + } + p.event.RepoSizeBucket = repoSizeBucket + } else { + log.Debugf("error setting sizeStat: %s", err) + } + + uptime := time.Since(p.startTime) + uptimeBucket := uptimeBuckets[len(uptimeBuckets)-1] + for _, bucket := range uptimeBuckets { + if uptime > bucket { + continue + + } + uptimeBucket = bucket + break + } + p.event.UptimeBucket = uptimeBucket +} + +func (p *telemetryPlugin) collectRoutingInfo() { + p.event.RoutingType = p.config.Routing.Type.WithDefault("auto") + p.event.RoutingAcceleratedDHTClient = p.config.Routing.AcceleratedDHTClient.WithDefault(false) + p.event.RoutingDelegatedCount = len(p.config.Routing.DelegatedRouters) +} + +func (p *telemetryPlugin) collectProvideInfo() { + p.event.ReproviderStrategy = p.config.Provide.Strategy.WithDefault(config.DefaultProvideStrategy) + p.event.ProvideDHTSweepEnabled = p.config.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) + p.event.ProvideDHTIntervalCustom = !p.config.Provide.DHT.Interval.IsDefault() + p.event.ProvideDHTMaxWorkersCustom = !p.config.Provide.DHT.MaxWorkers.IsDefault() +} + +type reachabilityHost interface { + Reachability() network.Reachability +} + +func (p *telemetryPlugin) collectAutoNATInfo() { + autonat := p.config.AutoNAT.ServiceMode + if autonat == config.AutoNATServiceUnset { + autonat = config.AutoNATServiceEnabled + } + autoNATSvcModeB, err := autonat.MarshalText() + if err == nil { + autoNATSvcMode := string(autoNATSvcModeB) + if autoNATSvcMode == "" { + autoNATSvcMode = "unset" + } + p.event.AutoNATServiceMode = autoNATSvcMode + } + + h := p.node.PeerHost + reachHost, ok := h.(reachabilityHost) + if ok { + p.event.AutoNATReachability = reachHost.Reachability().String() + } +} + +func (p *telemetryPlugin) collectSwarmInfo() { + p.event.SwarmEnableHolePunching = p.config.Swarm.EnableHolePunching.WithDefault(true) + + var circuitAddrs, publicIP4Addrs, publicIP6Addrs bool + for _, addr := 
range p.node.PeerHost.Addrs() { + if manet.IsPublicAddr(addr) { + if _, err := addr.ValueForProtocol(multiaddr.P_IP4); err == nil { + publicIP4Addrs = true + } else if _, err := addr.ValueForProtocol(multiaddr.P_IP6); err == nil { + publicIP6Addrs = true + } + } + if _, err := addr.ValueForProtocol(multiaddr.P_CIRCUIT); err == nil { + circuitAddrs = true + } + } + + p.event.SwarmCircuitAddresses = circuitAddrs + p.event.SwarmIPv4PublicAddresses = publicIP4Addrs + p.event.SwarmIPv6PublicAddresses = publicIP6Addrs +} + +func (p *telemetryPlugin) collectAutoTLSInfo() { + p.event.AutoTLSAutoWSS = p.config.AutoTLS.AutoWSS.WithDefault(config.DefaultAutoWSS) + domainSuffix := p.config.AutoTLS.DomainSuffix.WithDefault(config.DefaultDomainSuffix) + p.event.AutoTLSDomainSuffixCustom = domainSuffix != config.DefaultDomainSuffix +} + +func (p *telemetryPlugin) collectAutoConfInfo() { + p.event.AutoConf = p.config.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) + p.event.AutoConfCustom = p.config.AutoConf.URL.WithDefault(config.DefaultAutoConfURL) != config.DefaultAutoConfURL +} + +func (p *telemetryPlugin) collectDiscoveryInfo() { + p.event.DiscoveryMDNSEnabled = p.config.Discovery.MDNS.Enabled +} + +func (p *telemetryPlugin) collectPlatformInfo() { + p.event.PlatformOS = runtime.GOOS + p.event.PlatformArch = runtime.GOARCH + p.event.PlatformContainerized = isRunningInContainer() + p.event.PlatformVM = isRunningInVM() +} + +func isRunningInContainer() bool { + containerDetectionOnce.Do(func() { + isContainerCached = detectContainer() + }) + return isContainerCached +} + +func detectContainer() bool { + // Docker creates /.dockerenv inside containers + if _, err := os.Stat("/.dockerenv"); err == nil { + return true + } + + // Kubernetes mounts service account tokens inside pods + if _, err := os.Stat("/var/run/secrets/kubernetes.io"); err == nil { + return true + } + + // systemd-nspawn creates this file inside containers + if _, err := 
os.Stat("/run/systemd/container"); err == nil { + return true + } + + // Check if our process is running inside a container cgroup + // Look for container-specific patterns in the cgroup path after "::/" + if content, err := os.ReadFile("/proc/self/cgroup"); err == nil { + for line := range strings.Lines(string(content)) { + // cgroup lines format: "ID:subsystem:/path" + // We want to check the path part after the last ":" + parts := strings.SplitN(line, ":", 3) + if len(parts) == 3 { + cgroupPath := parts[2] + // Check for container-specific paths + containerIndicators := []string{ + "/docker/", // Docker containers + "/containerd/", // containerd runtime + "/cri-o/", // CRI-O runtime + "/lxc/", // LXC containers + "/podman/", // Podman containers + "/kubepods/", // Kubernetes pods + } + for _, indicator := range containerIndicators { + if strings.Contains(cgroupPath, indicator) { + return true + } + } + } + } + } + + // WSL is technically a container-like environment + if runtime.GOOS == "linux" { + if content, err := os.ReadFile("/proc/sys/kernel/osrelease"); err == nil { + osrelease := strings.ToLower(string(content)) + if strings.Contains(osrelease, "microsoft") || strings.Contains(osrelease, "wsl") { + return true + } + } + } + + // LXC sets container environment variable + if content, err := os.ReadFile("/proc/1/environ"); err == nil { + if strings.Contains(string(content), "container=lxc") { + return true + } + } + + // Additional check: In containers, PID 1 is often not systemd/init + if content, err := os.ReadFile("/proc/1/comm"); err == nil { + pid1 := strings.TrimSpace(string(content)) + // Common container init processes + containerInits := []string{"tini", "dumb-init", "s6-svscan", "runit"} + if slices.Contains(containerInits, pid1) { + return true + } + } + + return false +} + +func isRunningInVM() bool { + vmDetectionOnce.Do(func() { + isVMCached = detectVM() + }) + return isVMCached +} + +func detectVM() bool { + // Check for VM-specific files and 
drivers that only exist inside VMs + vmIndicators := []string{ + "/proc/xen", // Xen hypervisor guest + "/sys/hypervisor/uuid", // KVM/Xen hypervisor guest + "/dev/vboxguest", // VirtualBox guest additions + "/sys/module/vmw_balloon", // VMware balloon driver (guest only) + "/sys/module/hv_vmbus", // Hyper-V VM bus driver (guest only) + } + + for _, path := range vmIndicators { + if _, err := os.Stat(path); err == nil { + return true + } + } + + // Check DMI for VM vendors - these strings only appear inside VMs + // DMI (Desktop Management Interface) is populated by the hypervisor + dmiFiles := map[string][]string{ + "/sys/class/dmi/id/sys_vendor": { + "qemu", "kvm", "vmware", "virtualbox", "xen", + "parallels", // Parallels Desktop + // Note: Removed "microsoft corporation" as it can match Surface devices + }, + "/sys/class/dmi/id/product_name": { + "virtualbox", "vmware", "kvm", "qemu", + "hvm domu", // Xen HVM guest + // Note: Removed generic "virtual machine" to avoid false positives + }, + "/sys/class/dmi/id/chassis_vendor": { + "qemu", "oracle", // Oracle for VirtualBox + }, + } + + for path, signatures := range dmiFiles { + if content, err := os.ReadFile(path); err == nil { + contentStr := strings.ToLower(strings.TrimSpace(string(content))) + for _, sig := range signatures { + if strings.Contains(contentStr, sig) { + return true + } + } + } + } + + return false +} + +func (p *telemetryPlugin) sendTelemetry() error { + data, err := json.MarshalIndent(p.event, "", " ") + if err != nil { + return err + } + + log.Debugf("sending telemetry:\n %s", data) + + req, err := http.NewRequest("POST", p.endpoint, bytes.NewBuffer(data)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", ipfs.GetUserAgentVersion()) + req.Close = true + + // Use client with timeout to prevent hanging + client := &http.Client{ + Timeout: httpTimeout, + } + resp, err := client.Do(req) + if err != nil { + log.Debugf("failed to 
send telemetry: %s", err) + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + err := fmt.Errorf("telemetry endpoint returned HTTP %d", resp.StatusCode) + log.Debug(err) + return err + } + log.Debugf("telemetry sent successfully (%d)", resp.StatusCode) + return nil +} diff --git a/plugin/plugins/telemetry/telemetry_test.go b/plugin/plugins/telemetry/telemetry_test.go new file mode 100644 index 000000000..6b88ced92 --- /dev/null +++ b/plugin/plugins/telemetry/telemetry_test.go @@ -0,0 +1,170 @@ +package telemetry + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/cockroachdb/pebble/v2" + logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core" + "github.com/ipfs/kubo/core/node/libp2p" + "github.com/ipfs/kubo/plugin" + "github.com/ipfs/kubo/plugin/plugins/pebbleds" + "github.com/ipfs/kubo/repo/fsrepo" +) + +func mockServer(t *testing.T) (*httptest.Server, func() LogEvent) { + t.Helper() + + var e LogEvent + + // Create a mock HTTP test server + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check if the request is POST to the correct endpoint + if r.Method != "POST" || r.URL.Path != "/" { + t.Log("invalid request") + http.Error(w, "invalid request", http.StatusBadRequest) + return + } + + // Check content type + if r.Header.Get("Content-Type") != "application/json" { + t.Log("invalid content type") + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + // Check if the body is not empty + if r.Body == nil { + t.Log("empty body") + http.Error(w, "empty body", http.StatusBadRequest) + return + } + + // Read the body + body, _ := io.ReadAll(r.Body) + if len(body) == 0 { + t.Log("zero-length body") + http.Error(w, "empty body", http.StatusBadRequest) + return + } + + t.Logf("Received telemetry:\n %s", string(body)) + + err := json.Unmarshal(body, &e) + if err != nil { + 
t.Log("error unmarshaling event", err)
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		// Return success
+		w.WriteHeader(http.StatusOK)
+	})), func() LogEvent { return e }
+}
+
+func makeNode(t *testing.T) (node *core.IpfsNode, repopath string) {
+	t.Helper()
+
+	// Create a Temporary Repo (assign the named return so callers get the real path)
+	repopath, err := os.MkdirTemp("", "ipfs-shell")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	pebbledspli := pebbleds.Plugins[0]
+	pebbledspl, ok := pebbledspli.(plugin.PluginDatastore)
+	if !ok {
+		t.Fatal("bad datastore plugin")
+	}
+
+	err = fsrepo.AddDatastoreConfigHandler(pebbledspl.DatastoreTypeName(), pebbledspl.DatastoreConfigParser())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a config with default options and a 2048 bit key
+	cfg, err := config.Init(io.Discard, 2048)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cfg.Datastore.Spec = map[string]interface{}{
+		"type":               "pebbleds",
+		"prefix":             "pebble.datastore",
+		"path":               "pebbleds",
+		"formatMajorVersion": int(pebble.FormatNewest),
+	}
+
+	// Create the repo with the config
+	err = fsrepo.Init(repopath, cfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Open the repo
+	repo, err := fsrepo.Open(repopath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Construct the node
+
+	nodeOptions := &core.BuildCfg{
+		Online:  true,
+		Routing: libp2p.NilRouterOption,
+		Repo:    repo,
+	}
+
+	node, err = core.NewNode(context.Background(), nodeOptions)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	node.IsDaemon = true
+	return
+}
+
+func TestSendTelemetry(t *testing.T) {
+	if err := logging.SetLogLevel("telemetry", "DEBUG"); err != nil {
+		t.Fatal(err)
+	}
+	ts, eventGetter := mockServer(t)
+	defer ts.Close()
+
+	node, repoPath := makeNode(t)
+
+	// Create a plugin instance
+	p := &telemetryPlugin{
+		runOnce: true,
+	}
+
+	// Initialize the plugin
+	pe := &plugin.Environment{
+		Repo:   repoPath,
+		Config: nil,
+	}
+	err := p.Init(pe)
+	if err != nil {
+		t.Fatalf("Init() failed: %v", err)
+	}
+
+	p.endpoint = ts.URL
+
+	// Start the
plugin + err = p.Start(node) + if err != nil { + t.Fatalf("Start() failed: %v", err) + } + + e := eventGetter() + if e.UUID != p.event.UUID { + t.Fatal("uuid mismatch") + } +} diff --git a/plugin/plugins/telemetry/telemetry_uuid b/plugin/plugins/telemetry/telemetry_uuid new file mode 100644 index 000000000..f80cb9c3f --- /dev/null +++ b/plugin/plugins/telemetry/telemetry_uuid @@ -0,0 +1 @@ +289ffed8-c770-49ae-922f-b020c8f776f2 \ No newline at end of file diff --git a/profile/profile.go b/profile/profile.go index be1e5adbb..32df334e3 100644 --- a/profile/profile.go +++ b/profile/profile.go @@ -14,7 +14,7 @@ import ( "sync" "time" - "github.com/ipfs/go-log" + "github.com/ipfs/go-log/v2" version "github.com/ipfs/kubo" ) diff --git a/repo/common/common.go b/repo/common/common.go index ab74ffca8..7c82f3ec3 100644 --- a/repo/common/common.go +++ b/repo/common/common.go @@ -2,6 +2,7 @@ package common import ( "fmt" + "maps" "strings" ) @@ -61,13 +62,13 @@ func MapSetKV(v map[string]interface{}, key string, value interface{}) error { return nil } -// Merges the right map into the left map, recursively traversing child maps -// until a non-map value is found. +// MapMergeDeep merges the right map into the left map, recursively traversing +// child maps until a non-map value is found. 
func MapMergeDeep(left, right map[string]interface{}) map[string]interface{} { // We want to alter a copy of the map, not the original - result := make(map[string]interface{}) - for k, v := range left { - result[k] = v + result := maps.Clone(left) + if result == nil { + result = make(map[string]interface{}) } for key, rightVal := range right { diff --git a/repo/common/common_test.go b/repo/common/common_test.go index b999db459..3fb7198ec 100644 --- a/repo/common/common_test.go +++ b/repo/common/common_test.go @@ -3,7 +3,7 @@ package common import ( "testing" - "github.com/ipfs/kubo/thirdparty/assert" + "github.com/stretchr/testify/require" ) func TestMapMergeDeepReturnsNew(t *testing.T) { @@ -15,7 +15,7 @@ func TestMapMergeDeepReturnsNew(t *testing.T) { MapMergeDeep(leftMap, rightMap) - assert.True(leftMap["A"] == "Hello World", t, "MapMergeDeep should return a new map instance") + require.Equal(t, "Hello World", leftMap["A"], "MapMergeDeep should return a new map instance") } func TestMapMergeDeepNewKey(t *testing.T) { @@ -46,7 +46,7 @@ func TestMapMergeDeepNewKey(t *testing.T) { } */ - assert.True(result["B"] == "Bar", t, "New keys in right map should exist in resulting map") + require.Equal(t, "Bar", result["B"], "New keys in right map should exist in resulting map") } func TestMapMergeDeepRecursesOnMaps(t *testing.T) { @@ -92,8 +92,8 @@ func TestMapMergeDeepRecursesOnMaps(t *testing.T) { */ resultA := result["A"].(map[string]interface{}) - assert.True(resultA["B"] == "A value!", t, "Unaltered values should not change") - assert.True(resultA["C"] == "A different value!", t, "Nested values should be altered") + require.Equal(t, "A value!", resultA["B"], "Unaltered values should not change") + require.Equal(t, "A different value!", resultA["C"], "Nested values should be altered") } func TestMapMergeDeepRightNotAMap(t *testing.T) { @@ -128,5 +128,5 @@ func TestMapMergeDeepRightNotAMap(t *testing.T) { } */ - assert.True(result["A"] == "Not a map!", t, "Right values 
that are not a map should be set on the result") + require.Equal(t, "Not a map!", result["A"], "Right values that are not a map should be set on the result") } diff --git a/repo/fsrepo/datastores.go b/repo/fsrepo/datastores.go index 86ed0a863..a8c6fa45c 100644 --- a/repo/fsrepo/datastores.go +++ b/repo/fsrepo/datastores.go @@ -17,16 +17,15 @@ import ( // ConfigFromMap creates a new datastore config from a map. type ConfigFromMap func(map[string]interface{}) (DatastoreConfig, error) -// DatastoreConfig is an abstraction of a datastore config. A "spec" -// is first converted to a DatastoreConfig and then Create() is called -// to instantiate a new datastore. +// DatastoreConfig is an abstraction of a datastore config. A "spec" is first +// converted to a DatastoreConfig and then Create() is called to instantiate a +// new datastore. type DatastoreConfig interface { - // DiskSpec returns a minimal configuration of the datastore - // represting what is stored on disk. Run time values are - // excluded. + // DiskSpec returns a minimal configuration of the datastore representing + // what is stored on disk. Run time values are excluded. DiskSpec() DiskSpec - // Create instantiate a new datastore from this config + // Create instantiates a new datastore from this config. 
Create(path string) (repo.Datastore, error) } diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 6e9e01dab..718d5614d 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -10,23 +10,23 @@ import ( "path/filepath" "strings" "sync" + "time" filestore "github.com/ipfs/boxo/filestore" keystore "github.com/ipfs/boxo/keystore" + version "github.com/ipfs/kubo" repo "github.com/ipfs/kubo/repo" "github.com/ipfs/kubo/repo/common" - dir "github.com/ipfs/kubo/thirdparty/dir" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" - util "github.com/ipfs/boxo/util" ds "github.com/ipfs/go-datastore" measure "github.com/ipfs/go-ds-measure" lockfile "github.com/ipfs/go-fs-lock" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" config "github.com/ipfs/kubo/config" serialize "github.com/ipfs/kubo/config/serialize" + "github.com/ipfs/kubo/misc/fsutil" "github.com/ipfs/kubo/repo/fsrepo/migrations" - homedir "github.com/mitchellh/go-homedir" ma "github.com/multiformats/go-multiaddr" ) @@ -37,7 +37,7 @@ const LockFile = "repo.lock" var log = logging.Logger("fsrepo") // RepoVersion is the version number that we are currently expecting to see. -var RepoVersion = 16 +var RepoVersion = version.RepoVersion var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md Sorry for the inconvenience. 
In the future, these will run automatically.` @@ -147,7 +147,23 @@ func open(repoPath string, userConfigFilePath string) (repo.Repo, error) { return nil, err } - r.lockfile, err = lockfile.Lock(r.path, LockFile) + text := os.Getenv("IPFS_WAIT_REPO_LOCK") + if text != "" { + var lockWaitTime time.Duration + lockWaitTime, err = time.ParseDuration(text) + if err != nil { + log.Errorw("Cannot parse value of IPFS_WAIT_REPO_LOCK as duration, not waiting for repo lock", "err", err, "value", text) + r.lockfile, err = lockfile.Lock(r.path, LockFile) + } else if lockWaitTime <= 0 { + r.lockfile, err = lockfile.WaitLock(context.Background(), r.path, LockFile) + } else { + ctx, cancel := context.WithTimeout(context.Background(), lockWaitTime) + r.lockfile, err = lockfile.WaitLock(ctx, r.path, LockFile) + cancel() + } + } else { + r.lockfile, err = lockfile.Lock(r.path, LockFile) + } if err != nil { return nil, err } @@ -176,7 +192,7 @@ func open(repoPath string, userConfigFilePath string) (repo.Repo, error) { } // check repo path, then check all constituent parts. 
- if err := dir.Writable(r.path); err != nil { + if err := fsutil.DirWritable(r.path); err != nil { return nil, err } @@ -207,7 +223,7 @@ func open(repoPath string, userConfigFilePath string) (repo.Repo, error) { } func newFSRepo(rpath string, userConfigFilePath string) (*FSRepo, error) { - expPath, err := homedir.Expand(filepath.Clean(rpath)) + expPath, err := fsutil.ExpandHome(filepath.Clean(rpath)) if err != nil { return nil, err } @@ -239,7 +255,7 @@ func configIsInitialized(path string) bool { if err != nil { return false } - if !util.FileExists(configFilename) { + if !fsutil.FileExists(configFilename) { return false } return true @@ -269,7 +285,7 @@ func initSpec(path string, conf map[string]interface{}) error { return err } - if util.FileExists(fn) { + if fsutil.FileExists(fn) { return nil } @@ -377,6 +393,7 @@ func (r *FSRepo) SetAPIAddr(addr ma.Multiaddr) error { } if _, err = f.WriteString(addr.String()); err != nil { + f.Close() return err } if err = f.Close(); err != nil { @@ -677,6 +694,12 @@ func (r *FSRepo) SetConfigKey(key string, value interface{}) error { return errors.New("repo is closed") } + // Validate the key's presence in the config structure. + err := config.CheckKey(key) + if err != nil { + return err + } + // Load into a map so we don't end up writing any additional defaults to the config file. 
var mapconf map[string]interface{} if err := serialize.ReadConfigFile(r.configFilePath, &mapconf); err != nil { diff --git a/repo/fsrepo/fsrepo_test.go b/repo/fsrepo/fsrepo_test.go index 6b30b107a..91d8e887a 100644 --- a/repo/fsrepo/fsrepo_test.go +++ b/repo/fsrepo/fsrepo_test.go @@ -7,17 +7,16 @@ import ( "path/filepath" "testing" - "github.com/ipfs/kubo/thirdparty/assert" - datastore "github.com/ipfs/go-datastore" config "github.com/ipfs/kubo/config" + "github.com/stretchr/testify/require" ) func TestInitIdempotence(t *testing.T) { t.Parallel() path := t.TempDir() for i := 0; i < 10; i++ { - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "multiple calls to init should succeed") + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "multiple calls to init should succeed") } } @@ -32,78 +31,78 @@ func TestCanManageReposIndependently(t *testing.T) { pathB := t.TempDir() t.Log("initialize two repos") - assert.Nil(Init(pathA, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "a", "should initialize successfully") - assert.Nil(Init(pathB, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "b", "should initialize successfully") + require.NoError(t, Init(pathA, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "a", "should initialize successfully") + require.NoError(t, Init(pathB, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "b", "should initialize successfully") t.Log("ensure repos initialized") - assert.True(IsInitialized(pathA), t, "a should be initialized") - assert.True(IsInitialized(pathB), t, "b should be initialized") + require.True(t, IsInitialized(pathA), "a should be initialized") + require.True(t, IsInitialized(pathB), "b should be initialized") t.Log("open the two repos") repoA, err := Open(pathA) - assert.Nil(err, t, "a") + require.NoError(t, err, "a") repoB, err := Open(pathB) - assert.Nil(err, t, "b") + require.NoError(t, 
err, "b") t.Log("close and remove b while a is open") - assert.Nil(repoB.Close(), t, "close b") - assert.Nil(Remove(pathB), t, "remove b") + require.NoError(t, repoB.Close(), "close b") + require.NoError(t, Remove(pathB), "remove b") t.Log("close and remove a") - assert.Nil(repoA.Close(), t) - assert.Nil(Remove(pathA), t) + require.NoError(t, repoA.Close()) + require.NoError(t, Remove(pathA)) } func TestDatastoreGetNotAllowedAfterClose(t *testing.T) { t.Parallel() path := t.TempDir() - assert.True(!IsInitialized(path), t, "should NOT be initialized") - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "should initialize successfully") + require.False(t, IsInitialized(path), "should NOT be initialized") + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "should initialize successfully") r, err := Open(path) - assert.Nil(err, t, "should open successfully") + require.NoError(t, err, "should open successfully") k := "key" data := []byte(k) - assert.Nil(r.Datastore().Put(context.Background(), datastore.NewKey(k), data), t, "Put should be successful") + require.NoError(t, r.Datastore().Put(context.Background(), datastore.NewKey(k), data), "Put should be successful") - assert.Nil(r.Close(), t) + require.NoError(t, r.Close()) _, err = r.Datastore().Get(context.Background(), datastore.NewKey(k)) - assert.Err(err, t, "after closer, Get should be fail") + require.Error(t, err, "after closer, Get should be fail") } func TestDatastorePersistsFromRepoToRepo(t *testing.T) { t.Parallel() path := t.TempDir() - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t) + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()})) r1, err := Open(path) - assert.Nil(err, t) + require.NoError(t, err) k := "key" expected := []byte(k) - assert.Nil(r1.Datastore().Put(context.Background(), datastore.NewKey(k), expected), t, "using first repo, Put should be 
successful") - assert.Nil(r1.Close(), t) + require.NoError(t, r1.Datastore().Put(context.Background(), datastore.NewKey(k), expected), "using first repo, Put should be successful") + require.NoError(t, r1.Close()) r2, err := Open(path) - assert.Nil(err, t) + require.NoError(t, err) actual, err := r2.Datastore().Get(context.Background(), datastore.NewKey(k)) - assert.Nil(err, t, "using second repo, Get should be successful") - assert.Nil(r2.Close(), t) - assert.True(bytes.Equal(expected, actual), t, "data should match") + require.NoError(t, err, "using second repo, Get should be successful") + require.NoError(t, r2.Close()) + require.True(t, bytes.Equal(expected, actual), "data should match") } func TestOpenMoreThanOnceInSameProcess(t *testing.T) { t.Parallel() path := t.TempDir() - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t) + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()})) r1, err := Open(path) - assert.Nil(err, t, "first repo should open successfully") + require.NoError(t, err, "first repo should open successfully") r2, err := Open(path) - assert.Nil(err, t, "second repo should open successfully") - assert.True(r1 == r2, t, "second open returns same value") + require.NoError(t, err, "second repo should open successfully") + require.Equal(t, r1, r2, "second open returns same value") - assert.Nil(r1.Close(), t) - assert.Nil(r2.Close(), t) + require.NoError(t, r1.Close()) + require.NoError(t, r2.Close()) } diff --git a/repo/fsrepo/migrations/README.md b/repo/fsrepo/migrations/README.md new file mode 100644 index 000000000..cc4b85ca3 --- /dev/null +++ b/repo/fsrepo/migrations/README.md @@ -0,0 +1,134 @@ +# IPFS Repository Migrations + +This directory contains the migration system for IPFS repositories, handling both embedded and external migrations. 
+ +## Migration System Overview + +### Embedded vs External Migrations + +Starting from **repo version 17**, Kubo uses **embedded migrations** that are built into the binary, eliminating the need to download external migration tools. + +- **Repo versions <17**: Use external binary migrations downloaded from fs-repo-migrations +- **Repo version 17+**: Use embedded migrations built into Kubo + +### Migration Functions + +#### `migrations.RunEmbeddedMigrations()` +- **Purpose**: Runs migrations that are embedded directly in the Kubo binary +- **Scope**: Handles repo version 17+ migrations +- **Performance**: Fast execution, no network downloads required +- **Dependencies**: Self-contained, uses only Kubo's internal dependencies +- **Usage**: Primary migration method for modern repo versions + +**Parameters**: +- `ctx`: Context for cancellation and timeouts +- `targetVersion`: Target repository version to migrate to +- `repoPath`: Path to the IPFS repository directory +- `allowDowngrade`: Whether to allow downgrade migrations + +```go +err = migrations.RunEmbeddedMigrations(ctx, targetVersion, repoPath, allowDowngrade) +if err != nil { + // Handle migration failure, may fall back to external migrations +} +``` + +#### `migrations.RunMigration()` with `migrations.ReadMigrationConfig()` +- **Purpose**: Runs external binary migrations downloaded from fs-repo-migrations +- **Scope**: Handles legacy repo versions <17 and serves as fallback +- **Performance**: Slower due to network downloads and external process execution +- **Dependencies**: Requires fs-repo-migrations binaries and network access +- **Usage**: Fallback method for legacy migrations + +```go +// Read migration configuration for external migrations +migrationCfg, err := migrations.ReadMigrationConfig(repoPath, configFile) +fetcher, err := migrations.GetMigrationFetcher(migrationCfg.DownloadSources, ...) 
+err = migrations.RunMigration(ctx, fetcher, targetVersion, repoPath, allowDowngrade) +``` + +## Migration Flow in Daemon Startup + +1. **Primary**: Try embedded migrations first (`RunEmbeddedMigrations`) +2. **Fallback**: If embedded migration fails, fall back to external migrations (`RunMigration`) +3. **Legacy Support**: External migrations ensure compatibility with older repo versions + +## Directory Structure + +``` +repo/fsrepo/migrations/ +├── README.md # This file +├── embedded.go # Embedded migration system +├── embedded_test.go # Tests for embedded migrations +├── migrations.go # External migration system +├── fs-repo-16-to-17/ # First embedded migration (16→17) +│ ├── migration/ +│ │ ├── migration.go # Migration logic +│ │ └── migration_test.go # Migration tests +│ ├── atomicfile/ +│ │ └── atomicfile.go # Atomic file operations +│ ├── main.go # Standalone migration binary +│ └── README.md # Migration-specific documentation +└── [other migration utilities] +``` + +## Adding New Embedded Migrations + +To add a new embedded migration (e.g., fs-repo-17-to-18): + +1. **Create migration package**: `fs-repo-17-to-18/migration/migration.go` +2. **Implement interface**: Ensure your migration implements the `EmbeddedMigration` interface +3. **Register migration**: Add to `embeddedMigrations` map in `embedded.go` +4. **Add tests**: Create comprehensive tests for your migration logic +5. 
**Update repo version**: Increment `RepoVersion` in `fsrepo.go` + +```go +// In embedded.go +var embeddedMigrations = map[string]EmbeddedMigration{ + "fs-repo-16-to-17": &mg16.Migration{}, + "fs-repo-17-to-18": &mg17.Migration{}, // Add new migration +} +``` + +## Migration Requirements + +Each embedded migration must: +- Implement the `EmbeddedMigration` interface +- Be reversible with proper backup handling +- Use atomic file operations to prevent corruption +- Preserve user customizations +- Include comprehensive tests +- Follow the established naming pattern + +## External Migration Support + +External migrations are maintained for: +- **Backward compatibility** with repo versions <17 +- **Fallback mechanism** if embedded migrations fail +- **Legacy installations** that cannot be upgraded directly + +The external migration system will continue to work but is not the preferred method for new migrations. + +## Security and Safety + +All migrations (embedded and external) include: +- **Atomic operations**: Prevent repository corruption +- **Backup creation**: Allow rollback if migration fails +- **Version validation**: Ensure migrations run on correct repo versions +- **Error handling**: Graceful failure with informative messages +- **User preservation**: Maintain custom configurations during migration + +## Testing + +Test both embedded and external migration systems: + +```bash +# Test embedded migrations +go test ./repo/fsrepo/migrations/ -run TestEmbedded + +# Test specific migration +go test ./repo/fsrepo/migrations/fs-repo-16-to-17/migration/ + +# Test migration registration +go test ./repo/fsrepo/migrations/ -run TestHasEmbedded +``` \ No newline at end of file diff --git a/repo/fsrepo/migrations/atomicfile/atomicfile.go b/repo/fsrepo/migrations/atomicfile/atomicfile.go new file mode 100644 index 000000000..209b8c368 --- /dev/null +++ b/repo/fsrepo/migrations/atomicfile/atomicfile.go @@ -0,0 +1,64 @@ +package atomicfile + +import ( + "fmt" + "io" + "os" + 
"path/filepath" +) + +// File represents an atomic file writer +type File struct { + *os.File + path string +} + +// New creates a new atomic file writer +func New(path string, mode os.FileMode) (*File, error) { + dir := filepath.Dir(path) + tempFile, err := os.CreateTemp(dir, ".tmp-"+filepath.Base(path)) + if err != nil { + return nil, err + } + + if err := tempFile.Chmod(mode); err != nil { + tempFile.Close() + os.Remove(tempFile.Name()) + return nil, err + } + + return &File{ + File: tempFile, + path: path, + }, nil +} + +// Close atomically replaces the target file with the temporary file +func (f *File) Close() error { + closeErr := f.File.Close() + if closeErr != nil { + // Try to cleanup temp file, but prioritize close error + _ = os.Remove(f.File.Name()) + return closeErr + } + return os.Rename(f.File.Name(), f.path) +} + +// Abort removes the temporary file without replacing the target +func (f *File) Abort() error { + closeErr := f.File.Close() + removeErr := os.Remove(f.File.Name()) + + if closeErr != nil && removeErr != nil { + return fmt.Errorf("abort failed: close: %w, remove: %v", closeErr, removeErr) + } + if closeErr != nil { + return closeErr + } + return removeErr +} + +// ReadFrom reads from the given reader into the atomic file +func (f *File) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(f.File, r) +} diff --git a/repo/fsrepo/migrations/atomicfile/atomicfile_test.go b/repo/fsrepo/migrations/atomicfile/atomicfile_test.go new file mode 100644 index 000000000..668045d12 --- /dev/null +++ b/repo/fsrepo/migrations/atomicfile/atomicfile_test.go @@ -0,0 +1,208 @@ +package atomicfile + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestNew_Success verifies atomic file creation +func TestNew_Success(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0644) + 
require.NoError(t, err) + defer func() { _ = af.Abort() }() + + // Verify temp file exists + assert.FileExists(t, af.File.Name()) + + // Verify temp file is in same directory + assert.Equal(t, dir, filepath.Dir(af.File.Name())) +} + +// TestClose_Success verifies atomic replacement +func TestClose_Success(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0644) + require.NoError(t, err) + + content := []byte("test content") + _, err = af.Write(content) + require.NoError(t, err) + + tempName := af.File.Name() + + require.NoError(t, af.Close()) + + // Verify target file exists with correct content + data, err := os.ReadFile(path) + require.NoError(t, err) + assert.Equal(t, content, data) + + // Verify temp file removed + assert.NoFileExists(t, tempName) +} + +// TestAbort_Success verifies cleanup +func TestAbort_Success(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0644) + require.NoError(t, err) + + tempName := af.File.Name() + + require.NoError(t, af.Abort()) + + // Verify temp file removed + assert.NoFileExists(t, tempName) + + // Verify target not created + assert.NoFileExists(t, path) +} + +// TestAbort_ErrorHandling tests error capture +func TestAbort_ErrorHandling(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0644) + require.NoError(t, err) + + // Close file to force close error + af.File.Close() + + // Remove temp file to force remove error + os.Remove(af.File.Name()) + + err = af.Abort() + // Should get both errors + require.Error(t, err) + assert.Contains(t, err.Error(), "abort failed") +} + +// TestClose_CloseError verifies cleanup on close failure +func TestClose_CloseError(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0644) + require.NoError(t, err) + + tempName := af.File.Name() + + // Close file to force close error + af.File.Close() 
+ + err = af.Close() + require.Error(t, err) + + // Verify temp file cleaned up even on error + assert.NoFileExists(t, tempName) +} + +// TestReadFrom verifies io.Copy integration +func TestReadFrom(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0644) + require.NoError(t, err) + defer func() { _ = af.Abort() }() + + content := []byte("test content from reader") + n, err := af.ReadFrom(bytes.NewReader(content)) + require.NoError(t, err) + assert.Equal(t, int64(len(content)), n) +} + +// TestFilePermissions verifies mode is set correctly +func TestFilePermissions(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0600) + require.NoError(t, err) + + _, err = af.Write([]byte("test")) + require.NoError(t, err) + + require.NoError(t, af.Close()) + + info, err := os.Stat(path) + require.NoError(t, err) + + // On Unix, check exact permissions + if runtime.GOOS != "windows" { + mode := info.Mode().Perm() + assert.Equal(t, os.FileMode(0600), mode) + } +} + +// TestMultipleAbortsSafe verifies calling Abort multiple times is safe +func TestMultipleAbortsSafe(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + af, err := New(path, 0644) + require.NoError(t, err) + + tempName := af.File.Name() + + // First abort should succeed + require.NoError(t, af.Abort()) + assert.NoFileExists(t, tempName, "temp file should be removed after first abort") + + // Second abort should handle gracefully (file already gone) + err = af.Abort() + // Error is acceptable since file is already removed, but it should not panic + t.Logf("Second Abort() returned: %v", err) +} + +// TestNoTempFilesAfterOperations verifies no .tmp-* files remain after operations +func TestNoTempFilesAfterOperations(t *testing.T) { + const testIterations = 5 + + tests := []struct { + name string + operation func(*File) error + }{ + {"close", (*File).Close}, + {"abort", (*File).Abort}, + } 
+ + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + + // Perform multiple operations + for i := 0; i < testIterations; i++ { + path := filepath.Join(dir, fmt.Sprintf("test%d.txt", i)) + + af, err := New(path, 0644) + require.NoError(t, err) + + _, err = af.Write([]byte("test data")) + require.NoError(t, err) + + require.NoError(t, tt.operation(af)) + } + + // Check for any .tmp-* files + tmpFiles, err := filepath.Glob(filepath.Join(dir, ".tmp-*")) + require.NoError(t, err) + assert.Empty(t, tmpFiles, "should be no temp files after %s", tt.name) + }) + } +} diff --git a/repo/fsrepo/migrations/common/base.go b/repo/fsrepo/migrations/common/base.go new file mode 100644 index 000000000..9b9ef635d --- /dev/null +++ b/repo/fsrepo/migrations/common/base.go @@ -0,0 +1,97 @@ +package common + +import ( + "fmt" + "io" + "path/filepath" +) + +// BaseMigration provides common functionality for migrations +type BaseMigration struct { + FromVersion string + ToVersion string + Description string + Convert func(in io.ReadSeeker, out io.Writer) error +} + +// Versions returns the version string for this migration +func (m *BaseMigration) Versions() string { + return fmt.Sprintf("%s-to-%s", m.FromVersion, m.ToVersion) +} + +// configBackupSuffix returns the backup suffix for the config file +// e.g. 
".16-to-17.bak" results in "config.16-to-17.bak" +func (m *BaseMigration) configBackupSuffix() string { + return fmt.Sprintf(".%s-to-%s.bak", m.FromVersion, m.ToVersion) +} + +// Reversible returns true as we keep backups +func (m *BaseMigration) Reversible() bool { + return true +} + +// Apply performs the migration +func (m *BaseMigration) Apply(opts Options) error { + if opts.Verbose { + fmt.Printf("applying %s repo migration\n", m.Versions()) + if m.Description != "" { + fmt.Printf("> %s\n", m.Description) + } + } + + // Check version + if err := CheckVersion(opts.Path, m.FromVersion); err != nil { + return err + } + + configPath := filepath.Join(opts.Path, "config") + + // Perform migration with backup + if err := WithBackup(configPath, m.configBackupSuffix(), m.Convert); err != nil { + return err + } + + // Update version + if err := WriteVersion(opts.Path, m.ToVersion); err != nil { + if opts.Verbose { + fmt.Printf("failed to update version file to %s\n", m.ToVersion) + } + return err + } + + if opts.Verbose { + fmt.Println("updated version file") + fmt.Printf("Migration %s succeeded\n", m.Versions()) + } + + return nil +} + +// Revert reverts the migration +func (m *BaseMigration) Revert(opts Options) error { + if opts.Verbose { + fmt.Println("reverting migration") + } + + // Check we're at the expected version + if err := CheckVersion(opts.Path, m.ToVersion); err != nil { + return err + } + + // Restore backup + configPath := filepath.Join(opts.Path, "config") + if err := RevertBackup(configPath, m.configBackupSuffix()); err != nil { + return err + } + + // Revert version + if err := WriteVersion(opts.Path, m.FromVersion); err != nil { + return err + } + + if opts.Verbose { + fmt.Printf("lowered version number to %s\n", m.FromVersion) + } + + return nil +} diff --git a/repo/fsrepo/migrations/common/config_helpers.go b/repo/fsrepo/migrations/common/config_helpers.go new file mode 100644 index 000000000..22b99f84d --- /dev/null +++ 
b/repo/fsrepo/migrations/common/config_helpers.go @@ -0,0 +1,353 @@ +package common + +import ( + "fmt" + "maps" + "slices" + "strings" +) + +// GetField retrieves a field from a nested config structure using a dot-separated path +// Example: GetField(config, "DNS.Resolvers") returns config["DNS"]["Resolvers"] +func GetField(config map[string]any, path string) (any, bool) { + parts := strings.Split(path, ".") + current := config + + for i, part := range parts { + // Last part - return the value + if i == len(parts)-1 { + val, exists := current[part] + return val, exists + } + + // Navigate deeper + next, exists := current[part] + if !exists { + return nil, false + } + + // Ensure it's a map + nextMap, ok := next.(map[string]any) + if !ok { + return nil, false + } + current = nextMap + } + + return nil, false +} + +// SetField sets a field in a nested config structure using a dot-separated path +// It creates intermediate maps as needed +func SetField(config map[string]any, path string, value any) { + parts := strings.Split(path, ".") + current := config + + for i, part := range parts { + // Last part - set the value + if i == len(parts)-1 { + current[part] = value + return + } + + // Navigate or create intermediate maps + next, exists := current[part] + if !exists { + // Create new intermediate map + newMap := make(map[string]any) + current[part] = newMap + current = newMap + } else { + // Ensure it's a map + nextMap, ok := next.(map[string]any) + if !ok { + // Can't navigate further, replace with new map + newMap := make(map[string]any) + current[part] = newMap + current = newMap + } else { + current = nextMap + } + } + } +} + +// DeleteField removes a field from a nested config structure +func DeleteField(config map[string]any, path string) bool { + parts := strings.Split(path, ".") + + // Handle simple case + if len(parts) == 1 { + _, exists := config[parts[0]] + delete(config, parts[0]) + return exists + } + + // Navigate to parent + parentPath := 
strings.Join(parts[:len(parts)-1], ".") + parent, exists := GetField(config, parentPath) + if !exists { + return false + } + + parentMap, ok := parent.(map[string]any) + if !ok { + return false + } + + fieldName := parts[len(parts)-1] + _, exists = parentMap[fieldName] + delete(parentMap, fieldName) + return exists +} + +// MoveField moves a field from one location to another +func MoveField(config map[string]any, from, to string) error { + value, exists := GetField(config, from) + if !exists { + return fmt.Errorf("source field %s does not exist", from) + } + + SetField(config, to, value) + DeleteField(config, from) + return nil +} + +// RenameField renames a field within the same parent +func RenameField(config map[string]any, path, oldName, newName string) error { + var parent map[string]any + if path == "" { + parent = config + } else { + p, exists := GetField(config, path) + if !exists { + return fmt.Errorf("parent path %s does not exist", path) + } + var ok bool + parent, ok = p.(map[string]any) + if !ok { + return fmt.Errorf("parent path %s is not a map", path) + } + } + + value, exists := parent[oldName] + if !exists { + return fmt.Errorf("field %s does not exist", oldName) + } + + parent[newName] = value + delete(parent, oldName) + return nil +} + +// SetDefault sets a field value only if it doesn't already exist +func SetDefault(config map[string]any, path string, value any) { + if _, exists := GetField(config, path); !exists { + SetField(config, path, value) + } +} + +// TransformField applies a transformation function to a field value +func TransformField(config map[string]any, path string, transformer func(any) any) error { + value, exists := GetField(config, path) + if !exists { + return fmt.Errorf("field %s does not exist", path) + } + + newValue := transformer(value) + SetField(config, path, newValue) + return nil +} + +// EnsureFieldIs checks if a field equals expected value, sets it if missing +func EnsureFieldIs(config map[string]any, path string, 
expected any) { + current, exists := GetField(config, path) + if !exists || current != expected { + SetField(config, path, expected) + } +} + +// MergeInto merges multiple source fields into a destination map +func MergeInto(config map[string]any, destination string, sources ...string) { + var destMap map[string]any + + // Get existing destination if it exists + if existing, exists := GetField(config, destination); exists { + if m, ok := existing.(map[string]any); ok { + destMap = m + } + } + + // Merge each source + for _, source := range sources { + if value, exists := GetField(config, source); exists { + if sourceMap, ok := value.(map[string]any); ok { + if destMap == nil { + destMap = make(map[string]any) + } + maps.Copy(destMap, sourceMap) + } + } + } + + if destMap != nil { + SetField(config, destination, destMap) + } +} + +// CopyField copies a field value to a new location (keeps original) +func CopyField(config map[string]any, from, to string) error { + value, exists := GetField(config, from) + if !exists { + return fmt.Errorf("source field %s does not exist", from) + } + + SetField(config, to, value) + return nil +} + +// ConvertInterfaceSlice converts []interface{} to []string +func ConvertInterfaceSlice(slice []interface{}) []string { + result := make([]string, 0, len(slice)) + for _, item := range slice { + if str, ok := item.(string); ok { + result = append(result, str) + } + } + return result +} + +// GetOrCreateSection gets or creates a map section in config +func GetOrCreateSection(config map[string]any, path string) map[string]any { + existing, exists := GetField(config, path) + if exists { + if section, ok := existing.(map[string]any); ok { + return section + } + } + + // Create new section + section := make(map[string]any) + SetField(config, path, section) + return section +} + +// SafeCastMap safely casts to map[string]any with fallback to empty map +func SafeCastMap(value any) map[string]any { + if m, ok := value.(map[string]any); ok { + 
return m + } + return make(map[string]any) +} + +// SafeCastSlice safely casts to []interface{} with fallback to empty slice +func SafeCastSlice(value any) []interface{} { + if s, ok := value.([]interface{}); ok { + return s + } + return []interface{}{} +} + +// ReplaceDefaultsWithAuto replaces default values with "auto" in a map +func ReplaceDefaultsWithAuto(values map[string]any, defaults map[string]string) map[string]string { + result := make(map[string]string) + for k, v := range values { + if vStr, ok := v.(string); ok { + if replacement, isDefault := defaults[vStr]; isDefault { + result[k] = replacement + } else { + result[k] = vStr + } + } + } + return result +} + +// EnsureSliceContains ensures a slice field contains a value +func EnsureSliceContains(config map[string]any, path string, value string) { + existing, exists := GetField(config, path) + if !exists { + SetField(config, path, []string{value}) + return + } + + if slice, ok := existing.([]interface{}); ok { + // Check if value already exists + for _, item := range slice { + if str, ok := item.(string); ok && str == value { + return // Already contains value + } + } + // Add value + SetField(config, path, append(slice, value)) + } else if strSlice, ok := existing.([]string); ok { + if !slices.Contains(strSlice, value) { + SetField(config, path, append(strSlice, value)) + } + } else { + // Replace with new slice containing value + SetField(config, path, []string{value}) + } +} + +// ReplaceInSlice replaces old values with new in a slice field +func ReplaceInSlice(config map[string]any, path string, oldValue, newValue string) { + existing, exists := GetField(config, path) + if !exists { + return + } + + if slice, ok := existing.([]interface{}); ok { + result := make([]string, 0, len(slice)) + for _, item := range slice { + if str, ok := item.(string); ok { + if str == oldValue { + result = append(result, newValue) + } else { + result = append(result, str) + } + } + } + SetField(config, path, result) + } 
+} + +// GetMapSection gets a map section with error handling +func GetMapSection(config map[string]any, path string) (map[string]any, error) { + value, exists := GetField(config, path) + if !exists { + return nil, fmt.Errorf("section %s does not exist", path) + } + + section, ok := value.(map[string]any) + if !ok { + return nil, fmt.Errorf("section %s is not a map", path) + } + + return section, nil +} + +// CloneStringMap clones a map[string]any to map[string]string +func CloneStringMap(m map[string]any) map[string]string { + result := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + result[k] = str + } + } + return result +} + +// IsEmptySlice checks if a value is an empty slice +func IsEmptySlice(value any) bool { + if value == nil { + return true + } + if slice, ok := value.([]interface{}); ok { + return len(slice) == 0 + } + if slice, ok := value.([]string); ok { + return len(slice) == 0 + } + return false +} diff --git a/repo/fsrepo/migrations/common/migration.go b/repo/fsrepo/migrations/common/migration.go new file mode 100644 index 000000000..7d72cfea3 --- /dev/null +++ b/repo/fsrepo/migrations/common/migration.go @@ -0,0 +1,16 @@ +// Package common contains common types and interfaces for file system repository migrations +package common + +// Options contains migration options for embedded migrations +type Options struct { + Path string + Verbose bool +} + +// Migration is the interface that all migrations must implement +type Migration interface { + Versions() string + Apply(opts Options) error + Revert(opts Options) error + Reversible() bool +} diff --git a/repo/fsrepo/migrations/common/testing_helpers.go b/repo/fsrepo/migrations/common/testing_helpers.go new file mode 100644 index 000000000..5ed08e18e --- /dev/null +++ b/repo/fsrepo/migrations/common/testing_helpers.go @@ -0,0 +1,290 @@ +package common + +import ( + "bytes" + "encoding/json" + "fmt" + "maps" + "os" + "path/filepath" + "reflect" + "testing" +) 
+ +// TestCase represents a single migration test case +type TestCase struct { + Name string + InputConfig map[string]any + Assertions []ConfigAssertion +} + +// ConfigAssertion represents an assertion about the migrated config +type ConfigAssertion struct { + Path string + Expected any +} + +// RunMigrationTest runs a migration test with the given test case +func RunMigrationTest(t *testing.T, migration Migration, tc TestCase) { + t.Helper() + + // Convert input to JSON + inputJSON, err := json.MarshalIndent(tc.InputConfig, "", " ") + if err != nil { + t.Fatalf("failed to marshal input config: %v", err) + } + + // Run the migration's convert function + var output bytes.Buffer + if baseMig, ok := migration.(*BaseMigration); ok { + err = baseMig.Convert(bytes.NewReader(inputJSON), &output) + if err != nil { + t.Fatalf("migration failed: %v", err) + } + } else { + t.Skip("migration is not a BaseMigration") + } + + // Parse output + var result map[string]any + err = json.Unmarshal(output.Bytes(), &result) + if err != nil { + t.Fatalf("failed to unmarshal output: %v", err) + } + + // Run assertions + for _, assertion := range tc.Assertions { + AssertConfigField(t, result, assertion.Path, assertion.Expected) + } +} + +// AssertConfigField asserts that a field in the config has the expected value +func AssertConfigField(t *testing.T, config map[string]any, path string, expected any) { + t.Helper() + + actual, exists := GetField(config, path) + if expected == nil { + if exists { + t.Errorf("expected field %s to not exist, but it has value: %v", path, actual) + } + return + } + + if !exists { + t.Errorf("expected field %s to exist with value %v, but it doesn't exist", path, expected) + return + } + + // Handle different types of comparisons + switch exp := expected.(type) { + case []string: + actualSlice, ok := actual.([]interface{}) + if !ok { + t.Errorf("field %s: expected []string, got %T", path, actual) + return + } + if len(exp) != len(actualSlice) { + t.Errorf("field 
%s: expected slice of length %d, got %d", path, len(exp), len(actualSlice)) + return + } + for i, expVal := range exp { + if actualSlice[i] != expVal { + t.Errorf("field %s[%d]: expected %v, got %v", path, i, expVal, actualSlice[i]) + } + } + case map[string]string: + actualMap, ok := actual.(map[string]any) + if !ok { + t.Errorf("field %s: expected map, got %T", path, actual) + return + } + for k, v := range exp { + if actualMap[k] != v { + t.Errorf("field %s[%s]: expected %v, got %v", path, k, v, actualMap[k]) + } + } + default: + if actual != expected { + t.Errorf("field %s: expected %v, got %v", path, expected, actual) + } + } +} + +// GenerateTestConfig creates a basic test config with the given fields +func GenerateTestConfig(fields map[string]any) map[string]any { + // Start with a minimal valid config + config := map[string]any{ + "Identity": map[string]any{ + "PeerID": "QmTest", + }, + } + + // Merge in the provided fields + maps.Copy(config, fields) + + return config +} + +// CreateTestRepo creates a temporary test repository with the given version and config +func CreateTestRepo(t *testing.T, version int, config map[string]any) string { + t.Helper() + + tempDir := t.TempDir() + + // Write version file + versionPath := filepath.Join(tempDir, "version") + err := os.WriteFile(versionPath, []byte(fmt.Sprintf("%d", version)), 0644) + if err != nil { + t.Fatalf("failed to write version file: %v", err) + } + + // Write config file + configPath := filepath.Join(tempDir, "config") + configData, err := json.MarshalIndent(config, "", " ") + if err != nil { + t.Fatalf("failed to marshal config: %v", err) + } + err = os.WriteFile(configPath, configData, 0644) + if err != nil { + t.Fatalf("failed to write config file: %v", err) + } + + return tempDir +} + +// AssertMigrationSuccess runs a full migration and checks that it succeeds +func AssertMigrationSuccess(t *testing.T, migration Migration, fromVersion, toVersion int, inputConfig map[string]any) map[string]any { + 
t.Helper() + + // Create test repo + repoPath := CreateTestRepo(t, fromVersion, inputConfig) + + // Run migration + opts := Options{ + Path: repoPath, + Verbose: false, + } + + err := migration.Apply(opts) + if err != nil { + t.Fatalf("migration failed: %v", err) + } + + // Check version was updated + versionBytes, err := os.ReadFile(filepath.Join(repoPath, "version")) + if err != nil { + t.Fatalf("failed to read version file: %v", err) + } + actualVersion := string(versionBytes) + if actualVersion != fmt.Sprintf("%d", toVersion) { + t.Errorf("expected version %d, got %s", toVersion, actualVersion) + } + + // Read and return the migrated config + configBytes, err := os.ReadFile(filepath.Join(repoPath, "config")) + if err != nil { + t.Fatalf("failed to read config file: %v", err) + } + + var result map[string]any + err = json.Unmarshal(configBytes, &result) + if err != nil { + t.Fatalf("failed to unmarshal config: %v", err) + } + + return result +} + +// AssertMigrationReversible checks that a migration can be reverted +func AssertMigrationReversible(t *testing.T, migration Migration, fromVersion, toVersion int, inputConfig map[string]any) { + t.Helper() + + // Create test repo at target version + repoPath := CreateTestRepo(t, toVersion, inputConfig) + + // Create backup file (simulating a previous migration) + backupPath := filepath.Join(repoPath, fmt.Sprintf("config.%d-to-%d.bak", fromVersion, toVersion)) + originalConfig, err := json.MarshalIndent(inputConfig, "", " ") + if err != nil { + t.Fatalf("failed to marshal original config: %v", err) + } + + if err := os.WriteFile(backupPath, originalConfig, 0644); err != nil { + t.Fatalf("failed to write backup file: %v", err) + } + + // Run revert + if err := migration.Revert(Options{Path: repoPath}); err != nil { + t.Fatalf("revert failed: %v", err) + } + + // Verify version was reverted + versionBytes, err := os.ReadFile(filepath.Join(repoPath, "version")) + if err != nil { + t.Fatalf("failed to read version file: 
%v", err) + } + + if actualVersion := string(versionBytes); actualVersion != fmt.Sprintf("%d", fromVersion) { + t.Errorf("expected version %d after revert, got %s", fromVersion, actualVersion) + } + + // Verify config was reverted + configBytes, err := os.ReadFile(filepath.Join(repoPath, "config")) + if err != nil { + t.Fatalf("failed to read reverted config file: %v", err) + } + + var revertedConfig map[string]any + if err := json.Unmarshal(configBytes, &revertedConfig); err != nil { + t.Fatalf("failed to unmarshal reverted config: %v", err) + } + + // Compare reverted config with original + compareConfigs(t, inputConfig, revertedConfig, "") +} + +// compareConfigs recursively compares two config maps and reports differences +func compareConfigs(t *testing.T, expected, actual map[string]any, path string) { + t.Helper() + + // Build current path helper + buildPath := func(key string) string { + if path == "" { + return key + } + return path + "." + key + } + + // Check all expected fields exist and match + for key, expectedValue := range expected { + currentPath := buildPath(key) + + actualValue, exists := actual[key] + if !exists { + t.Errorf("reverted config missing field %s", currentPath) + continue + } + + switch exp := expectedValue.(type) { + case map[string]any: + act, ok := actualValue.(map[string]any) + if !ok { + t.Errorf("field %s: expected map, got %T", currentPath, actualValue) + continue + } + compareConfigs(t, exp, act, currentPath) + default: + if !reflect.DeepEqual(expectedValue, actualValue) { + t.Errorf("field %s: expected %v, got %v after revert", + currentPath, expectedValue, actualValue) + } + } + } + + // Check for fields present in actual but absent from expected + for key := range actual { + if _, exists := expected[key]; !exists { + t.Errorf("reverted config has unexpected field %s", buildPath(key)) + } + } +} diff --git a/repo/fsrepo/migrations/common/utils.go b/repo/fsrepo/migrations/common/utils.go new file mode 100644 index 
000000000..e7d704dad --- /dev/null +++ b/repo/fsrepo/migrations/common/utils.go @@ -0,0 +1,112 @@ +package common + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile" +) + +// CheckVersion verifies the repo is at the expected version +func CheckVersion(repoPath string, expectedVersion string) error { + versionPath := filepath.Join(repoPath, "version") + versionBytes, err := os.ReadFile(versionPath) + if err != nil { + return fmt.Errorf("could not read version file: %w", err) + } + version := strings.TrimSpace(string(versionBytes)) + if version != expectedVersion { + return fmt.Errorf("expected version %s, got %s", expectedVersion, version) + } + return nil +} + +// WriteVersion writes the version to the repo +func WriteVersion(repoPath string, version string) error { + versionPath := filepath.Join(repoPath, "version") + return os.WriteFile(versionPath, []byte(version), 0644) +} + +// Must panics if the error is not nil. Use only for errors that cannot be handled gracefully. 
+func Must(err error) { + if err != nil { + panic(fmt.Errorf("error can't be dealt with transactionally: %w", err)) + } +} + +// WithBackup performs a config file operation with automatic backup and rollback on error +func WithBackup(configPath string, backupSuffix string, fn func(in io.ReadSeeker, out io.Writer) error) error { + // Read the entire file into memory first + // This allows us to close the file before doing atomic operations, + // which is necessary on Windows where open files can't be renamed + data, err := os.ReadFile(configPath) + if err != nil { + return fmt.Errorf("failed to read config file %s: %w", configPath, err) + } + + // Create an in-memory reader for the data + in := bytes.NewReader(data) + + // Create backup atomically to prevent partial backup on interruption + backupPath := configPath + backupSuffix + backup, err := atomicfile.New(backupPath, 0600) + if err != nil { + return fmt.Errorf("failed to create backup file for %s: %w", backupPath, err) + } + if _, err := backup.Write(data); err != nil { + Must(backup.Abort()) + return fmt.Errorf("failed to write backup data: %w", err) + } + if err := backup.Close(); err != nil { + Must(backup.Abort()) + return fmt.Errorf("failed to finalize backup: %w", err) + } + + // Create output file atomically + out, err := atomicfile.New(configPath, 0600) + if err != nil { + // Clean up backup on error + os.Remove(backupPath) + return fmt.Errorf("failed to create atomic file for %s: %w", configPath, err) + } + + // Run the conversion function + if err := fn(in, out); err != nil { + Must(out.Abort()) + // Clean up backup on error + os.Remove(backupPath) + return fmt.Errorf("config conversion failed: %w", err) + } + + // Close the output file atomically + Must(out.Close()) + // Backup remains for potential revert + + return nil +} + +// RevertBackup restores a backup file +func RevertBackup(configPath string, backupSuffix string) error { + return os.Rename(configPath+backupSuffix, configPath) +} + +// 
ReadConfig reads and unmarshals a JSON config file into a map +func ReadConfig(r io.Reader) (map[string]any, error) { + confMap := make(map[string]any) + if err := json.NewDecoder(r).Decode(&confMap); err != nil { + return nil, err + } + return confMap, nil +} + +// WriteConfig marshals and writes a config map as indented JSON +func WriteConfig(w io.Writer, config map[string]any) error { + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + return enc.Encode(config) +} diff --git a/repo/fsrepo/migrations/embedded.go b/repo/fsrepo/migrations/embedded.go new file mode 100644 index 000000000..a8218be63 --- /dev/null +++ b/repo/fsrepo/migrations/embedded.go @@ -0,0 +1,159 @@ +package migrations + +import ( + "context" + "fmt" + "log" + "os" + + lockfile "github.com/ipfs/go-fs-lock" + "github.com/ipfs/kubo/repo/fsrepo/migrations/common" + mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration" + mg17 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-17-to-18/migration" +) + +// embeddedMigrations contains all embedded migrations +// Using a slice to maintain order and allow for future range-based operations +var embeddedMigrations = []common.Migration{ + mg16.Migration, + mg17.Migration, +} + +// migrationsByName provides quick lookup by name +var migrationsByName = make(map[string]common.Migration) + +func init() { + for _, m := range embeddedMigrations { + migrationsByName["fs-repo-"+m.Versions()] = m + } +} + +// RunEmbeddedMigration runs an embedded migration if available +func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir string, revert bool) error { + migration, exists := migrationsByName[migrationName] + if !exists { + return fmt.Errorf("embedded migration %s not found", migrationName) + } + + if revert && !migration.Reversible() { + return fmt.Errorf("migration %s is not reversible", migrationName) + } + + logger := log.New(os.Stdout, "", 0) + logger.Printf("Running embedded migration %s...", migrationName) + + 
opts := common.Options{ + Path: ipfsDir, + Verbose: true, + } + + var err error + if revert { + err = migration.Revert(opts) + } else { + err = migration.Apply(opts) + } + + if err != nil { + return fmt.Errorf("embedded migration %s failed: %w", migrationName, err) + } + + logger.Printf("Embedded migration %s completed successfully", migrationName) + return nil +} + +// HasEmbeddedMigration checks if a migration is available as embedded +func HasEmbeddedMigration(migrationName string) bool { + _, exists := migrationsByName[migrationName] + return exists +} + +// RunEmbeddedMigrations runs all needed embedded migrations from current version to target version. +// +// This function migrates an IPFS repository using embedded migrations that are built into the Kubo binary. +// Embedded migrations are available for repo version 17+ and provide fast, network-free migration execution. +// +// Parameters: +// - ctx: Context for cancellation and deadlines +// - targetVer: Target repository version to migrate to +// - ipfsDir: Path to the IPFS repository directory +// - allowDowngrade: Whether to allow downgrade migrations (reduces target version) +// +// Returns: +// - nil on successful migration +// - error if migration fails, repo path is invalid, or no embedded migrations are available +// +// Behavior: +// - Validates that ipfsDir contains a valid IPFS repository +// - Determines current repository version automatically +// - Returns immediately if already at target version +// - Prevents downgrades unless allowDowngrade is true +// - Runs all necessary migrations in sequence (e.g., 16→17→18 if going from 16 to 18) +// - Creates backups and uses atomic operations to prevent corruption +// +// Error conditions: +// - Repository path is invalid or inaccessible +// - Current version cannot be determined +// - Downgrade attempted with allowDowngrade=false +// - No embedded migrations available for the version range +// - Individual migration fails during execution +// +// 
Example: +// +// err := RunEmbeddedMigrations(ctx, 17, "/path/to/.ipfs", false) +// if err != nil { +// // Handle migration failure, may need to fall back to external migrations +// } +func RunEmbeddedMigrations(ctx context.Context, targetVer int, ipfsDir string, allowDowngrade bool) error { + ipfsDir, err := CheckIpfsDir(ipfsDir) + if err != nil { + return err + } + + // Acquire lock once for all embedded migrations to prevent concurrent access + lk, err := lockfile.Lock(ipfsDir, "repo.lock") + if err != nil { + return fmt.Errorf("failed to acquire repo lock: %w", err) + } + defer lk.Close() + + fromVer, err := RepoVersion(ipfsDir) + if err != nil { + return fmt.Errorf("could not get repo version: %w", err) + } + + if fromVer == targetVer { + return nil + } + + revert := fromVer > targetVer + if revert && !allowDowngrade { + return fmt.Errorf("downgrade not allowed from %d to %d", fromVer, targetVer) + } + + logger := log.New(os.Stdout, "", 0) + logger.Print("Looking for embedded migrations.") + + migrations, _, err := findMigrations(ctx, fromVer, targetVer) + if err != nil { + return err + } + + embeddedCount := 0 + for _, migrationName := range migrations { + if HasEmbeddedMigration(migrationName) { + err = RunEmbeddedMigration(ctx, migrationName, ipfsDir, revert) + if err != nil { + return err + } + embeddedCount++ + } + } + + if embeddedCount == 0 { + return fmt.Errorf("no embedded migrations found for version %d to %d", fromVer, targetVer) + } + + logger.Printf("Success: fs-repo migrated to version %d using embedded migrations.\n", targetVer) + return nil +} diff --git a/repo/fsrepo/migrations/embedded_test.go b/repo/fsrepo/migrations/embedded_test.go new file mode 100644 index 000000000..b739d1e0c --- /dev/null +++ b/repo/fsrepo/migrations/embedded_test.go @@ -0,0 +1,36 @@ +package migrations + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHasEmbeddedMigration(t *testing.T) { 
+ // Test that the 16-to-17 migration is registered + assert.True(t, HasEmbeddedMigration("fs-repo-16-to-17"), + "fs-repo-16-to-17 migration should be registered") + + // Test that a non-existent migration is not found + assert.False(t, HasEmbeddedMigration("fs-repo-99-to-100"), + "fs-repo-99-to-100 migration should not be registered") +} + +func TestEmbeddedMigrations(t *testing.T) { + // Test that we have at least one embedded migration + assert.NotEmpty(t, embeddedMigrations, "No embedded migrations found") + + // Test that all registered migrations implement the interface + for i, migration := range embeddedMigrations { + assert.NotEmpty(t, migration.Versions(), + "Migration %d has empty versions", i) + } +} + +func TestRunEmbeddedMigration(t *testing.T) { + // Test that running a non-existent migration returns an error + err := RunEmbeddedMigration(context.Background(), "non-existent", "/tmp", false) + require.Error(t, err, "Expected error for non-existent migration") +} diff --git a/repo/fsrepo/migrations/fetch_test.go b/repo/fsrepo/migrations/fetch_test.go index 6e87c966b..9236eb655 100644 --- a/repo/fsrepo/migrations/fetch_test.go +++ b/repo/fsrepo/migrations/fetch_test.go @@ -3,7 +3,6 @@ package migrations import ( "bufio" "bytes" - "context" "fmt" "os" "path/filepath" @@ -20,10 +19,7 @@ func TestGetDistPath(t *testing.T) { } testDist := "/unit/test/dist" - err := os.Setenv(envIpfsDistPath, testDist) - if err != nil { - panic(err) - } + t.Setenv(envIpfsDistPath, testDist) defer func() { os.Unsetenv(envIpfsDistPath) }() @@ -45,8 +41,7 @@ func TestGetDistPath(t *testing.T) { } func TestHttpFetch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() fetcher := NewHttpFetcher(testIpfsDist, testServer.URL, "", 0) @@ -82,8 +77,7 @@ func TestHttpFetch(t *testing.T) { func TestFetchBinary(t *testing.T) { tmpDir := t.TempDir() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + 
ctx := t.Context() fetcher := NewHttpFetcher(testIpfsDist, testServer.URL, "", 0) @@ -139,18 +133,12 @@ func TestFetchBinary(t *testing.T) { if err != nil { panic(err) } - err = os.Setenv("TMPDIR", tmpDir) - if err != nil { - panic(err) - } + t.Setenv("TMPDIR", tmpDir) _, err = FetchBinary(ctx, fetcher, "go-ipfs", "v1.0.0", "ipfs", tmpDir) if !os.IsPermission(err) { t.Error("expected 'permission' error, got:", err) } - err = os.Setenv("TMPDIR", "/tmp") - if err != nil { - panic(err) - } + t.Setenv("TMPDIR", "/tmp") err = os.Chmod(tmpDir, 0o755) if err != nil { panic(err) @@ -171,8 +159,7 @@ func TestFetchBinary(t *testing.T) { } func TestMultiFetcher(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() badFetcher := NewHttpFetcher("", "bad-url", "", 0) fetcher := NewHttpFetcher(testIpfsDist, testServer.URL, "", 0) diff --git a/repo/fsrepo/migrations/fetcher.go b/repo/fsrepo/migrations/fetcher.go index db7a5c0c1..cc48a3b77 100644 --- a/repo/fsrepo/migrations/fetcher.go +++ b/repo/fsrepo/migrations/fetcher.go @@ -2,11 +2,10 @@ package migrations import ( "context" + "errors" "fmt" "io" "os" - - "github.com/hashicorp/go-multierror" ) const ( @@ -49,23 +48,23 @@ func NewMultiFetcher(f ...Fetcher) *MultiFetcher { // Fetch attempts to fetch the file at each of its fetchers until one succeeds. func (f *MultiFetcher) Fetch(ctx context.Context, ipfsPath string) ([]byte, error) { - var errs error + var errs []error for _, fetcher := range f.fetchers { out, err := fetcher.Fetch(ctx, ipfsPath) if err == nil { return out, nil } fmt.Printf("Error fetching: %s\n", err.Error()) - errs = multierror.Append(errs, err) + errs = append(errs, err) } - return nil, errs + return nil, errors.Join(errs...) 
} func (f *MultiFetcher) Close() error { var errs error for _, fetcher := range f.fetchers { if err := fetcher.Close(); err != nil { - errs = multierror.Append(errs, err) + errs = errors.Join(errs, err) } } return errs @@ -79,7 +78,7 @@ func (f *MultiFetcher) Fetchers() []Fetcher { return f.fetchers } -// NewLimitReadCloser returns a new io.ReadCloser with the reader wrappen in a +// NewLimitReadCloser returns a new io.ReadCloser with the reader wrapped in a // io.LimitedReader limited to reading the amount specified. func NewLimitReadCloser(rc io.ReadCloser, limit int64) io.ReadCloser { return limitReadCloser{ @@ -93,7 +92,7 @@ func NewLimitReadCloser(rc io.ReadCloser, limit int64) io.ReadCloser { // variable is not set, then returns the provided distPath, and if that is not set // then returns the IPNS path. // -// To get the IPFS path of the latest distribution, if not overriddin by the +// To get the IPFS path of the latest distribution, if not overridden by the // environ variable: GetDistPathEnv(CurrentIpfsDist). func GetDistPathEnv(distPath string) string { if dist := os.Getenv(envIpfsDistPath); dist != "" { diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/main.go b/repo/fsrepo/migrations/fs-repo-16-to-17/main.go new file mode 100644 index 000000000..835b002fb --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-16-to-17/main.go @@ -0,0 +1,63 @@ +// Package main implements fs-repo-16-to-17 migration for IPFS repositories. +// +// This migration transitions repositories from version 16 to 17, introducing +// the AutoConf system that replaces hardcoded network defaults with dynamic +// configuration fetched from autoconf.json. 
+// +// Changes made: +// - Enables AutoConf system with default settings +// - Migrates default bootstrap peers to "auto" sentinel value +// - Sets DNS.Resolvers["."] to "auto" for dynamic DNS resolver configuration +// - Migrates Routing.DelegatedRouters to ["auto"] +// - Migrates Ipns.DelegatedPublishers to ["auto"] +// - Preserves user customizations (custom bootstrap peers, DNS resolvers) +// +// The migration is reversible and creates config.16-to-17.bak for rollback. +// +// Usage: +// +// fs-repo-16-to-17 -path /path/to/ipfs/repo [-verbose] [-revert] +// +// This migration is embedded in Kubo starting from version 0.37 and runs +// automatically during daemon startup. This standalone binary is provided +// for manual migration scenarios. +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/ipfs/kubo/repo/fsrepo/migrations/common" + mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration" +) + +func main() { + var path = flag.String("path", "", "Path to IPFS repository") + var verbose = flag.Bool("verbose", false, "Enable verbose output") + var revert = flag.Bool("revert", false, "Revert migration") + flag.Parse() + + if *path == "" { + fmt.Fprintf(os.Stderr, "Error: -path flag is required\n") + flag.Usage() + os.Exit(1) + } + + opts := common.Options{ + Path: *path, + Verbose: *verbose, + } + + var err error + if *revert { + err = mg16.Migration.Revert(opts) + } else { + err = mg16.Migration.Apply(opts) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "Migration failed: %v\n", err) + os.Exit(1) + } +} diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go new file mode 100644 index 000000000..248423b28 --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go @@ -0,0 +1,221 @@ +// package mg16 contains the code to perform 16-17 repository migration in Kubo. 
// This handles the following:
// - Migrate default bootstrap peers to "auto"
// - Migrate DNS resolvers to use "auto" for "." eTLD
// - Enable AutoConf system with default settings
// - Increment repo version to 17
package mg16

import (
	"io"
	"slices"

	"github.com/ipfs/kubo/config"
	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

// DefaultBootstrapAddresses are the hardcoded bootstrap addresses that shipped
// with Kubo 0.36. They are nodes operated by the IPFS team.
// As with all p2p networks, bootstrap is an important security concern.
// This list is used during migration to detect which peers are defaults vs custom:
// default entries are collapsed into a single "auto" placeholder, custom entries
// are preserved verbatim.
var DefaultBootstrapAddresses = []string{
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", // rust-libp2p-server
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
	"/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", // js-libp2p-amino-dht-bootstrapper
	"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",          // mars.i.ipfs.io
	"/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",  // mars.i.ipfs.io
}

// Migration is the main exported migration for 16-to-17
var Migration = &common.BaseMigration{
	FromVersion: "16",
	ToVersion:   "17",
	Description: "Upgrading config to use AutoConf system",
	Convert:     convert,
}

// NewMigration creates a new migration instance (for compatibility)
func NewMigration() common.Migration {
	return Migration
}

// convert converts the config from version 16 to 17.
// It reads the JSON config from in, applies each migration step in order,
// and writes the migrated config to out. The first failing step aborts the
// conversion and its error is returned.
func convert(in io.ReadSeeker, out io.Writer) error {
	confMap, err := common.ReadConfig(in)
	if err != nil {
		return err
	}

	// Enable AutoConf system
	if err := enableAutoConf(confMap); err != nil {
		return err
	}

	// Migrate Bootstrap peers
	if err := migrateBootstrap(confMap); err != nil {
		return err
	}

	// Migrate DNS resolvers
	if err := migrateDNSResolvers(confMap); err != nil {
		return err
	}

	// Migrate DelegatedRouters
	if err := migrateDelegatedRouters(confMap); err != nil {
		return err
	}

	// Migrate DelegatedPublishers
	if err := migrateDelegatedPublishers(confMap); err != nil {
		return err
	}

	// Save new config
	return common.WriteConfig(out, confMap)
}

// enableAutoConf adds an AutoConf section to the config if one is not
// already present. Existing AutoConf settings are left untouched
// (presumably common.SetDefault only writes when the key is absent —
// confirm against the common package).
func enableAutoConf(confMap map[string]any) error {
	// Add empty AutoConf section if it doesn't exist - all fields will use implicit defaults:
	// - Enabled defaults to true (via DefaultAutoConfEnabled)
	// - URL defaults to mainnet URL (via DefaultAutoConfURL)
	// - RefreshInterval defaults to 24h (via DefaultAutoConfRefreshInterval)
	// - TLSInsecureSkipVerify defaults to false (no WithDefault, but false is zero value)
	common.SetDefault(confMap, "AutoConf", map[string]any{})
	return nil
}

// migrateBootstrap migrates bootstrap peers to use "auto".
// Missing or unparseable Bootstrap sections are replaced wholesale with
// ["auto"]; otherwise the peer list is rewritten by processBootstrapPeers.
func migrateBootstrap(confMap map[string]any) error {
	bootstrap, exists := confMap["Bootstrap"]
	if !exists {
		// No bootstrap section, add "auto"
		confMap["Bootstrap"] = []string{config.AutoPlaceholder}
		return nil
	}

	// Convert to string slice using helper
	bootstrapPeers := common.ConvertInterfaceSlice(common.SafeCastSlice(bootstrap))
	if len(bootstrapPeers) == 0 && bootstrap != nil {
		// Invalid bootstrap format (non-slice or non-string entries), replace with "auto".
		// NOTE(review): an explicitly empty list also lands here and becomes ["auto"] —
		// intentional per this migration's rules, see processBootstrapPeers.
		confMap["Bootstrap"] = []string{config.AutoPlaceholder}
		return nil
	}

	// Process bootstrap peers according to migration rules
	newBootstrap := processBootstrapPeers(bootstrapPeers)
	confMap["Bootstrap"] = newBootstrap

	return nil
}

// processBootstrapPeers processes bootstrap peers according to migration rules:
// an empty list becomes ["auto"]; any known default peer is dropped and
// replaced by a single leading "auto" entry; custom peers are kept in order;
// a list with no defaults is returned unchanged.
func processBootstrapPeers(peers []string) []string {
	// If empty, use "auto"
	if len(peers) == 0 {
		return []string{config.AutoPlaceholder}
	}

	// Filter out default peers to get only custom ones
	// (Clone first so the caller's slice is not mutated by DeleteFunc).
	customPeers := slices.DeleteFunc(slices.Clone(peers), func(peer string) bool {
		return slices.Contains(DefaultBootstrapAddresses, peer)
	})

	// Check if any default peers were removed
	hasDefaultPeers := len(customPeers) < len(peers)

	// If we have default peers, replace them with "auto"
	if hasDefaultPeers {
		return append([]string{config.AutoPlaceholder}, customPeers...)
	}

	// No default peers found, keep as is
	return peers
}

// migrateDNSResolvers migrates DNS resolvers to use "auto" for the "." eTLD.
// Known default resolver URLs are replaced with "auto"; custom resolver
// mappings are preserved; a "." entry is added (as "auto") only if absent.
func migrateDNSResolvers(confMap map[string]any) error {
	// Get or create DNS section
	dns := common.GetOrCreateSection(confMap, "DNS")

	// Get existing resolvers or create empty map
	resolvers := common.SafeCastMap(dns["Resolvers"])

	// Define default resolvers that should be replaced with "auto"
	defaultResolvers := map[string]string{
		"https://dns.eth.limo/dns-query":                config.AutoPlaceholder,
		"https://dns.eth.link/dns-query":                config.AutoPlaceholder,
		"https://resolver.cloudflare-eth.com/dns-query": config.AutoPlaceholder,
	}

	// Replace default resolvers with "auto"
	stringResolvers := common.ReplaceDefaultsWithAuto(resolvers, defaultResolvers)

	// Ensure "." is set to "auto" if not already set
	if _, exists := stringResolvers["."]; !exists {
		stringResolvers["."] = config.AutoPlaceholder
	}

	dns["Resolvers"] = stringResolvers
	return nil
}

// migrateDelegatedRouters migrates Routing.DelegatedRouters to use "auto".
// A missing/empty list becomes ["auto"]; every "https://cid.contact" entry is
// collapsed into a single "auto" entry (deduplicated); other routers are kept.
func migrateDelegatedRouters(confMap map[string]any) error {
	// Get or create Routing section
	routing := common.GetOrCreateSection(confMap, "Routing")

	// Get existing delegated routers
	delegatedRouters, exists := routing["DelegatedRouters"]

	// Check if it's empty or nil
	if !exists || common.IsEmptySlice(delegatedRouters) {
		routing["DelegatedRouters"] = []string{config.AutoPlaceholder}
		return nil
	}

	// Process the list to replace cid.contact with "auto" and preserve others
	routers := common.ConvertInterfaceSlice(common.SafeCastSlice(delegatedRouters))
	var newRouters []string
	hasAuto := false

	for _, router := range routers {
		if router == "https://cid.contact" {
			// Emit "auto" only once, even if cid.contact appears multiple times.
			if !hasAuto {
				newRouters = append(newRouters, config.AutoPlaceholder)
				hasAuto = true
			}
		} else {
			newRouters = append(newRouters, router)
		}
	}

	// If empty after processing, add "auto"
	if len(newRouters) == 0 {
		newRouters = []string{config.AutoPlaceholder}
	}

	routing["DelegatedRouters"] = newRouters
	return nil
}

// migrateDelegatedPublishers migrates Ipns.DelegatedPublishers to use "auto".
// Only a missing or empty list is replaced with ["auto"]; any custom
// publishers are left exactly as configured.
func migrateDelegatedPublishers(confMap map[string]any) error {
	// Get or create Ipns section
	ipns := common.GetOrCreateSection(confMap, "Ipns")

	// Get existing delegated publishers
	delegatedPublishers, exists := ipns["DelegatedPublishers"]

	// Check if it's empty or nil - only then replace with "auto"
	// Otherwise preserve custom publishers
	if !exists || common.IsEmptySlice(delegatedPublishers) {
		ipns["DelegatedPublishers"] = []string{config.AutoPlaceholder}
	}
	// If there are custom publishers, leave them as is

	return nil
}
a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go new file mode 100644 index 000000000..ef13eb92a --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go @@ -0,0 +1,477 @@ +package mg16 + +import ( + "bytes" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/ipfs/kubo/repo/fsrepo/migrations/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Helper function to run migration on JSON input and return result +func runMigrationOnJSON(t *testing.T, input string) map[string]interface{} { + t.Helper() + var output bytes.Buffer + err := convert(bytes.NewReader([]byte(input)), &output) + require.NoError(t, err) + + var result map[string]interface{} + err = json.Unmarshal(output.Bytes(), &result) + require.NoError(t, err) + + return result +} + +// Helper function to assert nested map key has expected value +func assertMapKeyEquals(t *testing.T, result map[string]interface{}, path []string, key string, expected interface{}) { + t.Helper() + current := result + for _, p := range path { + section, exists := current[p] + require.True(t, exists, "Section %s not found in path %v", p, path) + current = section.(map[string]interface{}) + } + + assert.Equal(t, expected, current[key], "Expected %s to be %v", key, expected) +} + +// Helper function to assert slice contains expected values +func assertSliceEquals(t *testing.T, result map[string]interface{}, path []string, expected []string) { + t.Helper() + current := result + for i, p := range path[:len(path)-1] { + section, exists := current[p] + require.True(t, exists, "Section %s not found in path %v at index %d", p, path, i) + current = section.(map[string]interface{}) + } + + sliceKey := path[len(path)-1] + slice, exists := current[sliceKey] + require.True(t, exists, "Slice %s not found", sliceKey) + + actualSlice := slice.([]interface{}) + 
require.Equal(t, len(expected), len(actualSlice), "Expected slice length %d, got %d", len(expected), len(actualSlice)) + + for i, exp := range expected { + assert.Equal(t, exp, actualSlice[i], "Expected slice[%d] to be %s", i, exp) + } +} + +// Helper to build test config JSON with specified fields +func buildTestConfig(fields map[string]interface{}) string { + config := map[string]interface{}{ + "Identity": map[string]interface{}{"PeerID": "QmTest"}, + } + for k, v := range fields { + config[k] = v + } + data, _ := json.MarshalIndent(config, "", " ") + return string(data) +} + +// Helper to run migration and get DNS resolvers +func runMigrationAndGetDNSResolvers(t *testing.T, input string) map[string]interface{} { + t.Helper() + result := runMigrationOnJSON(t, input) + dns := result["DNS"].(map[string]interface{}) + return dns["Resolvers"].(map[string]interface{}) +} + +// Helper to assert multiple resolver values +func assertResolvers(t *testing.T, resolvers map[string]interface{}, expected map[string]string) { + t.Helper() + for key, expectedValue := range expected { + assert.Equal(t, expectedValue, resolvers[key], "Expected %s resolver to be %v", key, expectedValue) + } +} + +// ============================================================================= +// End-to-End Migration Tests +// ============================================================================= + +func TestMigration(t *testing.T) { + // Create a temporary directory for testing + tempDir, err := os.MkdirTemp("", "migration-test-16-to-17") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + // Create a test config with default bootstrap peers + testConfig := map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", // Custom peer + }, + "DNS": map[string]interface{}{ + 
"Resolvers": map[string]string{}, + }, + "Routing": map[string]interface{}{ + "DelegatedRouters": []string{}, + }, + "Ipns": map[string]interface{}{ + "ResolveCacheSize": 128, + }, + "Identity": map[string]interface{}{ + "PeerID": "QmTest", + }, + "Version": map[string]interface{}{ + "Current": "0.36.0", + }, + } + + // Write test config + configPath := filepath.Join(tempDir, "config") + configData, err := json.MarshalIndent(testConfig, "", " ") + require.NoError(t, err) + err = os.WriteFile(configPath, configData, 0644) + require.NoError(t, err) + + // Create version file + versionPath := filepath.Join(tempDir, "version") + err = os.WriteFile(versionPath, []byte("16"), 0644) + require.NoError(t, err) + + // Run migration + opts := common.Options{ + Path: tempDir, + Verbose: true, + } + + err = Migration.Apply(opts) + require.NoError(t, err) + + // Verify version was updated + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + assert.Equal(t, "17", string(versionData), "Expected version 17") + + // Verify config was updated + configData, err = os.ReadFile(configPath) + require.NoError(t, err) + + var updatedConfig map[string]interface{} + err = json.Unmarshal(configData, &updatedConfig) + require.NoError(t, err) + + // Check AutoConf was added + autoConf, exists := updatedConfig["AutoConf"] + assert.True(t, exists, "AutoConf section not added") + autoConfMap := autoConf.(map[string]interface{}) + // URL is not set explicitly in migration (uses implicit default) + _, hasURL := autoConfMap["URL"] + assert.False(t, hasURL, "AutoConf URL should not be explicitly set in migration") + + // Check Bootstrap was updated + bootstrap := updatedConfig["Bootstrap"].([]interface{}) + assert.Equal(t, 2, len(bootstrap), "Expected 2 bootstrap entries") + assert.Equal(t, "auto", bootstrap[0], "Expected first bootstrap entry to be 'auto'") + assert.Equal(t, "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", bootstrap[1], "Expected custom peer to be preserved") + + // 
Check DNS.Resolvers was updated + dns := updatedConfig["DNS"].(map[string]interface{}) + resolvers := dns["Resolvers"].(map[string]interface{}) + assert.Equal(t, "auto", resolvers["."], "Expected DNS resolver for '.' to be 'auto'") + + // Check Routing.DelegatedRouters was updated + routing := updatedConfig["Routing"].(map[string]interface{}) + delegatedRouters := routing["DelegatedRouters"].([]interface{}) + assert.Equal(t, 1, len(delegatedRouters)) + assert.Equal(t, "auto", delegatedRouters[0], "Expected DelegatedRouters to be ['auto']") + + // Check Ipns.DelegatedPublishers was updated + ipns := updatedConfig["Ipns"].(map[string]interface{}) + delegatedPublishers := ipns["DelegatedPublishers"].([]interface{}) + assert.Equal(t, 1, len(delegatedPublishers)) + assert.Equal(t, "auto", delegatedPublishers[0], "Expected DelegatedPublishers to be ['auto']") + + // Test revert + err = Migration.Revert(opts) + require.NoError(t, err) + + // Verify version was reverted + versionData, err = os.ReadFile(versionPath) + require.NoError(t, err) + assert.Equal(t, "16", string(versionData), "Expected version 16 after revert") +} + +func TestConvert(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + }, + }) + + result := runMigrationOnJSON(t, input) + + // Check that AutoConf section was added but is empty (using implicit defaults) + autoConf, exists := result["AutoConf"] + require.True(t, exists, "AutoConf section should exist") + autoConfMap, ok := autoConf.(map[string]interface{}) + require.True(t, ok, "AutoConf should be a map") + require.Empty(t, autoConfMap, "AutoConf should be empty (using implicit defaults)") + + // Check that Bootstrap was updated to "auto" + assertSliceEquals(t, result, []string{"Bootstrap"}, []string{"auto"}) +} + +// 
============================================================================= +// Bootstrap Migration Tests +// ============================================================================= + +func TestBootstrapMigration(t *testing.T) { + t.Parallel() + + t.Run("process bootstrap peers logic verification", func(t *testing.T) { + t.Parallel() + tests := []struct { + name string + peers []string + expected []string + }{ + { + name: "empty peers", + peers: []string{}, + expected: []string{"auto"}, + }, + { + name: "only default peers", + peers: []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + }, + expected: []string{"auto"}, + }, + { + name: "mixed default and custom peers", + peers: []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", + }, + expected: []string{"auto", "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer"}, + }, + { + name: "only custom peers", + peers: []string{ + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer1", + "/ip4/192.168.1.2/tcp/4001/p2p/QmCustomPeer2", + }, + expected: []string{ + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer1", + "/ip4/192.168.1.2/tcp/4001/p2p/QmCustomPeer2", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + result := processBootstrapPeers(tt.peers) + require.Equal(t, len(tt.expected), len(result), "Expected %d peers, got %d", len(tt.expected), len(result)) + for i, expected := range tt.expected { + assert.Equal(t, expected, result[i], "Expected peer %d to be %s", i, expected) + } + }) + } + }) + + t.Run("replaces all old default bootstrapper peers with auto entry", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + 
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + }, + }) + + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Bootstrap"}, []string{"auto"}) + }) + + t.Run("creates Bootstrap section with auto when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Bootstrap"}, []string{"auto"}) + }) +} + +// ============================================================================= +// DNS Migration Tests +// ============================================================================= + +func TestDNSMigration(t *testing.T) { + t.Parallel() + + t.Run("creates DNS section with auto resolver when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertMapKeyEquals(t, result, []string{"DNS", "Resolvers"}, ".", "auto") + }) + + t.Run("preserves all custom DNS resolvers unchanged", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "DNS": map[string]interface{}{ + "Resolvers": map[string]string{ + ".": "https://my-custom-resolver.com", + ".eth": "https://eth.resolver", + }, + }, + }) + + resolvers := runMigrationAndGetDNSResolvers(t, input) + assertResolvers(t, resolvers, map[string]string{ + ".": "https://my-custom-resolver.com", + ".eth": "https://eth.resolver", + }) + }) + + t.Run("preserves custom dot and eth resolvers unchanged", 
func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "DNS": map[string]interface{}{ + "Resolvers": map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", + ".eth": "https://example.com/dns-query", + }, + }, + }) + + resolvers := runMigrationAndGetDNSResolvers(t, input) + assertResolvers(t, resolvers, map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", + ".eth": "https://example.com/dns-query", + }) + }) + + t.Run("replaces old default eth resolver with auto", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "DNS": map[string]interface{}{ + "Resolvers": map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", + ".eth": "https://dns.eth.limo/dns-query", // should be replaced + ".crypto": "https://resolver.cloudflare-eth.com/dns-query", // should be replaced + ".link": "https://dns.eth.link/dns-query", // should be replaced + }, + }, + }) + + resolvers := runMigrationAndGetDNSResolvers(t, input) + assertResolvers(t, resolvers, map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", // preserved + ".eth": "auto", // replaced + ".crypto": "auto", // replaced + ".link": "auto", // replaced + }) + }) +} + +// ============================================================================= +// Routing Migration Tests +// ============================================================================= + +func TestRoutingMigration(t *testing.T) { + t.Parallel() + + t.Run("creates Routing section with auto DelegatedRouters when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Routing", "DelegatedRouters"}, []string{"auto"}) + }) + + t.Run("replaces cid.contact with auto while preserving custom routers added by user", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Routing": map[string]interface{}{ 
+ "DelegatedRouters": []string{ + "https://cid.contact", + "https://my-custom-router.com", + }, + }, + }) + + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Routing", "DelegatedRouters"}, []string{"auto", "https://my-custom-router.com"}) + }) +} + +// ============================================================================= +// IPNS Migration Tests +// ============================================================================= + +func TestIpnsMigration(t *testing.T) { + t.Parallel() + + t.Run("creates Ipns section with auto DelegatedPublishers when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Ipns", "DelegatedPublishers"}, []string{"auto"}) + }) + + t.Run("preserves existing custom DelegatedPublishers unchanged", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Ipns": map[string]interface{}{ + "DelegatedPublishers": []string{ + "https://my-publisher.com", + "https://another-publisher.com", + }, + }, + }) + + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Ipns", "DelegatedPublishers"}, []string{"https://my-publisher.com", "https://another-publisher.com"}) + }) + + t.Run("adds auto DelegatedPublishers to existing Ipns section", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Ipns": map[string]interface{}{ + "ResolveCacheSize": 128, + }, + }) + + result := runMigrationOnJSON(t, input) + assertMapKeyEquals(t, result, []string{"Ipns"}, "ResolveCacheSize", float64(128)) + assertSliceEquals(t, result, []string{"Ipns", "DelegatedPublishers"}, []string{"auto"}) + }) +} + +// ============================================================================= +// AutoConf Migration Tests +// ============================================================================= + +func TestAutoConfMigration(t 
*testing.T) { + t.Parallel() + + t.Run("preserves existing AutoConf fields unchanged", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "AutoConf": map[string]interface{}{ + "URL": "https://custom.example.com/autoconf.json", + "Enabled": false, + "CustomField": "preserved", + }, + }) + + result := runMigrationOnJSON(t, input) + assertMapKeyEquals(t, result, []string{"AutoConf"}, "URL", "https://custom.example.com/autoconf.json") + assertMapKeyEquals(t, result, []string{"AutoConf"}, "Enabled", false) + assertMapKeyEquals(t, result, []string{"AutoConf"}, "CustomField", "preserved") + }) +} diff --git a/repo/fsrepo/migrations/fs-repo-17-to-18/main.go b/repo/fsrepo/migrations/fs-repo-17-to-18/main.go new file mode 100644 index 000000000..777c242d2 --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-17-to-18/main.go @@ -0,0 +1,60 @@ +// Package main implements fs-repo-17-to-18 migration for IPFS repositories. +// +// This migration consolidates the Provider and Reprovider configurations into +// a unified Provide configuration section. +// +// Changes made: +// - Migrates Provider.Enabled to Provide.Enabled +// - Migrates Provider.WorkerCount to Provide.DHT.MaxWorkers +// - Migrates Reprovider.Strategy to Provide.Strategy (converts "flat" to "all") +// - Migrates Reprovider.Interval to Provide.DHT.Interval +// - Removes deprecated Provider and Reprovider sections +// +// The migration is reversible and creates config.17-to-18.bak for rollback. +// +// Usage: +// +// fs-repo-17-to-18 -path /path/to/ipfs/repo [-verbose] [-revert] +// +// This migration is embedded in Kubo and runs automatically during daemon startup. +// This standalone binary is provided for manual migration scenarios. 
+package main + +import ( + "flag" + "fmt" + "os" + + "github.com/ipfs/kubo/repo/fsrepo/migrations/common" + mg17 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-17-to-18/migration" +) + +func main() { + var path = flag.String("path", "", "Path to IPFS repository") + var verbose = flag.Bool("verbose", false, "Enable verbose output") + var revert = flag.Bool("revert", false, "Revert migration") + flag.Parse() + + if *path == "" { + fmt.Fprintf(os.Stderr, "Error: -path flag is required\n") + flag.Usage() + os.Exit(1) + } + + opts := common.Options{ + Path: *path, + Verbose: *verbose, + } + + var err error + if *revert { + err = mg17.Migration.Revert(opts) + } else { + err = mg17.Migration.Apply(opts) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "Migration failed: %v\n", err) + os.Exit(1) + } +} diff --git a/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration.go b/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration.go new file mode 100644 index 000000000..27fd9a7de --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-17-to-18/migration/migration.go @@ -0,0 +1,121 @@ +// package mg17 contains the code to perform 17-18 repository migration in Kubo. 
// Package mg17 contains the code to perform the 17-to-18 repository migration in Kubo.
// This handles the following:
// - Migrate Provider and Reprovider configs to unified Provide config
// - Clear deprecated Provider and Reprovider fields
// - Increment repo version to 18
package mg17

import (
	"fmt"
	"io"

	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

// Migration is the main exported migration for 17-to-18
var Migration = &common.BaseMigration{
	FromVersion: "17",
	ToVersion:   "18",
	Description: "Migrating Provider and Reprovider configuration to unified Provide configuration",
	Convert:     convert,
}

// NewMigration creates a new migration instance (for compatibility)
func NewMigration() common.Migration {
	return Migration
}

// convert performs the actual configuration transformation:
// Provider.Enabled -> Provide.Enabled, Provider.WorkerCount -> Provide.DHT.MaxWorkers,
// Reprovider.Strategy -> Provide.Strategy ("flat" becomes "all"),
// Reprovider.Interval -> Provide.DHT.Interval. The old Provider and Reprovider
// sections are deleted afterwards. Progress notes are printed to stdout for
// values the user had customized.
func convert(in io.ReadSeeker, out io.Writer) error {
	// Read the configuration
	confMap, err := common.ReadConfig(in)
	if err != nil {
		return err
	}

	// Create new Provide section with DHT subsection from Provider and Reprovider
	provide := make(map[string]any)
	dht := make(map[string]any)
	// Tracks whether anything user-visible was migrated, so we only print the
	// docs link when there is something for the user to review.
	hasNonDefaultValues := false

	// Migrate Provider fields if they exist
	provider := common.SafeCastMap(confMap["Provider"])
	if enabled, exists := provider["Enabled"]; exists {
		provide["Enabled"] = enabled
		// Log migration for non-default values (default is enabled, so only
		// an explicit false is worth calling out).
		if enabledBool, ok := enabled.(bool); ok && !enabledBool {
			fmt.Printf(" Migrated Provider.Enabled=%v to Provide.Enabled=%v\n", enabledBool, enabledBool)
			hasNonDefaultValues = true
		}
	}
	if workerCount, exists := provider["WorkerCount"]; exists {
		dht["MaxWorkers"] = workerCount
		// Log migration for all worker count values
		// (JSON numbers decode as float64, hence the type assertion).
		if count, ok := workerCount.(float64); ok {
			fmt.Printf(" Migrated Provider.WorkerCount=%v to Provide.DHT.MaxWorkers=%v\n", int(count), int(count))
			hasNonDefaultValues = true

			// Additional guidance for high WorkerCount
			if count > 5 {
				fmt.Printf(" ⚠️ For better resource utilization, consider enabling Provide.DHT.SweepEnabled=true\n")
				fmt.Printf(" and adjusting Provide.DHT.DedicatedBurstWorkers if announcement of new CIDs\n")
				fmt.Printf(" should take priority over periodic reprovide interval.\n")
			}
		}
	}
	// Note: Skip Provider.Strategy as it was unused

	// Migrate Reprovider fields if they exist
	reprovider := common.SafeCastMap(confMap["Reprovider"])
	if strategy, exists := reprovider["Strategy"]; exists {
		if strategyStr, ok := strategy.(string); ok {
			// Convert deprecated "flat" strategy to "all"
			if strategyStr == "flat" {
				provide["Strategy"] = "all"
				fmt.Printf(" Migrated deprecated Reprovider.Strategy=\"flat\" to Provide.Strategy=\"all\"\n")
			} else {
				// Migrate any other strategy value as-is
				provide["Strategy"] = strategyStr
				fmt.Printf(" Migrated Reprovider.Strategy=\"%s\" to Provide.Strategy=\"%s\"\n", strategyStr, strategyStr)
			}
			hasNonDefaultValues = true
		} else {
			// Not a string, set to default "all" to ensure valid config
			provide["Strategy"] = "all"
			fmt.Printf(" Warning: Reprovider.Strategy was not a string, setting Provide.Strategy=\"all\"\n")
			hasNonDefaultValues = true
		}
	}
	if interval, exists := reprovider["Interval"]; exists {
		dht["Interval"] = interval
		// Log migration for non-default intervals ("22h" was the previous default).
		if intervalStr, ok := interval.(string); ok && intervalStr != "22h" && intervalStr != "" {
			fmt.Printf(" Migrated Reprovider.Interval=\"%s\" to Provide.DHT.Interval=\"%s\"\n", intervalStr, intervalStr)
			hasNonDefaultValues = true
		}
	}
	// Note: Sweep is a new field introduced in v0.38, not present in v0.37
	// So we don't need to migrate it from Reprovider

	// Set the DHT section if we have any DHT fields to migrate
	if len(dht) > 0 {
		provide["DHT"] = dht
	}

	// Set the new Provide section if we have any fields to migrate
	if len(provide) > 0 {
		confMap["Provide"] = provide
	}

	// Clear old Provider and Reprovider sections
	delete(confMap, "Provider")
	delete(confMap, "Reprovider")

	// Print documentation link if we migrated any non-default values
	if hasNonDefaultValues {
		fmt.Printf(" See: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide\n")
	}

	// Write the updated config
	return common.WriteConfig(out, confMap)
}
package mg17

import (
	"testing"

	"github.com/ipfs/kubo/repo/fsrepo/migrations/common"
)

// TestMigration17to18 drives table-driven cases through the shared migration
// test harness in the common package, covering field mapping, the
// "flat" -> "all" strategy conversion, missing/empty sections, and
// preservation of unrelated config.
func TestMigration17to18(t *testing.T) {
	migration := NewMigration()

	testCases := []common.TestCase{
		{
			Name: "Migrate Provider and Reprovider to Provide",
			InputConfig: common.GenerateTestConfig(map[string]any{
				"Provider": map[string]any{
					"Enabled":     true,
					"WorkerCount": 8,
					"Strategy":    "unused", // This field was unused and should be ignored
				},
				"Reprovider": map[string]any{
					"Strategy": "pinned",
					"Interval": "12h",
				},
			}),
			Assertions: []common.ConfigAssertion{
				{Path: "Provide.Enabled", Expected: true},
				{Path: "Provide.DHT.MaxWorkers", Expected: float64(8)}, // JSON unmarshals to float64
				{Path: "Provide.Strategy", Expected: "pinned"},
				{Path: "Provide.DHT.Interval", Expected: "12h"},
				{Path: "Provider", Expected: nil},   // Should be deleted
				{Path: "Reprovider", Expected: nil}, // Should be deleted
			},
		},
		{
			Name: "Convert flat strategy to all",
			InputConfig: common.GenerateTestConfig(map[string]any{
				"Provider": map[string]any{
					"Enabled": false,
				},
				"Reprovider": map[string]any{
					"Strategy": "flat", // Deprecated, should be converted to "all"
					"Interval": "24h",
				},
			}),
			Assertions: []common.ConfigAssertion{
				{Path: "Provide.Enabled", Expected: false},
				{Path: "Provide.Strategy", Expected: "all"}, // "flat" converted to "all"
				{Path: "Provide.DHT.Interval", Expected: "24h"},
				{Path: "Provider", Expected: nil},
				{Path: "Reprovider", Expected: nil},
			},
		},
		{
			Name: "Handle missing Provider section",
			InputConfig: common.GenerateTestConfig(map[string]any{
				"Reprovider": map[string]any{
					"Strategy": "roots",
					"Interval": "6h",
				},
			}),
			Assertions: []common.ConfigAssertion{
				{Path: "Provide.Strategy", Expected: "roots"},
				{Path: "Provide.DHT.Interval", Expected: "6h"},
				{Path: "Provider", Expected: nil},
				{Path: "Reprovider", Expected: nil},
			},
		},
		{
			Name: "Handle missing Reprovider section",
			InputConfig: common.GenerateTestConfig(map[string]any{
				"Provider": map[string]any{
					"Enabled":     true,
					"WorkerCount": 16,
				},
			}),
			Assertions: []common.ConfigAssertion{
				{Path: "Provide.Enabled", Expected: true},
				{Path: "Provide.DHT.MaxWorkers", Expected: float64(16)},
				{Path: "Provider", Expected: nil},
				{Path: "Reprovider", Expected: nil},
			},
		},
		{
			Name: "Handle empty Provider and Reprovider sections",
			InputConfig: common.GenerateTestConfig(map[string]any{
				"Provider":   map[string]any{},
				"Reprovider": map[string]any{},
			}),
			Assertions: []common.ConfigAssertion{
				{Path: "Provide", Expected: nil}, // No fields to migrate
				{Path: "Provider", Expected: nil},
				{Path: "Reprovider", Expected: nil},
			},
		},
		{
			Name: "Handle missing both sections",
			InputConfig: common.GenerateTestConfig(map[string]any{
				"Datastore": map[string]any{
					"StorageMax": "10GB",
				},
			}),
			Assertions: []common.ConfigAssertion{
				{Path: "Provide", Expected: nil}, // No Provider/Reprovider to migrate
				{Path: "Provider", Expected: nil},
				{Path: "Reprovider", Expected: nil},
				{Path: "Datastore.StorageMax", Expected: "10GB"}, // Other config preserved
			},
		},
		{
			Name: "Preserve other config sections",
			InputConfig: common.GenerateTestConfig(map[string]any{
				"Provider": map[string]any{
					"Enabled": true,
				},
				"Reprovider": map[string]any{
					"Strategy": "all",
				},
				"Swarm": map[string]any{
					"ConnMgr": map[string]any{
						"Type": "basic",
					},
				},
			}),
			Assertions: []common.ConfigAssertion{
				{Path: "Provide.Enabled", Expected: true},
				{Path: "Provide.Strategy", Expected: "all"},
				{Path: "Swarm.ConnMgr.Type", Expected: "basic"}, // Other config preserved
				{Path: "Provider", Expected: nil},
				{Path: "Reprovider", Expected: nil},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			common.RunMigrationTest(t, migration, tc)
		})
	}
}

// TestMigration17to18Reversible verifies a full apply/revert round trip using
// the shared assertion helpers from the common package.
func TestMigration17to18Reversible(t *testing.T) {
	migration := NewMigration()

	// Test that migration is reversible
	inputConfig := common.GenerateTestConfig(map[string]any{
		"Provide": map[string]any{
			"Enabled":     true,
			"WorkerCount": 8,
			"Strategy":    "pinned",
			"Interval":    "12h",
		},
	})

	// Test full migration and revert
	migratedConfig := common.AssertMigrationSuccess(t, migration, 17, 18, inputConfig)

	// Check that Provide section exists after migration
	common.AssertConfigField(t, migratedConfig, "Provide.Enabled", true)

	// Test revert
	common.AssertMigrationReversible(t, migration, 17, 18, migratedConfig)
}

// TestMigration17to18Integration checks the migration's metadata as seen by
// the common migration framework (version string and reversibility flag).
func TestMigration17to18Integration(t *testing.T) {
	migration := NewMigration()

	// Test that the migration properly integrates with the common framework
	if migration.Versions() != "17-to-18" {
		t.Errorf("expected versions '17-to-18', got '%s'", migration.Versions())
	}

	if !migration.Reversible() {
		t.Error("migration should be reversible")
	}
}
IpfsDir returns the path of the ipfs directory. If dir specified, then // returns the expanded version dir. If dir is "", then return the directory // set by IPFS_PATH, or if IPFS_PATH is not set, then return the default @@ -28,25 +23,16 @@ func init() { func IpfsDir(dir string) (string, error) { var err error if dir == "" { - dir = os.Getenv(envIpfsPath) - } - if dir != "" { - dir, err = homedir.Expand(dir) + dir, err = config.PathRoot() if err != nil { return "", err } - return dir, nil } - - home, err := homedir.Dir() + dir, err = fsutil.ExpandHome(dir) if err != nil { return "", err } - if home == "" { - return "", errors.New("could not determine IPFS_PATH, home dir not set") - } - - return filepath.Join(home, defIpfsDir), nil + return dir, nil } // CheckIpfsDir gets the ipfs directory and checks that the directory exists. diff --git a/repo/fsrepo/migrations/ipfsdir_test.go b/repo/fsrepo/migrations/ipfsdir_test.go index e4e626794..c18721bae 100644 --- a/repo/fsrepo/migrations/ipfsdir_test.go +++ b/repo/fsrepo/migrations/ipfsdir_test.go @@ -4,24 +4,30 @@ import ( "os" "path/filepath" "testing" -) -var ( - fakeHome string - fakeIpfs string + "github.com/ipfs/kubo/config" ) func TestRepoDir(t *testing.T) { - fakeHome = t.TempDir() - os.Setenv("HOME", fakeHome) - fakeIpfs = filepath.Join(fakeHome, ".ipfs") + fakeHome := t.TempDir() + t.Setenv("HOME", fakeHome) + // On Windows, os.UserHomeDir() uses USERPROFILE, not HOME + t.Setenv("USERPROFILE", fakeHome) + fakeIpfs := filepath.Join(fakeHome, ".ipfs") + t.Setenv(config.EnvDir, fakeIpfs) - t.Run("testIpfsDir", testIpfsDir) - t.Run("testCheckIpfsDir", testCheckIpfsDir) - t.Run("testRepoVersion", testRepoVersion) + t.Run("testIpfsDir", func(t *testing.T) { + testIpfsDir(t, fakeIpfs) + }) + t.Run("testCheckIpfsDir", func(t *testing.T) { + testCheckIpfsDir(t, fakeIpfs) + }) + t.Run("testRepoVersion", func(t *testing.T) { + testRepoVersion(t, fakeIpfs) + }) } -func testIpfsDir(t *testing.T) { +func testIpfsDir(t 
*testing.T, fakeIpfs string) { _, err := CheckIpfsDir("") if err == nil { t.Fatal("expected error when no .ipfs directory to find") @@ -37,16 +43,16 @@ func testIpfsDir(t *testing.T) { t.Fatal(err) } if dir != fakeIpfs { - t.Fatal("wrong ipfs directory:", dir) + t.Fatalf("wrong ipfs directory: got %s, expected %s", dir, fakeIpfs) } - os.Setenv(envIpfsPath, "~/.ipfs") + t.Setenv(config.EnvDir, "~/.ipfs") dir, err = IpfsDir("") if err != nil { t.Fatal(err) } if dir != fakeIpfs { - t.Fatal("wrong ipfs directory:", dir) + t.Fatalf("wrong ipfs directory: got %s, expected %s", dir, fakeIpfs) } _, err = IpfsDir("~somesuer/foo") @@ -54,15 +60,12 @@ func testIpfsDir(t *testing.T) { t.Fatal("expected error with user-specific home dir") } - err = os.Setenv(envIpfsPath, "~somesuer/foo") - if err != nil { - panic(err) - } + t.Setenv(config.EnvDir, "~somesuer/foo") _, err = IpfsDir("~somesuer/foo") if err == nil { t.Fatal("expected error with user-specific home dir") } - err = os.Unsetenv(envIpfsPath) + err = os.Unsetenv(config.EnvDir) if err != nil { panic(err) } @@ -72,7 +75,7 @@ func testIpfsDir(t *testing.T) { t.Fatal(err) } if dir != fakeIpfs { - t.Fatal("wrong ipfs directory:", dir) + t.Fatalf("wrong ipfs directory: got %s, expected %s", dir, fakeIpfs) } _, err = IpfsDir("") @@ -81,7 +84,7 @@ func testIpfsDir(t *testing.T) { } } -func testCheckIpfsDir(t *testing.T) { +func testCheckIpfsDir(t *testing.T, fakeIpfs string) { _, err := CheckIpfsDir("~somesuer/foo") if err == nil { t.Fatal("expected error with user-specific home dir") @@ -101,7 +104,7 @@ func testCheckIpfsDir(t *testing.T) { } } -func testRepoVersion(t *testing.T) { +func testRepoVersion(t *testing.T, fakeIpfs string) { badDir := "~somesuer/foo" _, err := RepoVersion(badDir) if err == nil { diff --git a/repo/fsrepo/migrations/ipfsfetcher/ipfsfetcher_test.go b/repo/fsrepo/migrations/ipfsfetcher/ipfsfetcher_test.go index 7323d0172..8fc568450 100644 --- a/repo/fsrepo/migrations/ipfsfetcher/ipfsfetcher_test.go +++ 
b/repo/fsrepo/migrations/ipfsfetcher/ipfsfetcher_test.go @@ -3,7 +3,6 @@ package ipfsfetcher import ( "bufio" "bytes" - "context" "fmt" "os" "path/filepath" @@ -23,8 +22,7 @@ func init() { func TestIpfsFetcher(t *testing.T) { skipUnlessEpic(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() fetcher := NewIpfsFetcher("", 0, nil, "") defer fetcher.Close() @@ -58,8 +56,7 @@ func TestIpfsFetcher(t *testing.T) { } func TestInitIpfsFetcher(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() f := NewIpfsFetcher("", 0, nil, "") defer f.Close() diff --git a/repo/fsrepo/migrations/migrations.go b/repo/fsrepo/migrations/migrations.go index e612b8abb..c5b23a17d 100644 --- a/repo/fsrepo/migrations/migrations.go +++ b/repo/fsrepo/migrations/migrations.go @@ -25,6 +25,10 @@ const ( // RunMigration finds, downloads, and runs the individual migrations needed to // migrate the repo from its current version to the target version. +// +// Deprecated: This function downloads migration binaries from the internet and will be removed +// in a future version. Use RunHybridMigrations for modern migrations with embedded support, +// or RunEmbeddedMigrations for repo versions ≥16. func RunMigration(ctx context.Context, fetcher Fetcher, targetVer int, ipfsDir string, allowDowngrade bool) error { ipfsDir, err := CheckIpfsDir(ipfsDir) if err != nil { @@ -114,6 +118,9 @@ func ExeName(name string) string { // ReadMigrationConfig reads the Migration section of the IPFS config, avoiding // reading anything other than the Migration section. That way, we're free to // make arbitrary changes to all _other_ sections in migrations. +// +// Deprecated: This function is used by legacy migration downloads and will be removed +// in a future version. Use RunHybridMigrations or RunEmbeddedMigrations instead. 
func ReadMigrationConfig(repoRoot string, userConfigFile string) (*config.Migration, error) { var cfg struct { Migration config.Migration @@ -151,7 +158,10 @@ func ReadMigrationConfig(repoRoot string, userConfigFile string) (*config.Migrat } // GetMigrationFetcher creates one or more fetchers according to -// downloadSources,. +// downloadSources. +// +// Deprecated: This function is used by legacy migration downloads and will be removed +// in a future version. Use RunHybridMigrations or RunEmbeddedMigrations instead. func GetMigrationFetcher(downloadSources []string, distPath string, newIpfsFetcher func(string) Fetcher) (Fetcher, error) { const httpUserAgent = "kubo/migration" const numTriesPerHTTP = 3 @@ -163,9 +173,7 @@ func GetMigrationFetcher(downloadSources []string, distPath string, newIpfsFetch case "HTTPS", "https", "HTTP", "http": fetchers = append(fetchers, &RetryFetcher{NewHttpFetcher(distPath, "", httpUserAgent, 0), numTriesPerHTTP}) case "IPFS", "ipfs": - if newIpfsFetcher != nil { - fetchers = append(fetchers, newIpfsFetcher(distPath)) - } + return nil, errors.New("IPFS downloads are not supported for legacy migrations (repo versions <16). Please use only HTTPS in Migration.DownloadSources") case "": // Ignore empty string default: @@ -202,6 +210,9 @@ func migrationName(from, to int) string { // findMigrations returns a list of migrations, ordered from first to last // migration to apply, and a map of locations of migration binaries of any // migrations that were found. +// +// Deprecated: This function is used by legacy migration downloads and will be removed +// in a future version. func findMigrations(ctx context.Context, from, to int) ([]string, map[string]string, error) { step := 1 count := to - from @@ -250,6 +261,9 @@ func runMigration(ctx context.Context, binPath, ipfsDir string, revert bool, log // fetchMigrations downloads the requested migrations, and returns a slice with // the paths of each binary, in the same order specified by needed. 
+// +// Deprecated: This function downloads migration binaries from the internet and will be removed +// in a future version. Use RunHybridMigrations or RunEmbeddedMigrations instead. func fetchMigrations(ctx context.Context, fetcher Fetcher, needed []string, destDir string, logger *log.Logger) ([]string, error) { osv, err := osWithVariant() if err != nil { @@ -300,3 +314,224 @@ func fetchMigrations(ctx context.Context, fetcher Fetcher, needed []string, dest return bins, nil } + +// RunHybridMigrations intelligently runs migrations using external tools for legacy versions +// and embedded migrations for modern versions. This handles the transition from external +// fs-repo-migrations binaries (for repo versions <16) to embedded migrations (for repo versions ≥16). +// +// The function automatically: +// 1. Uses external migrations to get from current version to v16 (if needed) +// 2. Uses embedded migrations for v16+ steps +// 3. Handles pure external, pure embedded, or mixed migration scenarios +// +// Legacy external migrations (repo versions <16) only support HTTPS downloads. +// +// Parameters: +// - ctx: Context for cancellation and timeouts +// - targetVer: Target repository version to migrate to +// - ipfsDir: Path to the IPFS repository directory +// - allowDowngrade: Whether to allow downgrade migrations +// +// Returns error if migration fails at any step. 
+func RunHybridMigrations(ctx context.Context, targetVer int, ipfsDir string, allowDowngrade bool) error { + const embeddedMigrationsMinVersion = 16 + + // Get current repo version + currentVer, err := RepoVersion(ipfsDir) + if err != nil { + return fmt.Errorf("could not get current repo version: %w", err) + } + + var logger = log.New(os.Stdout, "", 0) + + // Check if migration is needed + if currentVer == targetVer { + logger.Printf("Repository is already at version %d", targetVer) + return nil + } + + // Validate downgrade request + if targetVer < currentVer && !allowDowngrade { + return fmt.Errorf("downgrade from version %d to %d requires allowDowngrade=true", currentVer, targetVer) + } + + // Determine migration strategy based on version ranges + needsExternal := currentVer < embeddedMigrationsMinVersion + needsEmbedded := targetVer >= embeddedMigrationsMinVersion + + // Case 1: Pure embedded migration (both current and target ≥ 16) + if !needsExternal && needsEmbedded { + return RunEmbeddedMigrations(ctx, targetVer, ipfsDir, allowDowngrade) + } + + // For cases requiring external migrations, we check if migration binaries + // are available in PATH before attempting network downloads + + // Case 2: Pure external migration (target < 16) + if needsExternal && !needsEmbedded { + + // Check for migration binaries in PATH first (for testing/local development) + migrations, binPaths, err := findMigrations(ctx, currentVer, targetVer) + if err != nil { + return fmt.Errorf("could not determine migration paths: %w", err) + } + + foundAll := true + for _, migName := range migrations { + if _, exists := binPaths[migName]; !exists { + foundAll = false + break + } + } + + if foundAll { + return runMigrationsFromPath(ctx, migrations, binPaths, ipfsDir, logger, false) + } + + // Fall back to network download (original behavior) + migrationCfg, err := ReadMigrationConfig(ipfsDir, "") + if err != nil { + return fmt.Errorf("could not read migration config: %w", err) + } + + // 
Use existing RunMigration which handles network downloads properly (HTTPS only for legacy migrations) + fetcher, err := GetMigrationFetcher(migrationCfg.DownloadSources, GetDistPathEnv(CurrentIpfsDist), nil) + if err != nil { + return fmt.Errorf("failed to get migration fetcher: %w", err) + } + defer fetcher.Close() + return RunMigration(ctx, fetcher, targetVer, ipfsDir, allowDowngrade) + } + + // Case 3: Hybrid migration (current < 16, target ≥ 16) + if needsExternal && needsEmbedded { + logger.Printf("Starting hybrid migration from version %d to %d", currentVer, targetVer) + logger.Print("Using hybrid migration strategy: external to v16, then embedded") + + // Phase 1: Use external migrations to get to v16 + logger.Printf("Phase 1: External migration from v%d to v%d", currentVer, embeddedMigrationsMinVersion) + + // Check for external migration binaries in PATH first + migrations, binPaths, err := findMigrations(ctx, currentVer, embeddedMigrationsMinVersion) + if err != nil { + return fmt.Errorf("could not determine external migration paths: %w", err) + } + + foundAll := true + for _, migName := range migrations { + if _, exists := binPaths[migName]; !exists { + foundAll = false + break + } + } + + if foundAll { + if err = runMigrationsFromPath(ctx, migrations, binPaths, ipfsDir, logger, false); err != nil { + return fmt.Errorf("external migration phase failed: %w", err) + } + } else { + migrationCfg, err := ReadMigrationConfig(ipfsDir, "") + if err != nil { + return fmt.Errorf("could not read migration config: %w", err) + } + + // Legacy migrations only support HTTPS downloads + fetcher, err := GetMigrationFetcher(migrationCfg.DownloadSources, GetDistPathEnv(CurrentIpfsDist), nil) + if err != nil { + return fmt.Errorf("failed to get migration fetcher: %w", err) + } + defer fetcher.Close() + + if err = RunMigration(ctx, fetcher, embeddedMigrationsMinVersion, ipfsDir, allowDowngrade); err != nil { + return fmt.Errorf("external migration phase failed: %w", err) + } 
+ } + + // Phase 2: Use embedded migrations for v16+ + logger.Printf("Phase 2: Embedded migration from v%d to v%d", embeddedMigrationsMinVersion, targetVer) + err = RunEmbeddedMigrations(ctx, targetVer, ipfsDir, allowDowngrade) + if err != nil { + return fmt.Errorf("embedded migration phase failed: %w", err) + } + + logger.Printf("Hybrid migration completed successfully: v%d → v%d", currentVer, targetVer) + return nil + } + + // Case 4: Reverse hybrid migration (≥16 to <16) + // Use embedded migrations for ≥16 steps, then external migrations for <16 steps + logger.Printf("Starting reverse hybrid migration from version %d to %d", currentVer, targetVer) + logger.Print("Using reverse hybrid migration strategy: embedded to v16, then external") + + // Phase 1: Use embedded migrations from current version down to v16 (if needed) + if currentVer > embeddedMigrationsMinVersion { + logger.Printf("Phase 1: Embedded downgrade from v%d to v%d", currentVer, embeddedMigrationsMinVersion) + err = RunEmbeddedMigrations(ctx, embeddedMigrationsMinVersion, ipfsDir, allowDowngrade) + if err != nil { + return fmt.Errorf("embedded downgrade phase failed: %w", err) + } + } + + // Phase 2: Use external migrations from v16 to target (if needed) + if embeddedMigrationsMinVersion > targetVer { + logger.Printf("Phase 2: External downgrade from v%d to v%d", embeddedMigrationsMinVersion, targetVer) + + // Check for external migration binaries in PATH first + migrations, binPaths, err := findMigrations(ctx, embeddedMigrationsMinVersion, targetVer) + if err != nil { + return fmt.Errorf("could not determine external migration paths: %w", err) + } + + foundAll := true + for _, migName := range migrations { + if _, exists := binPaths[migName]; !exists { + foundAll = false + break + } + } + + if foundAll { + if err = runMigrationsFromPath(ctx, migrations, binPaths, ipfsDir, logger, true); err != nil { + return fmt.Errorf("external downgrade phase failed: %w", err) + } + } else { + migrationCfg, err 
:= ReadMigrationConfig(ipfsDir, "") + if err != nil { + return fmt.Errorf("could not read migration config: %w", err) + } + + // Legacy migrations only support HTTPS downloads + fetcher, err := GetMigrationFetcher(migrationCfg.DownloadSources, GetDistPathEnv(CurrentIpfsDist), nil) + if err != nil { + return fmt.Errorf("failed to get migration fetcher: %w", err) + } + defer fetcher.Close() + + if err = RunMigration(ctx, fetcher, targetVer, ipfsDir, allowDowngrade); err != nil { + return fmt.Errorf("external downgrade phase failed: %w", err) + } + } + } + + logger.Printf("Reverse hybrid migration completed successfully: v%d → v%d", currentVer, targetVer) + return nil +} + +// runMigrationsFromPath runs migrations using binaries found in PATH +func runMigrationsFromPath(ctx context.Context, migrations []string, binPaths map[string]string, ipfsDir string, logger *log.Logger, revert bool) error { + for _, migName := range migrations { + binPath, exists := binPaths[migName] + if !exists { + return fmt.Errorf("migration binary %s not found in PATH", migName) + } + + logger.Printf("Running migration %s using binary from PATH: %s", migName, binPath) + + // Run the migration binary directly + err := runMigration(ctx, binPath, ipfsDir, revert, logger) + if err != nil { + return fmt.Errorf("migration %s failed: %w", migName, err) + } + } + return nil +} diff --git a/repo/fsrepo/migrations/migrations_test.go b/repo/fsrepo/migrations/migrations_test.go index 96370f864..90712a41e 100644 --- a/repo/fsrepo/migrations/migrations_test.go +++ b/repo/fsrepo/migrations/migrations_test.go @@ -15,8 +15,7 @@ import ( func TestFindMigrations(t *testing.T) { tmpDir := t.TempDir() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() migs, bins, err := findMigrations(ctx, 0, 5) if err != nil { @@ -33,9 +32,7 @@ func TestFindMigrations(t *testing.T) { createFakeBin(i-1, i, tmpDir) } - origPath := os.Getenv("PATH") - os.Setenv("PATH", tmpDir) - defer 
os.Setenv("PATH", origPath) + t.Setenv("PATH", tmpDir) migs, bins, err = findMigrations(ctx, 0, 5) if err != nil { @@ -62,8 +59,7 @@ func TestFindMigrations(t *testing.T) { func TestFindMigrationsReverse(t *testing.T) { tmpDir := t.TempDir() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() migs, bins, err := findMigrations(ctx, 5, 0) if err != nil { @@ -80,9 +76,7 @@ func TestFindMigrationsReverse(t *testing.T) { createFakeBin(i-1, i, tmpDir) } - origPath := os.Getenv("PATH") - os.Setenv("PATH", tmpDir) - defer os.Setenv("PATH", origPath) + t.Setenv("PATH", tmpDir) migs, bins, err = findMigrations(ctx, 5, 0) if err != nil { @@ -107,8 +101,7 @@ func TestFindMigrationsReverse(t *testing.T) { } func TestFetchMigrations(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() fetcher := NewHttpFetcher(testIpfsDist, testServer.URL, "", 0) @@ -144,10 +137,8 @@ func TestFetchMigrations(t *testing.T) { } func TestRunMigrations(t *testing.T) { - fakeHome := t.TempDir() - - os.Setenv("HOME", fakeHome) - fakeIpfs := filepath.Join(fakeHome, ".ipfs") + fakeIpfs := filepath.Join(t.TempDir(), ".ipfs") + t.Setenv(config.EnvDir, fakeIpfs) err := os.Mkdir(fakeIpfs, os.ModePerm) if err != nil { @@ -162,14 +153,13 @@ func TestRunMigrations(t *testing.T) { fetcher := NewHttpFetcher(testIpfsDist, testServer.URL, "", 0) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() targetVer := 9 err = RunMigration(ctx, fetcher, targetVer, fakeIpfs, false) if err == nil || !strings.HasPrefix(err.Error(), "downgrade not allowed") { - t.Fatal("expected 'downgrade not alloed' error") + t.Fatal("expected 'downgrade not allowed' error") } err = RunMigration(ctx, fetcher, targetVer, fakeIpfs, true) @@ -327,12 +317,9 @@ func TestGetMigrationFetcher(t *testing.T) { } downloadSources = []string{"ipfs"} - f, err = GetMigrationFetcher(downloadSources, "", 
newIpfsFetcher) - if err != nil { - t.Fatal(err) - } - if _, ok := f.(*mockIpfsFetcher); !ok { - t.Fatal("expected IpfsFetcher") + _, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) + if err == nil || !strings.Contains(err.Error(), "IPFS downloads are not supported for legacy migrations") { + t.Fatal("Expected IPFS downloads error, got:", err) } downloadSources = []string{"http"} @@ -347,6 +334,12 @@ func TestGetMigrationFetcher(t *testing.T) { } downloadSources = []string{"IPFS", "HTTPS"} + _, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) + if err == nil || !strings.Contains(err.Error(), "IPFS downloads are not supported for legacy migrations") { + t.Fatal("Expected IPFS downloads error, got:", err) + } + + downloadSources = []string{"https", "some.domain.io"} f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) if err != nil { t.Fatal(err) @@ -359,19 +352,6 @@ func TestGetMigrationFetcher(t *testing.T) { t.Fatal("expected 2 fetchers in MultiFetcher") } - downloadSources = []string{"ipfs", "https", "some.domain.io"} - f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) - if err != nil { - t.Fatal(err) - } - mf, ok = f.(*MultiFetcher) - if !ok { - t.Fatal("expected MultiFetcher") - } - if mf.Len() != 3 { - t.Fatal("expected 3 fetchers in MultiFetcher") - } - downloadSources = nil _, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) if err == nil { diff --git a/repo/fsrepo/migrations/setup_test.go b/repo/fsrepo/migrations/setup_test.go index 2e306fda1..9761edb94 100644 --- a/repo/fsrepo/migrations/setup_test.go +++ b/repo/fsrepo/migrations/setup_test.go @@ -32,9 +32,10 @@ var ( ) func TestMain(m *testing.M) { + t := &testing.T{} + // Setup test data - testDataDir := makeTestData() - defer os.RemoveAll(testDataDir) + testDataDir := makeTestData(t) testCar := makeTestCar(testDataDir) defer os.RemoveAll(testCar) @@ -47,18 +48,15 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } -func 
makeTestData() string { - tempDir, err := os.MkdirTemp("", "kubo-migrations-test-*") - if err != nil { - panic(err) - } +func makeTestData(t testing.TB) string { + tempDir := t.TempDir() versions := []string{"v1.0.0", "v1.1.0", "v1.1.2", "v2.0.0-rc1", "2.0.0", "v2.0.1"} packages := []string{"kubo", "go-ipfs", "fs-repo-migrations", "fs-repo-1-to-2", "fs-repo-2-to-3", "fs-repo-9-to-10", "fs-repo-10-to-11"} // Generate fake data for _, name := range packages { - err = os.MkdirAll(filepath.Join(tempDir, name), 0777) + err := os.MkdirAll(filepath.Join(tempDir, name), 0777) if err != nil { panic(err) } diff --git a/repo/fsrepo/migrations/versions_test.go b/repo/fsrepo/migrations/versions_test.go index dd62f9bde..d68d62511 100644 --- a/repo/fsrepo/migrations/versions_test.go +++ b/repo/fsrepo/migrations/versions_test.go @@ -1,7 +1,6 @@ package migrations import ( - "context" "testing" "github.com/blang/semver/v4" @@ -10,8 +9,7 @@ import ( const testDist = "go-ipfs" func TestDistVersions(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() fetcher := NewHttpFetcher(testIpfsDist, testServer.URL, "", 0) @@ -27,8 +25,7 @@ func TestDistVersions(t *testing.T) { } func TestLatestDistVersion(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() fetcher := NewHttpFetcher(testIpfsDist, testServer.URL, "", 0) diff --git a/repo/fsrepo/misc.go b/repo/fsrepo/misc.go index 7824f2f4f..fa5b235e2 100644 --- a/repo/fsrepo/misc.go +++ b/repo/fsrepo/misc.go @@ -4,7 +4,7 @@ import ( "os" config "github.com/ipfs/kubo/config" - homedir "github.com/mitchellh/go-homedir" + "github.com/ipfs/kubo/misc/fsutil" ) // BestKnownPath returns the best known fsrepo path. 
If the ENV override is @@ -15,7 +15,7 @@ func BestKnownPath() (string, error) { if os.Getenv(config.EnvDir) != "" { ipfsPath = os.Getenv(config.EnvDir) } - ipfsPath, err := homedir.Expand(ipfsPath) + ipfsPath, err := fsutil.ExpandHome(ipfsPath) if err != nil { return "", err } diff --git a/routing/composer.go b/routing/composer.go index 3541fc7dd..500fa371e 100644 --- a/routing/composer.go +++ b/routing/composer.go @@ -4,7 +4,6 @@ import ( "context" "errors" - "github.com/hashicorp/go-multierror" "github.com/ipfs/go-cid" routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" "github.com/libp2p/go-libp2p/core/peer" @@ -124,7 +123,7 @@ func (c *Composer) Bootstrap(ctx context.Context) error { errgv := c.GetValueRouter.Bootstrap(ctx) errpv := c.PutValueRouter.Bootstrap(ctx) errp := c.ProvideRouter.Bootstrap(ctx) - err := multierror.Append(errfp, errfps, errgv, errpv, errp) + err := errors.Join(errfp, errfps, errgv, errpv, errp) if err != nil { log.Debug("composer: calling bootstrap error: ", err) } diff --git a/routing/delegated.go b/routing/delegated.go index e830c1aa1..1c6d45ae1 100644 --- a/routing/delegated.go +++ b/routing/delegated.go @@ -6,11 +6,13 @@ import ( "errors" "fmt" "net/http" + "path" + "strings" drclient "github.com/ipfs/boxo/routing/http/client" "github.com/ipfs/boxo/routing/http/contentrouter" "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" version "github.com/ipfs/kubo" "github.com/ipfs/kubo/config" dht "github.com/libp2p/go-libp2p-kad-dht" @@ -24,10 +26,18 @@ import ( "github.com/libp2p/go-libp2p/core/routing" ma "github.com/multiformats/go-multiaddr" "go.opencensus.io/stats/view" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) var log = logging.Logger("routing/delegated") +// Parse creates a composed router from the custom routing configuration. 
+// +// EXPERIMENTAL: Custom routing (Routing.Type=custom with Routing.Routers and +// Routing.Methods) is for research and testing only, not production use. +// The configuration format and behavior may change without notice between +// releases. HTTP-only configurations cannot reliably provide content. +// See docs/delegated-routing.md for limitations. func Parse(routers config.Routers, methods config.Methods, extraDHT *ExtraDHTParams, extraHTTP *ExtraHTTPParams) (routing.Routing, error) { if err := methods.Check(); err != nil { return nil, err @@ -149,12 +159,13 @@ func parse(visited map[string]bool, } type ExtraHTTPParams struct { - PeerID string - Addrs []string - PrivKeyB64 string + PeerID string + Addrs []string + PrivKeyB64 string + HTTPRetrieval bool } -func ConstructHTTPRouter(endpoint string, peerID string, addrs []string, privKey string) (routing.Routing, error) { +func ConstructHTTPRouter(endpoint string, peerID string, addrs []string, privKey string, httpRetrieval bool) (routing.Routing, error) { return httpRoutingFromConfig( config.Router{ Type: "http", @@ -163,9 +174,10 @@ func ConstructHTTPRouter(endpoint string, peerID string, addrs []string, privKey }, }, &ExtraHTTPParams{ - PeerID: peerID, - Addrs: addrs, - PrivKeyB64: privKey, + PeerID: peerID, + Addrs: addrs, + PrivKeyB64: privKey, + HTTPRetrieval: httpRetrieval, }, ) } @@ -185,8 +197,27 @@ func httpRoutingFromConfig(conf config.Router, extraHTTP *ExtraHTTPParams) (rout delegateHTTPClient := &http.Client{ Transport: &drclient.ResponseBodyLimitedTransport{ - RoundTripper: transport, - LimitBytes: 1 << 20, + RoundTripper: otelhttp.NewTransport(transport, + otelhttp.WithSpanNameFormatter(func(operation string, req *http.Request) string { + if req.Method == http.MethodGet { + switch { + case strings.HasPrefix(req.URL.Path, "/routing/v1/providers"): + return "DelegatedHTTPClient.FindProviders" + case strings.HasPrefix(req.URL.Path, "/routing/v1/peers"): + return "DelegatedHTTPClient.FindPeers" + 
case strings.HasPrefix(req.URL.Path, "/routing/v1/ipns"): + return "DelegatedHTTPClient.GetIPNS" + } + } else if req.Method == http.MethodPut { + switch { + case strings.HasPrefix(req.URL.Path, "/routing/v1/ipns"): + return "DelegatedHTTPClient.PutIPNS" + } + } + return "DelegatedHTTPClient." + path.Dir(req.URL.Path) + }), + ), + LimitBytes: 1 << 20, }, } @@ -200,12 +231,20 @@ func httpRoutingFromConfig(conf config.Router, extraHTTP *ExtraHTTPParams) (rout return nil, err } + protocols := config.DefaultHTTPRoutersFilterProtocols + if extraHTTP.HTTPRetrieval { + protocols = append(protocols, "transport-ipfs-gateway-http") + } + cli, err := drclient.New( params.Endpoint, drclient.WithHTTPClient(delegateHTTPClient), drclient.WithIdentity(key), drclient.WithProviderInfo(addrInfo.ID, addrInfo.Addrs), drclient.WithUserAgent(version.GetUserAgentVersion()), + drclient.WithProtocolFilter(protocols), + drclient.WithStreamResultsRequired(), // https://specs.ipfs.tech/routing/http-routing-v1/#streaming + drclient.WithDisabledLocalFiltering(false), // force local filtering in case remote server does not support IPIP-484 ) if err != nil { return nil, err diff --git a/routing/delegated_test.go b/routing/delegated_test.go index 028f3b465..028503a37 100644 --- a/routing/delegated_test.go +++ b/routing/delegated_test.go @@ -22,7 +22,7 @@ func TestParser(t *testing.T) { Router: config.Router{ Type: config.RouterTypeHTTP, Parameters: &config.HTTPRouterParams{ - Endpoint: "testEndpoint", + Endpoint: "http://testEndpoint", }, }, }, @@ -79,7 +79,7 @@ func TestParserRecursive(t *testing.T) { Router: config.Router{ Type: config.RouterTypeHTTP, Parameters: &config.HTTPRouterParams{ - Endpoint: "testEndpoint1", + Endpoint: "http://testEndpoint1", }, }, }, @@ -87,7 +87,7 @@ func TestParserRecursive(t *testing.T) { Router: config.Router{ Type: config.RouterTypeHTTP, Parameters: &config.HTTPRouterParams{ - Endpoint: "testEndpoint2", + Endpoint: "http://testEndpoint2", }, }, }, @@ -95,7 +95,7 @@ 
func TestParserRecursive(t *testing.T) { Router: config.Router{ Type: config.RouterTypeHTTP, Parameters: &config.HTTPRouterParams{ - Endpoint: "testEndpoint3", + Endpoint: "http://testEndpoint3", }, }, }, diff --git a/test/3nodetest/bootstrap/Dockerfile b/test/3nodetest/bootstrap/Dockerfile index ed8ac9ffa..e5423f116 100644 --- a/test/3nodetest/bootstrap/Dockerfile +++ b/test/3nodetest/bootstrap/Dockerfile @@ -6,6 +6,6 @@ RUN mv -f /tmp/id/config /root/.ipfs/config RUN ipfs id ENV IPFS_PROF true -ENV IPFS_LOGGING_FMT nocolor +ENV GOLOG_LOG_FMT nocolor EXPOSE 4011 4012/udp diff --git a/test/3nodetest/bootstrap/config b/test/3nodetest/bootstrap/config index ac441a19f..e22f25e90 100644 --- a/test/3nodetest/bootstrap/config +++ b/test/3nodetest/bootstrap/config @@ -15,7 +15,8 @@ }, "Mounts": { "IPFS": "/ipfs", - "IPNS": "/ipns" + "IPNS": "/ipns", + "MFS": "/mfs" }, "Version": { "Current": "0.1.7", diff --git a/test/3nodetest/client/Dockerfile b/test/3nodetest/client/Dockerfile index d4e1ffa36..3e7ada6c0 100644 --- a/test/3nodetest/client/Dockerfile +++ b/test/3nodetest/client/Dockerfile @@ -8,7 +8,7 @@ RUN ipfs id EXPOSE 4031 4032/udp ENV IPFS_PROF true -ENV IPFS_LOGGING_FMT nocolor +ENV GOLOG_LOG_FMT nocolor ENTRYPOINT ["/bin/bash"] CMD ["/tmp/id/run.sh"] diff --git a/test/3nodetest/client/config b/test/3nodetest/client/config index 86ef0668d..fa8f923d5 100644 --- a/test/3nodetest/client/config +++ b/test/3nodetest/client/config @@ -17,7 +17,8 @@ }, "Mounts": { "IPFS": "/ipfs", - "IPNS": "/ipns" + "IPNS": "/ipns", + "MFS": "/mfs" }, "Version": { "AutoUpdate": "minor", diff --git a/test/3nodetest/fig.yml b/test/3nodetest/fig.yml index 18a28c8ff..f163398c2 100644 --- a/test/3nodetest/fig.yml +++ b/test/3nodetest/fig.yml @@ -11,7 +11,7 @@ bootstrap: - "4011" - "4012/udp" environment: - IPFS_LOGGING: debug + GOLOG_LOG_LEVEL: debug server: build: ./server @@ -23,7 +23,7 @@ server: - "4021" - "4022/udp" environment: - IPFS_LOGGING: debug + GOLOG_LOG_LEVEL: debug client: 
build: ./client @@ -35,4 +35,4 @@ client: - "4031" - "4032/udp" environment: - IPFS_LOGGING: debug + GOLOG_LOG_LEVEL: debug diff --git a/test/3nodetest/server/Dockerfile b/test/3nodetest/server/Dockerfile index 935d2e1b0..72f6fdf57 100644 --- a/test/3nodetest/server/Dockerfile +++ b/test/3nodetest/server/Dockerfile @@ -9,7 +9,7 @@ RUN chmod +x /tmp/test/run.sh EXPOSE 4021 4022/udp ENV IPFS_PROF true -ENV IPFS_LOGGING_FMT nocolor +ENV GOLOG_LOG_FMT nocolor ENTRYPOINT ["/bin/bash"] CMD ["/tmp/test/run.sh"] diff --git a/test/3nodetest/server/config b/test/3nodetest/server/config index fb16a6d7a..1e9db2a63 100644 --- a/test/3nodetest/server/config +++ b/test/3nodetest/server/config @@ -17,7 +17,8 @@ }, "Mounts": { "IPFS": "/ipfs", - "IPNS": "/ipns" + "IPNS": "/ipns", + "MFS": "/mfs" }, "Version": { "AutoUpdate": "minor", diff --git a/test/3nodetest/server/run.sh b/test/3nodetest/server/run.sh index dfe586310..17ae38736 100644 --- a/test/3nodetest/server/run.sh +++ b/test/3nodetest/server/run.sh @@ -9,7 +9,7 @@ echo "3nodetest> starting server daemon" # run daemon in debug mode to collect profiling data ipfs daemon --debug & sleep 3 -# TODO instead of bootrapping: ipfs swarm connect /ip4/$BOOTSTRAP_PORT_4011_TCP_ADDR/tcp/$BOOTSTRAP_PORT_4011_TCP_PORT/p2p/QmNXuBh8HFsWq68Fid8dMbGNQTh7eG6hV9rr1fQyfmfomE +# TODO instead of bootstrapping: ipfs swarm connect /ip4/$BOOTSTRAP_PORT_4011_TCP_ADDR/tcp/$BOOTSTRAP_PORT_4011_TCP_PORT/p2p/QmNXuBh8HFsWq68Fid8dMbGNQTh7eG6hV9rr1fQyfmfomE # change dir before running add commands so ipfs client profiling data doesn't # overwrite the daemon profiling data diff --git a/test/bin/Rules.mk b/test/bin/Rules.mk index 4e264106a..aaa46695b 100644 --- a/test/bin/Rules.mk +++ b/test/bin/Rules.mk @@ -5,7 +5,7 @@ TGTS_$(d) := define go-build-testdep OUT="$(CURDIR)/$@" ; \ cd "test/dependencies" ; \ - $(GOCC) build $(go-flags-with-tags) -o "$${OUT}" "$<" + $(GOCC) build $(go-flags-with-tags) -o "$${OUT}" "$<" 2>&1 endef .PHONY: 
github.com/ipfs/kubo/test/dependencies/pollEndpoint @@ -58,13 +58,13 @@ $(d)/cid-fmt: github.com/ipfs/go-cidutil/cid-fmt $(go-build-testdep) TGTS_$(d) += $(d)/cid-fmt -.PHONY: github.com/jbenet/go-random/random -$(d)/random: github.com/jbenet/go-random/random +.PHONY: github.com/ipfs/go-test/cli/random-data +$(d)/random-data: github.com/ipfs/go-test/cli/random-data $(go-build-testdep) -TGTS_$(d) += $(d)/random +TGTS_$(d) += $(d)/random-data -.PHONY: github.com/jbenet/go-random-files/random-files -$(d)/random-files: github.com/jbenet/go-random-files/random-files +.PHONY: github.com/ipfs/go-test/cli/random-files +$(d)/random-files: github.com/ipfs/go-test/cli/random-files $(go-build-testdep) TGTS_$(d) += $(d)/random-files diff --git a/test/cli/add_test.go b/test/cli/add_test.go index ae652989a..cda0c977d 100644 --- a/test/cli/add_test.go +++ b/test/cli/add_test.go @@ -1,13 +1,34 @@ package cli import ( + "io" + "os" + "path/filepath" + "strings" "testing" + "time" + "github.com/dustin/go-humanize" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" + "github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +// waitForLogMessage polls a buffer for a log message, waiting up to timeout duration. +// Returns true if message found, false if timeout reached. 
+func waitForLogMessage(buffer *harness.Buffer, message string, timeout time.Duration) bool { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + if strings.Contains(buffer.String(), message) { + return true + } + time.Sleep(100 * time.Millisecond) + } + return false +} + func TestAdd(t *testing.T) { t.Parallel() @@ -19,6 +40,11 @@ func TestAdd(t *testing.T) { shortStringCidV1Sha512 = "bafkrgqbqt3gerhas23vuzrapkdeqf4vu2dwxp3srdj6hvg6nhsug2tgyn6mj3u23yx7utftq3i2ckw2fwdh5qmhid5qf3t35yvkc5e5ottlw6" ) + const ( + cidV0Length = 34 // cidv0 sha2-256 + cidV1Length = 36 // cidv1 sha2-256 + ) + t.Run("produced cid version: implicit default (CIDv0)", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() @@ -96,6 +122,71 @@ func TestAdd(t *testing.T) { require.Equal(t, shortStringCidV1NoRawLeaves, cidStr) }) + t.Run("ipfs add --pin-name=foo", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + pinName := "test-pin-name" + cidStr := node.IPFSAddStr(shortString, "--pin-name", pinName) + require.Equal(t, shortStringCidV0, cidStr) + + pinList := node.IPFS("pin", "ls", "--names").Stdout.Trimmed() + require.Contains(t, pinList, shortStringCidV0) + require.Contains(t, pinList, pinName) + }) + + t.Run("ipfs add --pin=false --pin-name=foo returns an error", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Use RunIPFS to allow for errors without assertion + result := node.RunIPFS("add", "--pin=false", "--pin-name=foo") + require.Error(t, result.Err, "Expected an error due to incompatible --pin and --pin-name") + require.Contains(t, result.Stderr.String(), "pin-name option requires pin to be set") + }) + + t.Run("ipfs add --pin-name without value should fail", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + 
+ // When --pin-name is passed without any value, it should fail + result := node.RunIPFS("add", "--pin-name") + require.Error(t, result.Err, "Expected an error when --pin-name has no value") + require.Contains(t, result.Stderr.String(), "missing argument for option \"pin-name\"") + }) + + t.Run("produced unixfs max file links: command flag --max-file-links overrides configuration in Import.UnixFSFileMaxLinks", func(t *testing.T) { + t.Parallel() + + // + // UnixFSChunker=size-262144 (256KiB) + // Import.UnixFSFileMaxLinks=174 + node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0") // legacy-cid-v0 for determinism across all params + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.UnixFSChunker = *config.NewOptionalString("size-262144") // 256 KiB chunks + cfg.Import.UnixFSFileMaxLinks = *config.NewOptionalInteger(174) // max 174 per level + }) + node.StartDaemon() + defer node.StopDaemon() + + // Add 174MiB file: + // 1024 * 256KiB should fit in single layer + seed := shortString + cidStr := node.IPFSAddDeterministic("262144KiB", seed, "--max-file-links", "1024") + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + + // Expect 1024 links due to cli parameter raising link limit from 174 to 1024 + require.Equal(t, 1024, len(root.Links)) + // expect same CID every time + require.Equal(t, "QmbBftNHWmjSWKLC49dMVrfnY8pjrJYntiAXirFJ7oJrNk", cidStr) + }) + t.Run("ipfs init --profile=legacy-cid-v0 sets config that produces legacy CIDv0", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0") @@ -106,13 +197,482 @@ func TestAdd(t *testing.T) { require.Equal(t, shortStringCidV0, cidStr) }) - t.Run("ipfs init --profile=test-cid-v1 produces modern CIDv1", func(t *testing.T) { + t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSChunker=size-262144 and UnixFSFileMaxLinks", func(t *testing.T) { + t.Parallel() + seed := "v0-seed" + profile := "--profile=legacy-cid-v0" + + t.Run("under 
UnixFSFileMaxLinks=174", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + // Add 44544KiB file: + // 174 * 256KiB should fit in single DAG layer + cidStr := node.IPFSAddDeterministic("44544KiB", seed) + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 174, len(root.Links)) + // expect same CID every time + require.Equal(t, "QmUbBALi174SnogsUzLpYbD4xPiBSFANF4iztWCsHbMKh2", cidStr) + }) + + t.Run("above UnixFSFileMaxLinks=174", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + // add 256KiB (one more block), it should force rebalancing DAG and moving most to second layer + cidStr := node.IPFSAddDeterministic("44800KiB", seed) + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 2, len(root.Links)) + // expect same CID every time + require.Equal(t, "QmepeWtdmS1hHXx1oZXsPUv6bMrfRRKfZcoPPU4eEfjnbf", cidStr) + }) + }) + + t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) { + t.Parallel() + seed := "hamt-legacy-cid-v0" + profile := "--profile=legacy-cid-v0" + + t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + + randDir, err := os.MkdirTemp(node.Dir, seed) + require.NoError(t, err) + + // Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total + err = createDirectoryForHAMT(randDir, cidV0Length, "255KiB", seed) + require.NoError(t, err) + cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() + + // Confirm the number of links is more than UnixFSHAMTDirectorySizeThreshold (indicating regular "basic" directory" + root, err := 
node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 903, len(root.Links)) + }) + + t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + + randDir, err := os.MkdirTemp(node.Dir, seed) + require.NoError(t, err) + + // Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total + err = createDirectoryForHAMT(randDir, cidV0Length, "257KiB", seed) + require.NoError(t, err) + cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() + + // Confirm this time, the number of links is less than UnixFSHAMTDirectorySizeThreshold + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 252, len(root.Links)) + }) + }) + + t.Run("ipfs init --profile=test-cid-v1 produces CIDv1 with raw leaves", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init("--profile=test-cid-v1") node.StartDaemon() defer node.StopDaemon() cidStr := node.IPFSAddStr(shortString) - require.Equal(t, shortStringCidV1, cidStr) + require.Equal(t, shortStringCidV1, cidStr) // raw leaf + }) + + t.Run("ipfs init --profile=test-cid-v1 applies UnixFSChunker=size-1048576", func(t *testing.T) { + t.Parallel() + seed := "v1-seed" + profile := "--profile=test-cid-v1" + + t.Run("under UnixFSFileMaxLinks=174", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + // Add 174MiB file: + // 174 * 1MiB should fit in single layer + cidStr := node.IPFSAddDeterministic("174MiB", seed) + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 174, len(root.Links)) + // expect same CID every time + require.Equal(t, "bafybeigwduxcf2aawppv3isnfeshnimkyplvw3hthxjhr2bdeje4tdaicu", cidStr) + }) + + t.Run("above UnixFSFileMaxLinks=174", func(t *testing.T) { 
+ t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + // add +1MiB (one more block), it should force rebalancing DAG and moving most to second layer + cidStr := node.IPFSAddDeterministic("175MiB", seed) + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 2, len(root.Links)) + // expect same CID every time + require.Equal(t, "bafybeidhd7lo2n2v7lta5yamob3xwhbxcczmmtmhquwhjesi35jntf7mpu", cidStr) + }) + }) + + t.Run("ipfs init --profile=test-cid-v1 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) { + t.Parallel() + seed := "hamt-cid-v1" + profile := "--profile=test-cid-v1" + + t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + + randDir, err := os.MkdirTemp(node.Dir, seed) + require.NoError(t, err) + + // Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total + err = createDirectoryForHAMT(randDir, cidV1Length, "255KiB", seed) + require.NoError(t, err) + cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() + + // Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating regular "basic" directory" + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 897, len(root.Links)) + }) + + t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + + randDir, err := os.MkdirTemp(node.Dir, seed) + require.NoError(t, err) + + // Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total + err = createDirectoryForHAMT(randDir, cidV1Length, "257KiB", seed) + 
require.NoError(t, err) + cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() + + // Confirm this time, the number of links is less than UnixFSHAMTDirectoryMaxFanout + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 252, len(root.Links)) + }) + }) + + t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSChunker=size-1048576 and UnixFSFileMaxLinks=1024", func(t *testing.T) { + t.Parallel() + seed := "v1-seed-1024" + profile := "--profile=test-cid-v1-wide" + + t.Run("under UnixFSFileMaxLinks=1024", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + // Add 174MiB file: + // 1024 * 1MiB should fit in single layer + cidStr := node.IPFSAddDeterministic("1024MiB", seed) + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 1024, len(root.Links)) + // expect same CID every time + require.Equal(t, "bafybeiej5w63ir64oxgkr5htqmlerh5k2rqflurn2howimexrlkae64xru", cidStr) + }) + + t.Run("above UnixFSFileMaxLinks=1024", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + // add +1MiB (one more block), it should force rebalancing DAG and moving most to second layer + cidStr := node.IPFSAddDeterministic("1025MiB", seed) + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 2, len(root.Links)) + // expect same CID every time + require.Equal(t, "bafybeieilp2qx24pe76hxrxe6bpef5meuxto3kj5dd6mhb5kplfeglskdm", cidStr) + }) + }) + + t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) { + t.Parallel() + seed := "hamt-cid-v1" + profile := "--profile=test-cid-v1-wide" + + t.Run("under UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + 
node.StartDaemon() + defer node.StopDaemon() + + randDir, err := os.MkdirTemp(node.Dir, seed) + require.NoError(t, err) + + // Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total + err = createDirectoryForHAMT(randDir, cidV1Length, "1023KiB", seed) + require.NoError(t, err) + cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() + + // Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating regular "basic" directory" + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 3599, len(root.Links)) + }) + + t.Run("above UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init(profile) + node.StartDaemon() + defer node.StopDaemon() + + randDir, err := os.MkdirTemp(node.Dir, seed) + require.NoError(t, err) + + // Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total + err = createDirectoryForHAMT(randDir, cidV1Length, "1025KiB", seed) + require.NoError(t, err) + cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() + + // Confirm this time, the number of links is less than UnixFSHAMTDirectoryMaxFanout + root, err := node.InspectPBNode(cidStr) + assert.NoError(t, err) + require.Equal(t, 992, len(root.Links)) + }) }) } + +func TestAddFastProvide(t *testing.T) { + t.Parallel() + + const ( + shortString = "hello world" + shortStringCidV0 = "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD" // cidv0 - dag-pb - sha2-256 + ) + + t.Run("fast-provide-root disabled via config: verify skipped in logs", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.False + }) + + // Start daemon with debug logging + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + 
harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + cidStr := node.IPFSAddStr(shortString) + require.Equal(t, shortStringCidV0, cidStr) + + // Verify fast-provide-root was disabled + daemonLog := node.Daemon.Stderr.String() + require.Contains(t, daemonLog, "fast-provide-root: skipped") + }) + + t.Run("fast-provide-root enabled with wait=false: verify async provide", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + // Use default config (FastProvideRoot=true, FastProvideWait=false) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + cidStr := node.IPFSAddStr(shortString) + require.Equal(t, shortStringCidV0, cidStr) + + daemonLog := node.Daemon.Stderr + // Should see async mode started + require.Contains(t, daemonLog.String(), "fast-provide-root: enabled") + require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously") + + // Wait for async completion or failure (up to 11 seconds - slightly more than fastProvideTimeout) + // In test environment with no DHT peers, this will fail with "failed to find any peer in table" + completedOrFailed := waitForLogMessage(daemonLog, "async provide completed", 11*time.Second) || + waitForLogMessage(daemonLog, "async provide failed", 11*time.Second) + require.True(t, completedOrFailed, "async provide should complete or fail within timeout") + }) + + t.Run("fast-provide-root enabled with wait=true: verify sync provide", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideWait = config.True + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + 
harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + // Use Runner.Run with stdin to allow for expected errors + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"add", "-q"}, + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdin(strings.NewReader(shortString)), + }, + }) + + // In sync mode (wait=true), provide errors propagate and fail the command. + // Test environment uses 'test' profile with no bootstrappers, and CI has + // insufficient peers for proper DHT puts, so we expect this to fail with + // "failed to find any peer in table" error from the DHT. + require.Equal(t, 1, res.ExitCode()) + require.Contains(t, res.Stderr.String(), "Error: fast-provide: failed to find any peer in table") + + daemonLog := node.Daemon.Stderr.String() + // Should see sync mode started + require.Contains(t, daemonLog, "fast-provide-root: enabled") + require.Contains(t, daemonLog, "fast-provide-root: providing synchronously") + require.Contains(t, daemonLog, "sync provide failed") // Verify the failure was logged + }) + + t.Run("fast-provide-wait ignored when root disabled", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.False + cfg.Import.FastProvideWait = config.True + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + cidStr := node.IPFSAddStr(shortString) + require.Equal(t, shortStringCidV0, cidStr) + + daemonLog := node.Daemon.Stderr.String() + require.Contains(t, daemonLog, "fast-provide-root: skipped") + require.Contains(t, daemonLog, "wait-flag-ignored") + }) + + t.Run("CLI flag overrides config: flag=true overrides 
config=false", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.False + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + cidStr := node.IPFSAddStr(shortString, "--fast-provide-root=true") + require.Equal(t, shortStringCidV0, cidStr) + + daemonLog := node.Daemon.Stderr + // Flag should enable it despite config saying false + require.Contains(t, daemonLog.String(), "fast-provide-root: enabled") + require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously") + }) + + t.Run("CLI flag overrides config: flag=false overrides config=true", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.True + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + cidStr := node.IPFSAddStr(shortString, "--fast-provide-root=false") + require.Equal(t, shortStringCidV0, cidStr) + + daemonLog := node.Daemon.Stderr.String() + // Flag should disable it despite config saying true + require.Contains(t, daemonLog, "fast-provide-root: skipped") + }) +} + +// createDirectoryForHAMT aims to create enough files with long names for the directory block to be close to the UnixFSHAMTDirectorySizeThreshold. +// The calculation is based on boxo's HAMTShardingSize and sizeBelowThreshold which calculates ballpark size of the block +// by adding length of link names and the binary cid length. 
+// See https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L491 +func createDirectoryForHAMT(dirPath string, cidLength int, unixfsNodeSizeTarget, seed string) error { + hamtThreshold, err := humanize.ParseBytes(unixfsNodeSizeTarget) + if err != nil { + return err + } + + // Calculate how many files with long filenames are needed to hit UnixFSHAMTDirectorySizeThreshold + nameLen := 255 // max that works across windows/macos/linux + alphabetLen := len(testutils.AlphabetEasy) + numFiles := int(hamtThreshold) / (nameLen + cidLength) + + // Deterministic pseudo-random bytes for static CID + drand, err := testutils.DeterministicRandomReader(unixfsNodeSizeTarget, seed) + if err != nil { + return err + } + + // Create necessary files in a single, flat directory + for i := 0; i < numFiles; i++ { + buf := make([]byte, nameLen) + _, err := io.ReadFull(drand, buf) + if err != nil { + return err + } + + // Convert deterministic pseudo-random bytes to ASCII + var sb strings.Builder + + for _, b := range buf { + // Map byte to printable ASCII range (33-126) + char := testutils.AlphabetEasy[int(b)%alphabetLen] + sb.WriteRune(char) + } + filename := sb.String()[:nameLen] + filePath := filepath.Join(dirPath, filename) + + // Create empty file + f, err := os.Create(filePath) + if err != nil { + return err + } + f.Close() + } + return nil +} diff --git a/test/cli/agent_version_unicode_test.go b/test/cli/agent_version_unicode_test.go new file mode 100644 index 000000000..732f13466 --- /dev/null +++ b/test/cli/agent_version_unicode_test.go @@ -0,0 +1,220 @@ +package cli + +import ( + "strings" + "testing" + + "github.com/ipfs/kubo/core/commands/cmdutils" + "github.com/stretchr/testify/assert" +) + +func TestCleanAndTrimUnicode(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "Basic ASCII", + input: "kubo/1.0.0", + expected: "kubo/1.0.0", + }, + { + name: "Polish characters 
preserved", + input: "test-ąęćłńóśźż", + expected: "test-ąęćłńóśźż", + }, + { + name: "Chinese characters preserved", + input: "版本-中文测试", + expected: "版本-中文测试", + }, + { + name: "Arabic text preserved", + input: "اختبار-العربية", + expected: "اختبار-العربية", + }, + { + name: "Emojis preserved", + input: "version-1.0-🚀-🎉", + expected: "version-1.0-🚀-🎉", + }, + { + name: "Complex Unicode with combining marks preserved", + input: "h̸̢̢̢̢̢̢̢̢̢̢e̵̵̵̵̵̵̵̵̵̵l̷̷̷̷̷̷̷̷̷̷l̶̶̶̶̶̶̶̶̶̶o̴̴̴̴̴̴̴̴̴̴", + expected: "h̸̢̢̢̢̢̢̢̢̢̢e̵̵̵̵̵̵̵̵̵̵l̷̷̷̷̷̷̷̷̷̷l̶̶̶̶̶̶̶̶̶̶o̴̴̴̴̴̴̴̴̴̴", // Preserved as-is (only 50 runes) + }, + { + name: "Long text with combining marks truncated at 128", + input: strings.Repeat("ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́", 10), // Very long text (260 runes) + expected: "ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂͌́ẽ̸̢̛̖̬͈͉͖͇͈̭̥́̓̌̾͊̊̂̄̍̅̂", // Truncated at 128 runes + }, + { + name: "Zero-width characters replaced with U+FFFD", + input: "test\u200Bzero\u200Cwidth\u200D\uFEFFchars", + expected: "test�zero�width��chars", + }, + { + name: "RTL/LTR override replaced with U+FFFD", + input: "test\u202Drtl\u202Eltr\u202Aoverride", + expected: "test�rtl�ltr�override", + }, + { + name: "Bidi isolates replaced with U+FFFD", + input: "test\u2066bidi\u2067isolate\u2068text\u2069end", + expected: "test�bidi�isolate�text�end", + }, + { + name: "Control characters replaced with U+FFFD", + input: "test\x00null\x1Fescape\x7Fdelete", + expected: "test�null�escape�delete", + }, + { + name: "Combining marks preserved", + input: "e\u0301\u0302\u0303\u0304\u0305", // e with 5 combining marks + expected: "e\u0301\u0302\u0303\u0304\u0305", // All preserved + }, + { + name: "No truncation at 70 characters", + input: "123456789012345678901234567890123456789012345678901234567890123456789", + expected: "123456789012345678901234567890123456789012345678901234567890123456789", + }, + { + name: "No truncation with Unicode - 70 rockets preserved", + 
input: strings.Repeat("🚀", 70), + expected: strings.Repeat("🚀", 70), + }, + { + name: "Empty string", + input: "", + expected: "", + }, + { + name: "Only whitespace with control chars", + input: " \t\n ", + expected: "\uFFFD\uFFFD", // Tab and newline become U+FFFD, spaces trimmed + }, + { + name: "Leading and trailing whitespace", + input: " test ", + expected: "test", + }, + { + name: "Complex mix - invisible chars replaced with U+FFFD, Unicode preserved", + input: "kubo/1.0-🚀\u200B h̸̢̏̔ḛ̶̽̀s̵t\u202E-ąęł-中文", + expected: "kubo/1.0-🚀� h̸̢̏̔ḛ̶̽̀s̵t�-ąęł-中文", + }, + { + name: "Emoji with skin tone preserved", + input: "👍🏽", // Thumbs up with skin tone modifier + expected: "👍🏽", // Preserved as-is + }, + { + name: "Mixed scripts preserved", + input: "Hello-你好-مرحبا-Здравствуйте", + expected: "Hello-你好-مرحبا-Здравствуйте", + }, + { + name: "Format characters replaced with U+FFFD", + input: "test\u00ADsoft\u2060word\u206Fnom\u200Ebreak", + expected: "test�soft�word�nom�break", // Soft hyphen, word joiner, etc replaced + }, + { + name: "Complex Unicode text with many combining marks (91 runes, no truncation)", + input: "ț̸̢͙̞̖̏̔ȩ̶̰͓̪͎̱̠̥̳͔̽̀̃̿̌̾̀͗̕̕͜s̵̢̛̖̬͈͉͖͇͈̭̥̃́̓̌̾͊̊̂̄̍̅̂͌́ͅţ̴̯̹̪͖͓̘̊́̑̄̋̈́͐̈́̔̇̄̂́̎̓͛͠ͅ test", + expected: "ț̸̢͙̞̖̏̔ȩ̶̰͓̪͎̱̠̥̳͔̽̀̃̿̌̾̀͗̕̕͜s̵̢̛̖̬͈͉͖͇͈̭̥̃́̓̌̾͊̊̂̄̍̅̂͌́ͅţ̴̯̹̪͖͓̘̊́̑̄̋̈́͐̈́̔̇̄̂́̎̓͛͠ͅ test", // Not truncated (91 < 128) + }, + { + name: "Truncation at 128 characters", + input: strings.Repeat("a", 150), + expected: strings.Repeat("a", 128), + }, + { + name: "Truncation with Unicode at 128", + input: strings.Repeat("🚀", 150), + expected: strings.Repeat("🚀", 128), + }, + { + name: "Private use characters preserved (per spec)", + input: "test\uE000\uF8FF", // Private use area characters + expected: "test\uE000\uF8FF", // Should be preserved + }, + { + name: "U+FFFD replacement for multiple categories", + input: "a\x00b\u200Cc\u202Ed", // control, format chars + expected: "a\uFFFDb\uFFFDc\uFFFDd", // All replaced with U+FFFD + }, + } + + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + result := cmdutils.CleanAndTrim(tt.input) + assert.Equal(t, tt.expected, result, "CleanAndTrim(%q) = %q, want %q", tt.input, result, tt.expected) + }) + } +} + +func TestCleanAndTrimIdempotent(t *testing.T) { + // Test that applying CleanAndTrim twice gives the same result + inputs := []string{ + "test-ąęćłńóśźż", + "版本-中文测试", + "version-1.0-🚀-🎉", + "h̸e̵l̷l̶o̴ w̸o̵r̷l̶d̴", + "test\u200Bzero\u200Cwidth", + } + + for _, input := range inputs { + once := cmdutils.CleanAndTrim(input) + twice := cmdutils.CleanAndTrim(once) + assert.Equal(t, once, twice, "CleanAndTrim should be idempotent for %q", input) + } +} + +func TestCleanAndTrimSecurity(t *testing.T) { + // Test that all invisible/dangerous characters are removed + tests := []struct { + name string + input string + check func(string) bool + }{ + { + name: "No zero-width spaces", + input: "test\u200B\u200C\u200Dtest", + check: func(s string) bool { + return !strings.Contains(s, "\u200B") && !strings.Contains(s, "\u200C") && !strings.Contains(s, "\u200D") + }, + }, + { + name: "No bidi overrides", + input: "test\u202A\u202B\u202C\u202D\u202Etest", + check: func(s string) bool { + for _, r := range []rune{0x202A, 0x202B, 0x202C, 0x202D, 0x202E} { + if strings.ContainsRune(s, r) { + return false + } + } + return true + }, + }, + { + name: "No control characters", + input: "test\x00\x01\x02\x1F\x7Ftest", + check: func(s string) bool { + for _, r := range s { + if r < 0x20 || r == 0x7F { + return false + } + } + return true + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := cmdutils.CleanAndTrim(tt.input) + assert.True(t, tt.check(result), "Security check failed for %q -> %q", tt.input, result) + }) + } +} diff --git a/test/cli/api_file_test.go b/test/cli/api_file_test.go new file mode 100644 index 000000000..a0ba30fd2 --- /dev/null +++ b/test/cli/api_file_test.go @@ -0,0 +1,104 @@ +package cli + +import ( + "net/http" 
+ "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +// TestAddressFileReady verifies that when address files ($IPFS_PATH/api and +// $IPFS_PATH/gateway) are created, the corresponding HTTP servers are ready +// to accept connections immediately. This prevents race conditions for tools +// like systemd path units that start services when these files appear. +func TestAddressFileReady(t *testing.T) { + t.Parallel() + + t.Run("api file", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode().Init() + + // Start daemon in background (don't use StartDaemon which waits for API) + res := node.Runner.MustRun(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"daemon"}, + RunFunc: (*exec.Cmd).Start, + }) + node.Daemon = res + defer node.StopDaemon() + + // Poll for api file to appear + apiFile := filepath.Join(node.Dir, "api") + var fileExists bool + for i := 0; i < 100; i++ { + if _, err := os.Stat(apiFile); err == nil { + fileExists = true + break + } + time.Sleep(100 * time.Millisecond) + } + require.True(t, fileExists, "api file should be created") + + // Read the api file to get the address + apiAddr, err := node.TryAPIAddr() + require.NoError(t, err) + + // Extract IP and port from multiaddr + ip, err := apiAddr.ValueForProtocol(4) // P_IP4 + require.NoError(t, err) + port, err := apiAddr.ValueForProtocol(6) // P_TCP + require.NoError(t, err) + + // Immediately try to use the API - should work on first attempt + url := "http://" + ip + ":" + port + "/api/v0/id" + resp, err := http.Post(url, "", nil) + require.NoError(t, err, "RPC API should be ready immediately when api file exists") + defer resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + }) + + t.Run("gateway file", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode().Init() + + // Start daemon in background + res := 
node.Runner.MustRun(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"daemon"}, + RunFunc: (*exec.Cmd).Start, + }) + node.Daemon = res + defer node.StopDaemon() + + // Poll for gateway file to appear + gatewayFile := filepath.Join(node.Dir, "gateway") + var fileExists bool + for i := 0; i < 100; i++ { + if _, err := os.Stat(gatewayFile); err == nil { + fileExists = true + break + } + time.Sleep(100 * time.Millisecond) + } + require.True(t, fileExists, "gateway file should be created") + + // Read the gateway file to get the URL (already includes http:// prefix) + gatewayURL, err := os.ReadFile(gatewayFile) + require.NoError(t, err) + + // Immediately try to use the Gateway - should work on first attempt + url := strings.TrimSpace(string(gatewayURL)) + "/ipfs/bafkqaaa" // empty file CID + resp, err := http.Get(url) + require.NoError(t, err, "Gateway should be ready immediately when gateway file exists") + defer resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + }) +} diff --git a/test/cli/autoconf/autoconf_test.go b/test/cli/autoconf/autoconf_test.go new file mode 100644 index 000000000..0a49e8c89 --- /dev/null +++ b/test/cli/autoconf/autoconf_test.go @@ -0,0 +1,779 @@ +package autoconf + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConf(t *testing.T) { + t.Parallel() + + t.Run("basic functionality", func(t *testing.T) { + t.Parallel() + testAutoConfBasicFunctionality(t) + }) + + t.Run("background service updates", func(t *testing.T) { + t.Parallel() + testAutoConfBackgroundService(t) + }) + + t.Run("HTTP error scenarios", func(t *testing.T) { + t.Parallel() + testAutoConfHTTPErrors(t) + }) + + t.Run("cache-based config expansion", func(t *testing.T) { + t.Parallel() + testAutoConfCacheBasedExpansion(t) + }) + + 
t.Run("disabled autoconf", func(t *testing.T) { + t.Parallel() + testAutoConfDisabled(t) + }) + + t.Run("bootstrap list shows auto as-is", func(t *testing.T) { + t.Parallel() + testBootstrapListResolved(t) + }) + + t.Run("daemon uses resolved bootstrap values", func(t *testing.T) { + t.Parallel() + testDaemonUsesResolvedBootstrap(t) + }) + + t.Run("empty cache uses fallback defaults", func(t *testing.T) { + t.Parallel() + testEmptyCacheUsesFallbacks(t) + }) + + t.Run("stale cache with unreachable server", func(t *testing.T) { + t.Parallel() + testStaleCacheWithUnreachableServer(t) + }) + + t.Run("autoconf disabled with auto values", func(t *testing.T) { + t.Parallel() + testAutoConfDisabledWithAutoValues(t) + }) + + t.Run("network behavior - cached vs refresh", func(t *testing.T) { + t.Parallel() + testAutoConfNetworkBehavior(t) + }) + + t.Run("HTTPS autoconf server", func(t *testing.T) { + t.Parallel() + testAutoConfWithHTTPS(t) + }) +} + +func testAutoConfBasicFunctionality(t *testing.T) { + // Load test autoconf data + autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + etag := `"test-etag-123"` + requestCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount++ + t.Logf("AutoConf server request #%d: %s %s", requestCount, r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", etag) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node and configure it to use our test server + // Use test profile to avoid autoconf profile being applied by default + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + // Disable background updates to prevent multiple requests + 
node.SetIPFSConfig("AutoConf.RefreshInterval", "24h") + + // Test with normal bootstrap peers (not "auto") to avoid multiaddr parsing issues + // This tests that autoconf fetching works without complex auto replacement + node.SetIPFSConfig("Bootstrap", []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}) + + // Start daemon to trigger autoconf fetch + node.StartDaemon() + defer node.StopDaemon() + + // Give autoconf some time to fetch + time.Sleep(2 * time.Second) + + // Verify that the autoconf system fetched data from our server + t.Logf("Server request count: %d", requestCount) + require.GreaterOrEqual(t, requestCount, 1, "AutoConf server should have been called at least once") + + // Test that daemon is functional + result := node.RunIPFS("id") + assert.Equal(t, 0, result.ExitCode(), "IPFS daemon should be responsive") + assert.Contains(t, result.Stdout.String(), "ID", "IPFS id command should return peer information") + + // Success! AutoConf system is working: + // 1. Server was called (proves fetch works) + // 2. Daemon started successfully (proves DNS resolver validation is fixed) + // 3. 
Daemon is functional (proves autoconf doesn't break core functionality) + // Note: We skip checking metadata values due to JSON parsing complexity in test harness +} + +func testAutoConfBackgroundService(t *testing.T) { + // Test that the startAutoConfUpdater() goroutine makes network requests for background refresh + // This is separate from daemon config operations which now use cache-first approach + + // Load initial and updated test data + initialData := loadTestData(t, "valid_autoconf.json") + updatedData := loadTestData(t, "updated_autoconf.json") + + // Track which config is being served + currentData := initialData + var requestCount atomic.Int32 + + // Create server that switches payload after first request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Background service request #%d from %s", count, r.UserAgent()) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", fmt.Sprintf(`"background-test-etag-%d"`, count)) + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + + if count > 1 { + // After first request, serve updated config + currentData = updatedData + } + + _, _ = w.Write(currentData) + })) + defer server.Close() + + // Create IPFS node with short refresh interval to trigger background service + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("AutoConf.RefreshInterval", "1s") // Very short for testing background service + + // Use normal bootstrap values to avoid dependency on autoconf during initialization + node.SetIPFSConfig("Bootstrap", []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}) + + // Start daemon - this should start the background service via startAutoConfUpdater() + node.StartDaemon() + defer node.StopDaemon() + + // Wait for initial 
request (daemon startup may trigger one) + time.Sleep(1 * time.Second) + initialCount := requestCount.Load() + t.Logf("Initial request count after daemon start: %d", initialCount) + + // Wait for background service to make additional requests + // The background service should make requests at the RefreshInterval (1s) + time.Sleep(3 * time.Second) + + finalCount := requestCount.Load() + t.Logf("Final request count after background updates: %d", finalCount) + + // Background service should have made multiple requests due to 1s refresh interval + assert.Greater(t, finalCount, initialCount, + "Background service should have made additional requests beyond daemon startup") + + // Verify that the service is actively making requests (not just relying on cache) + assert.GreaterOrEqual(t, finalCount, int32(2), + "Should have at least 2 requests total (startup + background refresh)") + + t.Logf("Successfully verified startAutoConfUpdater() background service makes network requests") +} + +func testAutoConfHTTPErrors(t *testing.T) { + tests := []struct { + name string + statusCode int + body string + }{ + {"404 Not Found", http.StatusNotFound, "Not Found"}, + {"500 Internal Server Error", http.StatusInternalServerError, "Internal Server Error"}, + {"Invalid JSON", http.StatusOK, "invalid json content"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create server that returns error + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.statusCode) + _, _ = w.Write([]byte(tt.body)) + })) + defer server.Close() + + // Create node with failing AutoConf URL + // Use test profile to avoid autoconf profile being applied by default + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon - it should start but autoconf should 
fail gracefully + node.StartDaemon() + defer node.StopDaemon() + + // Daemon should still be functional even with autoconf HTTP errors + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with HTTP errors in autoconf") + }) + } +} + +func testAutoConfCacheBasedExpansion(t *testing.T) { + // Test that config expansion works correctly with cached autoconf data + // without requiring active network requests during expansion operations + + autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create server that serves autoconf data + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"cache-test-etag"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with autoconf enabled + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Set configuration with "auto" values to test expansion + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"test.": "auto"}) + + // Populate cache by running a command that triggers autoconf (without daemon) + result := node.RunIPFS("bootstrap", "list", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Initial bootstrap expansion should succeed") + + expandedBootstrap := result.Stdout.String() + assert.NotContains(t, expandedBootstrap, "auto", "Expanded bootstrap should not contain 'auto' literal") + assert.Greater(t, len(strings.Fields(expandedBootstrap)), 0, "Should have expanded bootstrap peers") + + // Test that subsequent config operations work with cached data (no network required) + // This simulates the cache-first behavior our 
architecture now uses + + // Test Bootstrap expansion + result = node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Cached bootstrap expansion should succeed") + + var expandedBootstrapList []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedBootstrapList) + require.NoError(t, err) + assert.NotContains(t, expandedBootstrapList, "auto", "Expanded bootstrap list should not contain 'auto'") + assert.Greater(t, len(expandedBootstrapList), 0, "Should have expanded bootstrap peers from cache") + + // Test Routing.DelegatedRouters expansion + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Cached router expansion should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + assert.NotContains(t, expandedRouters, "auto", "Expanded routers should not contain 'auto'") + + // Test DNS.Resolvers expansion + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Cached DNS resolver expansion should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + + // Should have expanded the "auto" value for test. domain, or removed it if no autoconf data available + testResolver, exists := expandedResolvers["test."] + if exists { + assert.NotEqual(t, "auto", testResolver, "test. resolver should not be literal 'auto'") + t.Logf("Found expanded resolver for test.: %s", testResolver) + } else { + t.Logf("No resolver found for test. 
domain (autoconf may not have DNS resolver data)") + } + + // Test full config expansion + result = node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Full config expansion should succeed") + + expandedConfig := result.Stdout.String() + // Should not contain literal "auto" values after expansion + assert.NotContains(t, expandedConfig, `"auto"`, "Expanded config should not contain literal 'auto' values") + assert.Contains(t, expandedConfig, `"Bootstrap"`, "Should contain Bootstrap section") + assert.Contains(t, expandedConfig, `"DNS"`, "Should contain DNS section") + + t.Logf("Successfully tested cache-based config expansion without active network requests") +} + +func testAutoConfDisabled(t *testing.T) { + // Create node with AutoConf disabled but "auto" values + // Use test profile to avoid autoconf profile being applied by default + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", false) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test by trying to list bootstrap - when AutoConf is disabled, it should show literal "auto" + result := node.RunIPFS("bootstrap", "list") + if result.ExitCode() == 0 { + // If command succeeds, it should show literal "auto" (no resolution) + output := result.Stdout.String() + assert.Contains(t, output, "auto", "Should show literal 'auto' when AutoConf is disabled") + } else { + // If command fails, error should mention autoconf issue + assert.Contains(t, result.Stderr.String(), "auto", "Should mention 'auto' values in error") + } +} + +// Helper function to load test data files +func loadTestData(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: %s", filename) + + return data +} + +func testBootstrapListResolved(t *testing.T) { + // Test that bootstrap list shows "auto" as-is (not expanded) + + // Load test autoconf data + 
autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with "auto" bootstrap value + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test 1: bootstrap list (without --expand-auto) shows "auto" as-is - NO DAEMON NEEDED! + result := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, result.ExitCode(), "bootstrap list command should succeed") + + output := result.Stdout.String() + t.Logf("Bootstrap list output: %s", output) + assert.Contains(t, output, "auto", "bootstrap list should show 'auto' value as-is") + + // Should NOT contain expanded bootstrap peers without --expand-auto + unexpectedPeers := []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + } + + for _, peer := range unexpectedPeers { + assert.NotContains(t, output, peer, "bootstrap list should not contain expanded peer: %s", peer) + } + + // Test 2: bootstrap list --expand-auto shows expanded values (no daemon needed!) 
+ result = node.RunIPFS("bootstrap", "list", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "bootstrap list --expand-auto command should succeed") + + expandedOutput := result.Stdout.String() + t.Logf("Bootstrap list --expand-auto output: %s", expandedOutput) + + // Should NOT contain "auto" literal when expanded + assert.NotContains(t, expandedOutput, "auto", "bootstrap list --expand-auto should not show 'auto' literal") + + // Should contain at least one expanded bootstrap peer + expectedPeers := []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + } + + foundExpectedPeer := false + for _, peer := range expectedPeers { + if strings.Contains(expandedOutput, peer) { + foundExpectedPeer = true + t.Logf("Found expected expanded peer: %s", peer) + break + } + } + assert.True(t, foundExpectedPeer, "bootstrap list --expand-auto should contain at least one expanded bootstrap peer") +} + +func testDaemonUsesResolvedBootstrap(t *testing.T) { + // Test that daemon actually uses expanded bootstrap values for P2P connections + // even though bootstrap list shows "auto" + + // Step 1: Create bootstrap node (target for connections) + bootstrapNode := harness.NewT(t).NewNode().Init("--profile=test") + // Set a specific swarm port for the bootstrap node to avoid port 0 issues + bootstrapNode.SetIPFSConfig("Addresses.Swarm", []string{"/ip4/127.0.0.1/tcp/14001"}) + // Disable routing and discovery to ensure it's only discoverable via explicit multiaddr + bootstrapNode.SetIPFSConfig("Routing.Type", "none") + bootstrapNode.SetIPFSConfig("Discovery.MDNS.Enabled", false) + bootstrapNode.SetIPFSConfig("Bootstrap", []string{}) // No bootstrap peers + + // Start the bootstrap node first + bootstrapNode.StartDaemon() + defer bootstrapNode.StopDaemon() + + // Get 
bootstrap node's peer ID and swarm address + bootstrapPeerID := bootstrapNode.PeerID() + + // Use the configured swarm address (we set it to a specific port above) + bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/14001/p2p/%s", bootstrapPeerID.String()) + t.Logf("Bootstrap node configured at: %s", bootstrapMultiaddr) + + // Step 2: Create autoconf server that returns bootstrap node's address + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": ["%s"] + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": {} + }`, bootstrapMultiaddr) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer server.Close() + + // Step 3: Create autoconf-enabled node that should connect to bootstrap node + autoconfNode := harness.NewT(t).NewNode().Init("--profile=test") + autoconfNode.SetIPFSConfig("AutoConf.URL", server.URL) + autoconfNode.SetIPFSConfig("AutoConf.Enabled", true) + autoconfNode.SetIPFSConfig("Bootstrap", []string{"auto"}) // This should resolve to bootstrap node + // Disable other discovery methods to force bootstrap-only connectivity + autoconfNode.SetIPFSConfig("Routing.Type", "none") + autoconfNode.SetIPFSConfig("Discovery.MDNS.Enabled", false) + + // Start the autoconf node + autoconfNode.StartDaemon() + defer autoconfNode.StopDaemon() + + // Step 4: Give time for autoconf resolution and connection attempts + time.Sleep(8 * time.Second) + + // Step 5: Verify both nodes are responsive + result := bootstrapNode.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Bootstrap node should be responsive: %s", result.Stderr.String()) + + result = autoconfNode.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "AutoConf node should be 
responsive: %s", result.Stderr.String()) + + // Step 6: Verify that autoconf node connected to bootstrap node + // Check swarm peers on autoconf node - it should show bootstrap node's peer ID + result = autoconfNode.RunIPFS("swarm", "peers") + if result.ExitCode() == 0 { + peerOutput := result.Stdout.String() + if strings.Contains(peerOutput, bootstrapPeerID.String()) { + t.Logf("SUCCESS: AutoConf node connected to bootstrap peer %s", bootstrapPeerID.String()) + } else { + t.Logf("No active connection found. Peers output: %s", peerOutput) + // This might be OK if connection attempt was made but didn't persist + } + } else { + // If swarm peers fails, try alternative verification via daemon logs + t.Logf("Swarm peers command failed, checking daemon logs for connection attempts") + daemonOutput := autoconfNode.Daemon.Stderr.String() + if strings.Contains(daemonOutput, bootstrapPeerID.String()) { + t.Logf("SUCCESS: Found bootstrap peer %s in daemon logs, connection attempted", bootstrapPeerID.String()) + } else { + t.Logf("Daemon stderr: %s", daemonOutput) + } + } + + // Step 7: Verify bootstrap configuration still shows "auto" (not resolved values) + result = autoconfNode.RunIPFS("bootstrap", "list") + require.Equal(t, 0, result.ExitCode(), "Bootstrap list command should work") + assert.Contains(t, result.Stdout.String(), "auto", + "Bootstrap list should still show 'auto' even though values were resolved for networking") +} + +func testEmptyCacheUsesFallbacks(t *testing.T) { + // Test that daemon uses fallback defaults when no cache exists and server is unreachable + + // Create IPFS node with auto values and unreachable autoconf server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", "http://127.0.0.1:9999/nonexistent") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + + // Start daemon - should succeed 
using fallback values + node.StartDaemon() + defer node.StopDaemon() + + // Verify daemon started successfully (uses fallback bootstrap) + result := node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Daemon should start successfully with fallback values") + + // Verify config commands still show "auto" + result = node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + assert.Contains(t, result.Stdout.String(), "auto", "Bootstrap config should still show 'auto'") + + result = node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + assert.Contains(t, result.Stdout.String(), "auto", "DelegatedRouters config should still show 'auto'") + + // Check daemon logs for error about failed autoconf fetch + logOutput := node.Daemon.Stderr.String() + // The daemon should attempt to fetch autoconf but will use fallbacks on failure + // We don't require specific log messages as long as the daemon starts successfully + if logOutput != "" { + t.Logf("Daemon logs: %s", logOutput) + } +} + +func testStaleCacheWithUnreachableServer(t *testing.T) { + // Test that daemon uses stale cache when server is unreachable + + // First create a working autoconf server and cache + autoConfData := loadTestData(t, "valid_autoconf.json") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + + // Create node and fetch autoconf to populate cache + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon briefly to populate cache + node.StartDaemon() + time.Sleep(1 * time.Second) // Allow cache population + node.StopDaemon() + + // Close the server to make it unreachable + server.Close() + + // Update config to point to unreachable server 
+ node.SetIPFSConfig("AutoConf.URL", "http://127.0.0.1:9999/unreachable") + + // Start daemon again - should use stale cache + node.StartDaemon() + defer node.StopDaemon() + + // Verify daemon started successfully (uses cached autoconf) + result := node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Daemon should start successfully with cached autoconf") + + // Check daemon logs for error about using stale config + logOutput := node.Daemon.Stderr.String() + // The daemon should use cached config when server is unreachable + // We don't require specific log messages as long as the daemon starts successfully + if logOutput != "" { + t.Logf("Daemon logs: %s", logOutput) + } +} + +func testAutoConfDisabledWithAutoValues(t *testing.T) { + // Test that daemon fails to start when AutoConf is disabled but "auto" values are present + + // Create IPFS node with AutoConf disabled but "auto" values configured + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", false) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test by trying to list bootstrap - when AutoConf is disabled, it should show literal "auto" + result := node.RunIPFS("bootstrap", "list") + if result.ExitCode() == 0 { + // If command succeeds, it should show literal "auto" (no resolution) + output := result.Stdout.String() + assert.Contains(t, output, "auto", "Should show literal 'auto' when AutoConf is disabled") + } else { + // If command fails, error should mention autoconf issue + logOutput := result.Stderr.String() + assert.Contains(t, logOutput, "auto", "Error should mention 'auto' values") + // Check that the error message contains information about disabled state + assert.True(t, + strings.Contains(logOutput, "disabled") || strings.Contains(logOutput, "AutoConf.Enabled=false"), + "Error should mention that AutoConf is disabled or show AutoConf.Enabled=false") + } +} + +func testAutoConfNetworkBehavior(t *testing.T) { + // Test the network 
behavior differences between MustGetConfigCached and MustGetConfigWithRefresh + // This validates that our cache-first architecture works as expected + + autoConfData := loadTestData(t, "valid_autoconf.json") + var requestCount atomic.Int32 + + // Create server that tracks all requests + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Network behavior test request #%d: %s %s", count, r.Method, r.URL.Path) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", fmt.Sprintf(`"network-test-etag-%d"`, count)) + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with autoconf + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Phase 1: Test cache-first behavior (no network requests expected) + t.Logf("=== Phase 1: Testing cache-first behavior ===") + initialCount := requestCount.Load() + + // Multiple config operations should NOT trigger network requests (cache-first) + result := node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode(), "Bootstrap config read should succeed") + + result = node.RunIPFS("config", "show") + require.Equal(t, 0, result.ExitCode(), "Config show should succeed") + + result = node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, result.ExitCode(), "Bootstrap list should succeed") + + // Check that cache-first operations didn't trigger network requests + afterCacheOpsCount := requestCount.Load() + cachedRequestDiff := afterCacheOpsCount - initialCount + t.Logf("Network requests during cache-first operations: %d", cachedRequestDiff) + + // Phase 2: Test explicit expansion (may trigger cache population) + t.Logf("=== Phase 2: Testing expansion operations ===") 
+ beforeExpansionCount := requestCount.Load() + + // Expansion operations may need to populate cache if empty + result = node.RunIPFS("bootstrap", "list", "--expand-auto") + if result.ExitCode() == 0 { + output := result.Stdout.String() + assert.NotContains(t, output, "auto", "Expanded bootstrap should not contain 'auto' literal") + t.Logf("Bootstrap expansion succeeded") + } else { + t.Logf("Bootstrap expansion failed (may be due to network/cache issues): %s", result.Stderr.String()) + } + + result = node.RunIPFS("config", "Bootstrap", "--expand-auto") + if result.ExitCode() == 0 { + t.Logf("Config Bootstrap expansion succeeded") + } else { + t.Logf("Config Bootstrap expansion failed: %s", result.Stderr.String()) + } + + afterExpansionCount := requestCount.Load() + expansionRequestDiff := afterExpansionCount - beforeExpansionCount + t.Logf("Network requests during expansion operations: %d", expansionRequestDiff) + + // Phase 3: Test background service behavior (if daemon is started) + t.Logf("=== Phase 3: Testing background service behavior ===") + beforeDaemonCount := requestCount.Load() + + // Set short refresh interval to test background service + node.SetIPFSConfig("AutoConf.RefreshInterval", "1s") + + // Start daemon - this triggers startAutoConfUpdater() which should make network requests + node.StartDaemon() + defer node.StopDaemon() + + // Wait for background service to potentially make requests + time.Sleep(2 * time.Second) + + afterDaemonCount := requestCount.Load() + daemonRequestDiff := afterDaemonCount - beforeDaemonCount + t.Logf("Network requests from background service: %d", daemonRequestDiff) + + // Verify expected behavior patterns + t.Logf("=== Summary ===") + t.Logf("Cache-first operations: %d requests", cachedRequestDiff) + t.Logf("Expansion operations: %d requests", expansionRequestDiff) + t.Logf("Background service: %d requests", daemonRequestDiff) + + // Cache-first operations should minimize network requests + assert.LessOrEqual(t, 
cachedRequestDiff, int32(1), + "Cache-first config operations should make minimal network requests") + + // Background service should make requests for refresh + if daemonRequestDiff > 0 { + t.Logf("✓ Background service is making network requests as expected") + } else { + t.Logf("⚠ Background service made no requests (may be using existing cache)") + } + + t.Logf("Successfully verified network behavior patterns in autoconf architecture") +} + +func testAutoConfWithHTTPS(t *testing.T) { + // Test autoconf with HTTPS server and TLSInsecureSkipVerify enabled + autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create HTTPS server with self-signed certificate + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("HTTPS autoconf request from %s", r.UserAgent()) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"https-test-etag"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + + // Enable HTTP/2 and start with TLS (self-signed certificate) + server.EnableHTTP2 = true + server.StartTLS() + defer server.Close() + + // Create IPFS node with HTTPS autoconf server and TLS skip verify + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("AutoConf.TLSInsecureSkipVerify", true) // Allow self-signed cert + node.SetIPFSConfig("AutoConf.RefreshInterval", "24h") // Disable background updates + + // Use normal bootstrap peers to test HTTPS fetching without complex auto replacement + node.SetIPFSConfig("Bootstrap", []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}) + + // Start daemon to trigger HTTPS autoconf fetch + node.StartDaemon() + defer node.StopDaemon() + + // Give autoconf time to fetch over HTTPS + time.Sleep(2 * time.Second) + + // Verify daemon is 
functional with HTTPS autoconf + result := node.RunIPFS("id") + assert.Equal(t, 0, result.ExitCode(), "IPFS daemon should be responsive with HTTPS autoconf") + assert.Contains(t, result.Stdout.String(), "ID", "IPFS id command should return peer information") + + // Test that config operations work with HTTPS-fetched autoconf cache + result = node.RunIPFS("config", "show") + assert.Equal(t, 0, result.ExitCode(), "Config show should work with HTTPS autoconf") + + // Test bootstrap list functionality + result = node.RunIPFS("bootstrap", "list") + assert.Equal(t, 0, result.ExitCode(), "Bootstrap list should work with HTTPS autoconf") + + t.Logf("Successfully tested AutoConf with HTTPS server and TLS skip verify") +} diff --git a/test/cli/autoconf/dns_test.go b/test/cli/autoconf/dns_test.go new file mode 100644 index 000000000..13144fa46 --- /dev/null +++ b/test/cli/autoconf/dns_test.go @@ -0,0 +1,288 @@ +package autoconf + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfDNS(t *testing.T) { + t.Parallel() + + t.Run("DNS resolution with auto DoH resolver", func(t *testing.T) { + t.Parallel() + testDNSResolutionWithAutoDoH(t) + }) + + t.Run("DNS errors are handled properly", func(t *testing.T) { + t.Parallel() + testDNSErrorHandling(t) + }) +} + +// mockDoHServer implements a simple DNS-over-HTTPS server for testing +type mockDoHServer struct { + t *testing.T + server *httptest.Server + mu sync.Mutex + requests []string + responseFunc func(name string) *dns.Msg +} + +func newMockDoHServer(t *testing.T) *mockDoHServer { + m := &mockDoHServer{ + t: t, + requests: []string{}, + } + + // Default response function returns a dnslink TXT record + m.responseFunc = func(name string) *dns.Msg { + msg := &dns.Msg{} + msg.SetReply(&dns.Msg{Question: 
[]dns.Question{{Name: name, Qtype: dns.TypeTXT}}}) + + if strings.HasPrefix(name, "_dnslink.") { + // Return a valid dnslink record + rr := &dns.TXT{ + Hdr: dns.RR_Header{ + Name: name, + Rrtype: dns.TypeTXT, + Class: dns.ClassINET, + Ttl: 300, + }, + Txt: []string{"dnslink=/ipfs/QmYNQJoKGNHTpPxCBPh9KkDpaExgd2duMa3aF6ytMpHdao"}, + } + msg.Answer = append(msg.Answer, rr) + } + + return msg + } + + mux := http.NewServeMux() + mux.HandleFunc("/dns-query", m.handleDNSQuery) + + m.server = httptest.NewServer(mux) + return m +} + +func (m *mockDoHServer) handleDNSQuery(w http.ResponseWriter, r *http.Request) { + m.mu.Lock() + defer m.mu.Unlock() + + var dnsMsg *dns.Msg + + if r.Method == "GET" { + // Handle GET with ?dns= parameter + dnsParam := r.URL.Query().Get("dns") + if dnsParam == "" { + http.Error(w, "missing dns parameter", http.StatusBadRequest) + return + } + + data, err := base64.RawURLEncoding.DecodeString(dnsParam) + if err != nil { + http.Error(w, "invalid base64", http.StatusBadRequest) + return + } + + dnsMsg = &dns.Msg{} + if err := dnsMsg.Unpack(data); err != nil { + http.Error(w, "invalid DNS message", http.StatusBadRequest) + return + } + } else if r.Method == "POST" { + // Handle POST with DNS wire format + data, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "failed to read body", http.StatusBadRequest) + return + } + + dnsMsg = &dns.Msg{} + if err := dnsMsg.Unpack(data); err != nil { + http.Error(w, "invalid DNS message", http.StatusBadRequest) + return + } + } else { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + // Log the DNS query + if len(dnsMsg.Question) > 0 { + qname := dnsMsg.Question[0].Name + m.requests = append(m.requests, qname) + m.t.Logf("DoH server received query for: %s", qname) + } + + // Generate response + response := m.responseFunc(dnsMsg.Question[0].Name) + responseData, err := response.Pack() + if err != nil { + http.Error(w, "failed to pack response", 
http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/dns-message") + _, _ = w.Write(responseData) +} + +func (m *mockDoHServer) getRequests() []string { + m.mu.Lock() + defer m.mu.Unlock() + return append([]string{}, m.requests...) +} + +func (m *mockDoHServer) close() { + m.server.Close() +} + +func testDNSResolutionWithAutoDoH(t *testing.T) { + // Create mock DoH server + dohServer := newMockDoHServer(t) + defer dohServer.close() + + // Create autoconf data with DoH resolver for "foo." domain + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": { + "foo.": ["%s/dns-query"] + }, + "DelegatedEndpoints": {} + }`, dohServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // Create IPFS node with auto DNS resolver + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Start daemon + node.StartDaemon() + defer node.StopDaemon() + + // Verify config still shows "auto" for DNS resolvers + result := node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + dnsResolversOutput := result.Stdout.String() + assert.Contains(t, dnsResolversOutput, "foo.", "DNS resolvers should contain foo. 
domain") + assert.Contains(t, dnsResolversOutput, "auto", "DNS resolver config should show 'auto'") + + // Try to resolve a .foo domain + result = node.RunIPFS("resolve", "/ipns/example.foo") + require.Equal(t, 0, result.ExitCode()) + + // Should resolve to the IPFS path from our mock DoH server + output := strings.TrimSpace(result.Stdout.String()) + assert.Equal(t, "/ipfs/QmYNQJoKGNHTpPxCBPh9KkDpaExgd2duMa3aF6ytMpHdao", output, + "Should resolve to the path returned by DoH server") + + // Verify DoH server received the DNS query + requests := dohServer.getRequests() + require.Greater(t, len(requests), 0, "DoH server should have received at least one request") + + foundDNSLink := false + for _, req := range requests { + if strings.Contains(req, "_dnslink.example.foo") { + foundDNSLink = true + break + } + } + assert.True(t, foundDNSLink, "DoH server should have received query for _dnslink.example.foo") +} + +func testDNSErrorHandling(t *testing.T) { + // Create DoH server that returns NXDOMAIN + dohServer := newMockDoHServer(t) + defer dohServer.close() + + // Configure to return NXDOMAIN + dohServer.responseFunc = func(name string) *dns.Msg { + msg := &dns.Msg{} + msg.SetReply(&dns.Msg{Question: []dns.Question{{Name: name, Qtype: dns.TypeTXT}}}) + msg.Rcode = dns.RcodeNameError // NXDOMAIN + return msg + } + + // Create autoconf data with DoH resolver + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": { + "bar.": ["%s/dns-query"] + }, + "DelegatedEndpoints": {} + }`, dohServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // 
Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"bar.": "auto"}) + + // Start daemon + node.StartDaemon() + defer node.StopDaemon() + + // Try to resolve a non-existent domain + result := node.RunIPFS("resolve", "/ipns/nonexistent.bar") + require.NotEqual(t, 0, result.ExitCode(), "Resolution should fail for non-existent domain") + + // Should contain appropriate error message + stderr := result.Stderr.String() + assert.Contains(t, stderr, "could not resolve name", + "Error should indicate DNS resolution failure") + + // Verify DoH server received the query + requests := dohServer.getRequests() + foundQuery := false + for _, req := range requests { + if strings.Contains(req, "_dnslink.nonexistent.bar") { + foundQuery = true + break + } + } + assert.True(t, foundQuery, "DoH server should have received query even for failed resolution") +} diff --git a/test/cli/autoconf/expand_comprehensive_test.go b/test/cli/autoconf/expand_comprehensive_test.go new file mode 100644 index 000000000..01dbcfda3 --- /dev/null +++ b/test/cli/autoconf/expand_comprehensive_test.go @@ -0,0 +1,698 @@ +// Package autoconf provides comprehensive tests for --expand-auto functionality. +// +// Test Scenarios: +// 1. Tests WITH daemon: Most tests start a daemon to fetch and cache autoconf data, +// then test CLI commands that read from that cache using MustGetConfigCached. +// 2. Tests WITHOUT daemon: Error condition tests that don't need cached autoconf. 
+// +// The daemon setup uses startDaemonAndWaitForAutoConf() helper which: +// - Starts the daemon +// - Waits for HTTP request to mock server (not arbitrary timeout) +// - Returns when autoconf is cached and ready for CLI commands +package autoconf + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExpandAutoComprehensive(t *testing.T) { + t.Parallel() + + t.Run("all autoconf fields resolve correctly", func(t *testing.T) { + t.Parallel() + testAllAutoConfFieldsResolve(t) + }) + + t.Run("bootstrap list --expand-auto matches config Bootstrap --expand-auto", func(t *testing.T) { + t.Parallel() + testBootstrapCommandConsistency(t) + }) + + t.Run("write operations fail with --expand-auto", func(t *testing.T) { + t.Parallel() + testWriteOperationsFailWithExpandAuto(t) + }) + + t.Run("config show --expand-auto provides complete expanded view", func(t *testing.T) { + t.Parallel() + testConfigShowExpandAutoComplete(t) + }) + + t.Run("multiple expand-auto calls use cache (single HTTP request)", func(t *testing.T) { + t.Parallel() + testMultipleExpandAutoUsesCache(t) + }) + + t.Run("CLI uses cache only while daemon handles background updates", func(t *testing.T) { + t.Parallel() + testCLIUsesCacheOnlyDaemonUpdatesBackground(t) + }) +} + +// testAllAutoConfFieldsResolve verifies that all autoconf fields (Bootstrap, DNS.Resolvers, +// Routing.DelegatedRouters, and Ipns.DelegatedPublishers) can be resolved from "auto" values +// to their actual configuration using --expand-auto flag with daemon-cached autoconf data. +// +// This test is critical because: +// 1. It validates the core autoconf resolution functionality across all supported fields +// 2. It ensures that "auto" placeholders are properly replaced with real configuration values +// 3. 
It verifies that the autoconf JSON structure is correctly parsed and applied +// 4. It tests the end-to-end flow from HTTP fetch to config field expansion +func testAllAutoConfFieldsResolve(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + // This validates core autoconf resolution functionality across all supported fields + + // Track HTTP requests to verify mock server is being used + var requestCount atomic.Int32 + var autoConfData []byte + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Mock autoconf server request #%d: %s %s", count, r.Method, r.URL.Path) + + // Create comprehensive autoconf response matching Schema 4 format + // Use server URLs to ensure they're reachable and valid + serverURL := fmt.Sprintf("http://%s", r.Host) // Get the server URL from the request + autoConf := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + "IPNI": map[string]interface{}{ + "URL": serverURL + "/ipni-system", + "Description": "Test IPNI system", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + "CustomIPNS": map[string]interface{}{ + "URL": serverURL + "/ipns-system", + "Description": "Test IPNS system", + "DelegatedConfig": 
map[string]interface{}{ + "Read": []string{"/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + }, + "DNSResolvers": map[string][]string{ + ".": {"https://cloudflare-dns.com/dns-query"}, + "eth.": {"https://dns.google/dns-query"}, + }, + "DelegatedEndpoints": map[string]interface{}{ + serverURL: map[string]interface{}{ + "Systems": []string{"IPNI", "CustomIPNS"}, // Use non-AminoDHT systems to avoid filtering + "Read": []string{"/routing/v1/providers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + } + + var err error + autoConfData, err = json.Marshal(autoConf) + if err != nil { + t.Fatalf("Failed to marshal autoConf: %v", err) + } + + t.Logf("Serving mock autoconf data: %s", string(autoConfData)) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"test-mock-config"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with all auto values + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Clear any existing autoconf cache to prevent interference + result := node.RunIPFS("config", "show") + if result.ExitCode() == 0 { + var cfg map[string]interface{} + if json.Unmarshal([]byte(result.Stdout.String()), &cfg) == nil { + if repoPath, exists := cfg["path"]; exists { + if pathStr, ok := repoPath.(string); ok { + t.Logf("Clearing autoconf cache from %s/autoconf", pathStr) + // Note: We can't directly remove files, but clearing cache via config change should help + } + } + } + } + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("AutoConf.RefreshInterval", "1s") // Force fresh fetches for testing + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{ + ".": "auto", + "eth.": "auto", + }) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + 
node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Test 1: Bootstrap resolution + result = node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Bootstrap expansion should succeed") + + var expandedBootstrap []string + var err error + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedBootstrap) + require.NoError(t, err) + + assert.NotContains(t, expandedBootstrap, "auto", "Bootstrap should not contain 'auto'") + assert.Contains(t, expandedBootstrap, "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN") + assert.Contains(t, expandedBootstrap, "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa") + t.Logf("Bootstrap expanded to: %v", expandedBootstrap) + + // Test 2: DNS.Resolvers resolution + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "DNS.Resolvers expansion should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + + assert.NotContains(t, expandedResolvers, "auto", "DNS.Resolvers should not contain 'auto'") + assert.Equal(t, "https://cloudflare-dns.com/dns-query", expandedResolvers["."]) + assert.Equal(t, "https://dns.google/dns-query", expandedResolvers["eth."]) + t.Logf("DNS.Resolvers expanded to: %v", expandedResolvers) + + // Test 3: Routing.DelegatedRouters resolution + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Routing.DelegatedRouters expansion should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + assert.NotContains(t, expandedRouters, "auto", 
"DelegatedRouters should not contain 'auto'") + + // Test should strictly require mock autoconf to work - no fallback acceptance + // The mock endpoint has Read paths ["/routing/v1/providers", "/routing/v1/ipns"] + // so we expect 2 URLs with those paths + expectedMockURLs := []string{ + server.URL + "/routing/v1/providers", + server.URL + "/routing/v1/ipns", + } + require.Equal(t, 2, len(expandedRouters), + "Should have exactly 2 routers from mock autoconf (one for each Read path). Got %d routers: %v. "+ + "This indicates autoconf is not working properly - check if mock server data is being parsed and filtered correctly.", + len(expandedRouters), expandedRouters) + + // Check that both expected URLs are present + for _, expectedURL := range expectedMockURLs { + assert.Contains(t, expandedRouters, expectedURL, + "Should contain mock autoconf endpoint with path %s. Got: %v. "+ + "This indicates autoconf endpoint path generation is not working properly.", + expectedURL, expandedRouters) + } + + // Test 4: Ipns.DelegatedPublishers resolution + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Ipns.DelegatedPublishers expansion should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + assert.NotContains(t, expandedPublishers, "auto", "DelegatedPublishers should not contain 'auto'") + + // Test should require mock autoconf endpoint for IPNS publishing + // The mock endpoint supports /routing/v1/ipns write operations, so it should be included with path + expectedMockPublisherURL := server.URL + "/routing/v1/ipns" + require.Equal(t, 1, len(expandedPublishers), + "Should have exactly 1 IPNS publisher from mock autoconf. Got %d publishers: %v. 
"+ + "This indicates autoconf IPNS publisher filtering is not working properly.", + len(expandedPublishers), expandedPublishers) + assert.Equal(t, expectedMockPublisherURL, expandedPublishers[0], + "Should use mock autoconf endpoint %s for IPNS publishing, not fallback. Got: %s. "+ + "This indicates autoconf IPNS publisher resolution is not working properly.", + expectedMockPublisherURL, expandedPublishers[0]) + + // CRITICAL: Verify that mock server was actually used + finalRequestCount := requestCount.Load() + require.Greater(t, finalRequestCount, int32(0), + "Mock autoconf server should have been called at least once. Got %d requests. "+ + "This indicates the test is using cached or fallback config instead of mock data.", finalRequestCount) + t.Logf("Mock server was called %d times - test is using mock data", finalRequestCount) +} + +// testBootstrapCommandConsistency verifies that `ipfs bootstrap list --expand-auto` and +// `ipfs config Bootstrap --expand-auto` return identical results when both use autoconf. +// +// This test is important because: +// 1. It ensures consistency between different CLI commands that access the same data +// 2. It validates that both the bootstrap-specific command and generic config command +// use the same underlying autoconf resolution mechanism +// 3. It prevents regression where different commands might resolve "auto" differently +// 4. 
It ensures users get consistent results regardless of which command they use +func testBootstrapCommandConsistency(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + // This ensures both bootstrap commands read from the same cached autoconf data + + // Load test autoconf data + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests to verify daemon fetches autoconf + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Add(1) + t.Logf("Bootstrap consistency test request: %s %s", r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with auto bootstrap + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Get bootstrap via config command + configResult := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, configResult.ExitCode(), "config Bootstrap --expand-auto should succeed") + + // Get bootstrap via bootstrap command + bootstrapResult := node.RunIPFS("bootstrap", "list", "--expand-auto") + require.Equal(t, 0, bootstrapResult.ExitCode(), "bootstrap list --expand-auto should succeed") + + // Parse both results + var configBootstrap, bootstrapBootstrap []string + err := json.Unmarshal([]byte(configResult.Stdout.String()), &configBootstrap) + require.NoError(t, err) + + // Bootstrap command output is line-separated, not JSON + bootstrapOutput := strings.TrimSpace(bootstrapResult.Stdout.String()) + if bootstrapOutput != "" { + bootstrapBootstrap = strings.Split(bootstrapOutput, "\n") + } + 
+ // Results should be equivalent + assert.Equal(t, len(configBootstrap), len(bootstrapBootstrap), "Both commands should return same number of peers") + + // Both should contain same peers (order might differ due to different output formats) + for _, peer := range configBootstrap { + found := false + for _, bsPeer := range bootstrapBootstrap { + if strings.TrimSpace(bsPeer) == peer { + found = true + break + } + } + assert.True(t, found, "Peer %s should be in both results", peer) + } + + t.Logf("Config command result: %v", configBootstrap) + t.Logf("Bootstrap command result: %v", bootstrapBootstrap) +} + +// testWriteOperationsFailWithExpandAuto verifies that --expand-auto flag is properly +// restricted to read-only operations and fails when used with config write operations. +// +// This test is essential because: +// 1. It enforces the security principle that --expand-auto should only be used for reading +// 2. It prevents users from accidentally overwriting config with expanded values +// 3. It ensures that "auto" placeholders are preserved in the stored configuration +// 4. It validates proper error handling and user guidance when misused +// 5. 
It protects against accidental loss of the "auto" semantic meaning +func testWriteOperationsFailWithExpandAuto(t *testing.T) { + // Test scenario: CLI without daemon (tests error conditions) + // This test doesn't need daemon setup since it's testing that write operations + // with --expand-auto should fail with appropriate error messages + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test that setting config with --expand-auto fails + testCases := []struct { + name string + args []string + }{ + {"config set with expand-auto", []string{"config", "Bootstrap", "[\"test\"]", "--expand-auto"}}, + {"config set JSON with expand-auto", []string{"config", "Bootstrap", "[\"test\"]", "--json", "--expand-auto"}}, + {"config set bool with expand-auto", []string{"config", "SomeField", "true", "--bool", "--expand-auto"}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := node.RunIPFS(tc.args...) + assert.NotEqual(t, 0, result.ExitCode(), "Write operation with --expand-auto should fail") + + stderr := result.Stderr.String() + assert.Contains(t, stderr, "--expand-auto", "Error should mention --expand-auto") + assert.Contains(t, stderr, "reading", "Error should mention reading limitation") + t.Logf("Expected error: %s", stderr) + }) + } +} + +// testConfigShowExpandAutoComplete verifies that `ipfs config show --expand-auto` +// produces a complete configuration with all "auto" values expanded to their resolved forms. +// +// This test is important because: +// 1. It validates the full-config expansion functionality for comprehensive troubleshooting +// 2. It ensures that users can see the complete resolved configuration state +// 3. It verifies that all "auto" placeholders are replaced, not just individual fields +// 4. It tests that the resulting JSON is valid and well-formed +// 5. 
It provides a way to export/backup the fully expanded configuration +func testConfigShowExpandAutoComplete(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + + // Load test autoconf data + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests to verify daemon fetches autoconf + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Add(1) + t.Logf("Config show test request: %s %s", r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with multiple auto values + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{".": "auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Test config show --expand-auto + result := node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config show --expand-auto should succeed") + + expandedConfig := result.Stdout.String() + + // Should not contain any literal "auto" values + assert.NotContains(t, expandedConfig, `"auto"`, "Expanded config should not contain literal 'auto' values") + + // Should contain expected expanded sections + assert.Contains(t, expandedConfig, `"Bootstrap"`, "Should contain Bootstrap section") + assert.Contains(t, expandedConfig, `"DNS"`, "Should contain DNS section") + assert.Contains(t, expandedConfig, `"Resolvers"`, "Should contain Resolvers section") + + // Should contain expanded peer addresses (not "auto") + assert.Contains(t, expandedConfig, "bootstrap.libp2p.io", "Should contain expanded bootstrap peers") 
+ + // Should be valid JSON + var configMap map[string]interface{} + err := json.Unmarshal([]byte(expandedConfig), &configMap) + require.NoError(t, err, "Expanded config should be valid JSON") + + // Verify specific fields were expanded + if bootstrap, ok := configMap["Bootstrap"].([]interface{}); ok { + assert.Greater(t, len(bootstrap), 0, "Bootstrap should have expanded entries") + for _, peer := range bootstrap { + assert.NotEqual(t, "auto", peer, "Bootstrap entries should not be 'auto'") + } + } + + t.Logf("Config show --expand-auto produced %d characters of expanded config", len(expandedConfig)) +} + +// testMultipleExpandAutoUsesCache verifies that multiple consecutive --expand-auto calls +// efficiently use cached autoconf data instead of making repeated HTTP requests. +// +// This test is critical for performance because: +// 1. It validates that the caching mechanism works correctly to reduce network overhead +// 2. It ensures that users can make multiple config queries without causing excessive HTTP traffic +// 3. It verifies that cached data is shared across different config fields and commands +// 4. It tests that HTTP headers (ETag/Last-Modified) are properly used for cache validation +// 5. It prevents regression where each --expand-auto call would trigger a new HTTP request +// 6. 
It demonstrates the performance benefit: 5 operations with only 1 network request +func testMultipleExpandAutoUsesCache(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + + // Create comprehensive autoconf response + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests to verify caching + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("AutoConf cache test request #%d: %s %s", count, r.Method, r.URL.Path) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"cache-test-123"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with all auto values + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + // Note: Using default RefreshInterval (24h) to ensure caching - explicit setting would require rebuilt binary + + // Set up auto values for multiple fields + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Reset counter to only track our expand-auto calls + requestCount.Store(0) + + // Make multiple --expand-auto calls on different fields + t.Log("Testing multiple --expand-auto calls should use cache...") + + // Call 1: Bootstrap --expand-auto (should trigger HTTP request) + result1 := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result1.ExitCode(), "Bootstrap --expand-auto 
should succeed") + + var expandedBootstrap []string + err := json.Unmarshal([]byte(result1.Stdout.String()), &expandedBootstrap) + require.NoError(t, err) + assert.NotContains(t, expandedBootstrap, "auto", "Bootstrap should be expanded") + assert.Greater(t, len(expandedBootstrap), 0, "Bootstrap should have entries") + + // Call 2: DNS.Resolvers --expand-auto (should use cache, no HTTP) + result2 := node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result2.ExitCode(), "DNS.Resolvers --expand-auto should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result2.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + + // Call 3: Routing.DelegatedRouters --expand-auto (should use cache, no HTTP) + result3 := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result3.ExitCode(), "Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result3.Stdout.String()), &expandedRouters) + require.NoError(t, err) + assert.NotContains(t, expandedRouters, "auto", "Routers should be expanded") + + // Call 4: Ipns.DelegatedPublishers --expand-auto (should use cache, no HTTP) + result4 := node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result4.ExitCode(), "Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result4.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + assert.NotContains(t, expandedPublishers, "auto", "Publishers should be expanded") + + // Call 5: config show --expand-auto (should use cache, no HTTP) + result5 := node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result5.ExitCode(), "config show --expand-auto should succeed") + + expandedConfig := result5.Stdout.String() + assert.NotContains(t, expandedConfig, `"auto"`, "Full config should not contain 
'auto' values") + + // CRITICAL TEST: Verify NO HTTP requests were made for --expand-auto calls (using cache) + finalRequestCount := requestCount.Load() + assert.Equal(t, int32(0), finalRequestCount, + "Multiple --expand-auto calls should result in 0 HTTP requests (using cache). Got %d requests", finalRequestCount) + + t.Logf("Made 5 --expand-auto calls, resulted in %d HTTP request(s) - cache is being used!", finalRequestCount) + + // Now simulate a manual cache refresh (what the background updater would do) + t.Log("Simulating manual cache refresh...") + + // Update the mock server to return different data + autoConfData2 := loadTestDataComprehensive(t, "updated_autoconf.json") + server.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Manual refresh request #%d: %s %s", count, r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"cache-test-456"`) + w.Header().Set("Last-Modified", "Thu, 22 Oct 2015 08:00:00 GMT") + _, _ = w.Write(autoConfData2) + }) + + // Note: In the actual daemon, the background updater would call MustGetConfigWithRefresh + // For this test, we'll verify that subsequent --expand-auto calls still use cache + // and don't trigger additional requests + + // Reset counter before manual refresh simulation + beforeRefresh := requestCount.Load() + + // Make another --expand-auto call - should still use cache + result6 := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result6.ExitCode(), "Bootstrap --expand-auto after refresh should succeed") + + afterRefresh := requestCount.Load() + assert.Equal(t, beforeRefresh, afterRefresh, + "--expand-auto should continue using cache even after server update") + + t.Logf("Cache continues to be used after server update - background updater pattern confirmed!") +} + +// testCLIUsesCacheOnlyDaemonUpdatesBackground verifies the correct autoconf behavior: +// daemon makes 
exactly one HTTP request during startup to fetch and cache data, then +// CLI commands always use cached data without making additional HTTP requests. +// +// This test is essential for correctness because: +// 1. It validates that daemon startup makes exactly one HTTP request to fetch autoconf +// 2. It verifies that CLI --expand-auto never makes HTTP requests (uses cache only) +// 3. It ensures CLI commands remain fast by always using cached data +// 4. It prevents regression where CLI commands might start making HTTP requests +// 5. It confirms the correct separation between daemon (network) and CLI (cache-only) behavior +func testCLIUsesCacheOnlyDaemonUpdatesBackground(t *testing.T) { + // Test scenario: CLI with daemon and long RefreshInterval (no background updates during test) + + // Create autoconf response + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests with timestamps + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Cache expiry test request #%d at %s: %s %s", count, time.Now().Format("15:04:05.000"), r.Method, r.URL.Path) + + w.Header().Set("Content-Type", "application/json") + // Use different ETag for each request to ensure we can detect new fetches + w.Header().Set("ETag", fmt.Sprintf(`"expiry-test-%d"`, count)) + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with long refresh interval + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + // Set long RefreshInterval to avoid background updates during test + node.SetIPFSConfig("AutoConf.RefreshInterval", "1h") + + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"test.": 
"auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Confirm only one request was made during daemon startup + initialRequestCount := requestCount.Load() + assert.Equal(t, int32(1), initialRequestCount, "Expected exactly 1 HTTP request during daemon startup, got: %d", initialRequestCount) + t.Logf("Daemon startup made exactly 1 HTTP request") + + // Test: CLI commands use cache only (no additional HTTP requests) + t.Log("Testing that CLI --expand-auto commands use cache only...") + + // Make several CLI calls - none should trigger HTTP requests + result1 := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result1.ExitCode(), "Bootstrap --expand-auto should succeed") + + result2 := node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result2.ExitCode(), "DNS.Resolvers --expand-auto should succeed") + + result3 := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result3.ExitCode(), "Routing.DelegatedRouters --expand-auto should succeed") + + // Verify the request count remains at 1 (no additional requests from CLI) + finalRequestCount := requestCount.Load() + assert.Equal(t, int32(1), finalRequestCount, "Request count should remain at 1 after CLI commands, got: %d", finalRequestCount) + t.Log("CLI commands use cache only - request count remains at 1") + + t.Log("Test completed: Daemon makes 1 startup request, CLI commands use cache only") +} + +// loadTestDataComprehensive is a helper function that loads test autoconf JSON data files. +// It locates the test data directory relative to the test file and reads the specified file. +// This centralized helper ensures consistent test data loading across all comprehensive tests. 
+func loadTestDataComprehensive(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: %s", filename) + + return data +} + +// startDaemonAndWaitForAutoConf starts a daemon and waits for it to fetch autoconf data. +// It returns the node with daemon running and ensures autoconf has been cached before returning. +// This is a DRY helper to avoid repeating daemon setup and request waiting logic in every test. +func startDaemonAndWaitForAutoConf(t *testing.T, node *harness.Node, requestCount *atomic.Int32) *harness.Node { + t.Helper() + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + // StartDaemon returns *Node, no error to check + + // Wait for daemon to fetch autoconf (wait for HTTP request to mock server) + t.Log("Waiting for daemon to fetch autoconf from mock server...") + timeout := time.After(10 * time.Second) // Safety timeout + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-timeout: + t.Fatal("Timeout waiting for autoconf fetch") + case <-ticker.C: + if requestCount.Load() > 0 { + t.Logf("Daemon fetched autoconf (%d requests made)", requestCount.Load()) + t.Log("AutoConf should now be cached by daemon") + return daemon + } + } + } +} diff --git a/test/cli/autoconf/expand_fallback_test.go b/test/cli/autoconf/expand_fallback_test.go new file mode 100644 index 000000000..f6fc1e8d3 --- /dev/null +++ b/test/cli/autoconf/expand_fallback_test.go @@ -0,0 +1,286 @@ +package autoconf + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/ipfs/boxo/autoconf" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExpandAutoFallbacks(t *testing.T) { + t.Parallel() + + 
t.Run("expand-auto with unreachable server shows fallbacks", func(t *testing.T) { + t.Parallel() + testExpandAutoWithUnreachableServer(t) + }) + + t.Run("expand-auto with disabled autoconf shows error", func(t *testing.T) { + t.Parallel() + testExpandAutoWithDisabledAutoConf(t) + }) + + t.Run("expand-auto with malformed response shows fallbacks", func(t *testing.T) { + t.Parallel() + testExpandAutoWithMalformedResponse(t) + }) + + t.Run("expand-auto preserves static values in mixed config", func(t *testing.T) { + t.Parallel() + testExpandAutoMixedConfigPreservesStatic(t) + }) + + t.Run("daemon gracefully handles malformed autoconf and uses fallbacks", func(t *testing.T) { + t.Parallel() + testDaemonWithMalformedAutoConf(t) + }) +} + +func testExpandAutoWithUnreachableServer(t *testing.T) { + // Create IPFS node with unreachable AutoConf server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", "http://127.0.0.1:99999/nonexistent") // Unreachable + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Test that --expand-auto falls back to defaults when server is unreachable + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed even with unreachable server") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should contain fallback bootstrap peers (not "auto" and not empty) + assert.NotContains(t, bootstrap, "auto", "Fallback bootstrap should not contain 'auto'") + assert.Greater(t, len(bootstrap), 0, "Fallback bootstrap should not be empty") + + // Should contain known default bootstrap peers + foundDefaultPeer := false + for _, peer := range bootstrap { + if peer != "" && peer != "auto" { + foundDefaultPeer = true + 
t.Logf("Found fallback bootstrap peer: %s", peer) + break + } + } + assert.True(t, foundDefaultPeer, "Should contain at least one fallback bootstrap peer") + + // Test DNS resolvers fallback + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config DNS.Resolvers --expand-auto should succeed with unreachable server") + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + + // When autoconf server is unreachable, DNS resolvers should fall back to defaults + // The "foo." resolver should not exist in fallbacks (only "eth." has fallback) + fooResolver, fooExists := resolvers["foo."] + + if !fooExists { + t.Log("DNS resolver for 'foo.' has no fallback - correct behavior (only eth. has fallbacks)") + } else { + assert.NotEqual(t, "auto", fooResolver, "DNS resolver should not be 'auto' after expansion") + t.Logf("Unexpected DNS resolver for foo.: %s", fooResolver) + } +} + +func testExpandAutoWithDisabledAutoConf(t *testing.T) { + // Create IPFS node with AutoConf disabled + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", false) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test that --expand-auto with disabled AutoConf returns appropriate error or fallback + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + + // When AutoConf is disabled, expand-auto should show empty results + // since "auto" values are not expanded when AutoConf.Enabled=false + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // With AutoConf disabled, "auto" values are not expanded so we get empty result + assert.NotContains(t, bootstrap, "auto", "Should not contain 'auto' after expansion") + assert.Equal(t, 0, len(bootstrap), "Should be empty when AutoConf disabled (auto values not expanded)") + t.Log("Bootstrap is empty 
when AutoConf disabled - correct behavior") +} + +func testExpandAutoWithMalformedResponse(t *testing.T) { + // Create server that returns malformed JSON + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{"invalid": "json", "Bootstrap": [incomplete`)) // Malformed JSON + })) + defer server.Close() + + // Create IPFS node with malformed autoconf server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test that --expand-auto handles malformed response gracefully + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed even with malformed response") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should fall back to defaults, not contain "auto" + assert.NotContains(t, bootstrap, "auto", "Should not contain 'auto' after fallback") + assert.Greater(t, len(bootstrap), 0, "Should contain fallback peers after malformed response") + t.Logf("Bootstrap after malformed response: %v", bootstrap) +} + +func testExpandAutoMixedConfigPreservesStatic(t *testing.T) { + // Load valid test autoconf data + autoConfData := loadTestDataForFallback(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with mixed auto and static values + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", 
true) + + // Set mixed configuration: static + auto + static + node.SetIPFSConfig("Bootstrap", []string{ + "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", + "auto", + "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", + }) + + // Test that --expand-auto only expands "auto" values, preserves static ones + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should not contain literal "auto" anymore + assert.NotContains(t, bootstrap, "auto", "Expanded config should not contain literal 'auto'") + + // Should preserve static values at original positions + assert.Contains(t, bootstrap, "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", "Should preserve first static peer") + assert.Contains(t, bootstrap, "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", "Should preserve third static peer") + + // Should have more entries than just the static ones (auto got expanded) + assert.Greater(t, len(bootstrap), 2, "Should have more than just the 2 static peers") + + t.Logf("Mixed config expansion result: %v", bootstrap) + + // Verify order is preserved: static, expanded auto values, static + assert.Equal(t, "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", bootstrap[0], "First peer should be preserved") + lastIndex := len(bootstrap) - 1 + assert.Equal(t, "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", bootstrap[lastIndex], "Last peer should be preserved") +} + +func testDaemonWithMalformedAutoConf(t *testing.T) { + // Test scenario: Daemon starts with AutoConf.URL pointing to server that returns malformed JSON + // This tests that daemon gracefully handles malformed responses and falls back to hardcoded defaults + + // Create server that returns malformed JSON to simulate broken autoconf service + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) 
{ + w.Header().Set("Content-Type", "application/json") + // Return malformed JSON that cannot be parsed + _, _ = w.Write([]byte(`{"Bootstrap": ["incomplete array", "missing closing bracket"`)) + })) + defer server.Close() + + // Create IPFS node with autoconf pointing to malformed server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Start daemon - this will attempt to fetch autoconf from malformed server + t.Log("Starting daemon with malformed autoconf server...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for daemon to attempt autoconf fetch and handle the error gracefully + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("Daemon should have attempted autoconf fetch and fallen back to defaults") + + // Test that daemon is still running and CLI commands work with fallback values + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed with daemon running") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should fall back to hardcoded defaults from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.NotContains(t, bootstrap, "auto", "Should not contain 'auto' after fallback") + assert.Greater(t, len(bootstrap), 0, "Should contain fallback bootstrap peers") + + // Verify we got actual fallback bootstrap peers from GetMainnetFallbackConfig() AminoDHT NativeConfig + fallbackConfig := autoconf.GetMainnetFallbackConfig() + aminoDHTSystem := fallbackConfig.SystemRegistry["AminoDHT"] + expectedBootstrapPeers := 
aminoDHTSystem.NativeConfig.Bootstrap + + foundFallbackPeers := 0 + for _, expectedPeer := range expectedBootstrapPeers { + for _, actualPeer := range bootstrap { + if actualPeer == expectedPeer { + foundFallbackPeers++ + break + } + } + } + assert.Greater(t, foundFallbackPeers, 0, "Should contain bootstrap peers from GetMainnetFallbackConfig() AminoDHT NativeConfig") + assert.Equal(t, len(expectedBootstrapPeers), foundFallbackPeers, "Should contain all bootstrap peers from GetMainnetFallbackConfig() AminoDHT NativeConfig") + + t.Logf("Daemon fallback bootstrap peers after malformed response: %v", bootstrap) + + // Test DNS resolvers also fall back correctly + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config DNS.Resolvers --expand-auto should succeed with daemon running") + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + + // Should not contain "auto" and should have fallback DNS resolvers + assert.NotEqual(t, "auto", resolvers["foo."], "DNS resolver should not be 'auto' after fallback") + if resolvers["foo."] != "" { + // If resolver is populated, it should be a valid URL from fallbacks + assert.Contains(t, resolvers["foo."], "https://", "Fallback DNS resolver should be HTTPS URL") + } + + t.Logf("Daemon fallback DNS resolvers after malformed response: %v", resolvers) + + // Verify daemon is still healthy and responsive + versionResult := node.RunIPFS("version") + require.Equal(t, 0, versionResult.ExitCode(), "daemon should remain healthy after handling malformed autoconf") + t.Log("Daemon remains healthy after gracefully handling malformed autoconf response") +} + +// Helper function to load test data files for fallback tests +func loadTestDataForFallback(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: 
%s", filename) + + return data +} diff --git a/test/cli/autoconf/expand_test.go b/test/cli/autoconf/expand_test.go new file mode 100644 index 000000000..253c8000b --- /dev/null +++ b/test/cli/autoconf/expand_test.go @@ -0,0 +1,732 @@ +package autoconf + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfExpand(t *testing.T) { + t.Parallel() + + t.Run("config commands show auto values", func(t *testing.T) { + t.Parallel() + testConfigCommandsShowAutoValues(t) + }) + + t.Run("mixed configuration preserves both auto and static", func(t *testing.T) { + t.Parallel() + testMixedConfigurationPreserved(t) + }) + + t.Run("config replace preserves auto values", func(t *testing.T) { + t.Parallel() + testConfigReplacePreservesAuto(t) + }) + + t.Run("expand-auto filters unsupported URL paths with delegated routing", func(t *testing.T) { + t.Parallel() + testExpandAutoFiltersUnsupportedPathsDelegated(t) + }) + + t.Run("expand-auto with auto routing uses NewRoutingSystem", func(t *testing.T) { + t.Parallel() + testExpandAutoWithAutoRouting(t) + }) + + t.Run("expand-auto with auto routing shows AminoDHT native vs IPNI delegated", func(t *testing.T) { + t.Parallel() + testExpandAutoWithMixedSystems(t) + }) + + t.Run("expand-auto filters paths with NewRoutingSystem and auto routing", func(t *testing.T) { + t.Parallel() + testExpandAutoWithFiltering(t) + }) + + t.Run("expand-auto falls back to defaults without cache (delegated)", func(t *testing.T) { + t.Parallel() + testExpandAutoWithoutCacheDelegated(t) + }) + + t.Run("expand-auto with auto routing without cache", func(t *testing.T) { + t.Parallel() + testExpandAutoWithoutCacheAuto(t) + }) +} + +func testConfigCommandsShowAutoValues(t *testing.T) { + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Set all fields 
to "auto" + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Test individual field queries + t.Run("Bootstrap shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, bootstrap) + }) + + t.Run("DNS.Resolvers shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + + var resolvers map[string]string + err := json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + assert.Equal(t, map[string]string{"foo.": "auto"}, resolvers) + }) + + t.Run("Routing.DelegatedRouters shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, routers) + }) + + t.Run("Ipns.DelegatedPublishers shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "Ipns.DelegatedPublishers") + require.Equal(t, 0, result.ExitCode()) + + var publishers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &publishers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, publishers) + }) + + t.Run("config show contains all auto values", func(t *testing.T) { + result := node.RunIPFS("config", "show") + require.Equal(t, 0, result.ExitCode()) + + output := result.Stdout.String() + + // Check that auto values are present in the full config + assert.Contains(t, output, `"Bootstrap": [ + "auto" + ]`, "Bootstrap should 
contain auto") + + assert.Contains(t, output, `"DNS": { + "Resolvers": { + "foo.": "auto" + } + }`, "DNS.Resolvers should contain auto") + + assert.Contains(t, output, `"DelegatedRouters": [ + "auto" + ]`, "Routing.DelegatedRouters should contain auto") + + assert.Contains(t, output, `"DelegatedPublishers": [ + "auto" + ]`, "Ipns.DelegatedPublishers should contain auto") + }) + + // Test with autoconf server for --expand-auto functionality + t.Run("config with --expand-auto expands auto values", func(t *testing.T) { + // Load test autoconf data + autoConfData := loadTestDataExpand(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Test Bootstrap field expansion + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed") + + var expandedBootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedBootstrap) + require.NoError(t, err) + assert.NotContains(t, expandedBootstrap, "auto", "Expanded bootstrap should not contain 'auto'") + assert.Greater(t, len(expandedBootstrap), 0, "Expanded bootstrap should contain expanded peers") + + // Test DNS.Resolvers field expansion + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config DNS.Resolvers --expand-auto should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + assert.NotEqual(t, "auto", expandedResolvers["foo."], "Expanded DNS resolver should not be 'auto'") + + // Test 
Routing.DelegatedRouters field expansion + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + assert.NotContains(t, expandedRouters, "auto", "Expanded routers should not contain 'auto'") + + // Test Ipns.DelegatedPublishers field expansion + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + assert.NotContains(t, expandedPublishers, "auto", "Expanded publishers should not contain 'auto'") + + // Test config show --expand-auto (full config expansion) + result = node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config show --expand-auto should succeed") + + expandedOutput := result.Stdout.String() + t.Logf("Expanded config output contains: %d characters", len(expandedOutput)) + + // Verify that auto values are expanded in the full config + assert.NotContains(t, expandedOutput, `"auto"`, "Expanded config should not contain literal 'auto' values") + assert.Contains(t, expandedOutput, `"Bootstrap"`, "Expanded config should contain Bootstrap section") + assert.Contains(t, expandedOutput, `"DNS"`, "Expanded config should contain DNS section") + }) +} + +func testMixedConfigurationPreserved(t *testing.T) { + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Set mixed configuration + node.SetIPFSConfig("Bootstrap", []string{ + "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", + "auto", + "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", + }) + + node.SetIPFSConfig("DNS.Resolvers", 
map[string]string{ + "eth.": "https://eth.resolver", + "foo.": "auto", + "bar.": "https://bar.resolver", + }) + + node.SetIPFSConfig("Routing.DelegatedRouters", []string{ + "https://static.router", + "auto", + }) + + // Verify Bootstrap preserves order and mixes auto with static + result := node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + assert.Equal(t, []string{ + "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", + "auto", + "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", + }, bootstrap) + + // Verify DNS.Resolvers preserves both auto and static + result = node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + assert.Equal(t, "https://eth.resolver", resolvers["eth."]) + assert.Equal(t, "auto", resolvers["foo."]) + assert.Equal(t, "https://bar.resolver", resolvers["bar."]) + + // Verify Routing.DelegatedRouters preserves order + result = node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{ + "https://static.router", + "auto", + }, routers) +} + +func testConfigReplacePreservesAuto(t *testing.T) { + // Create IPFS node + h := harness.NewT(t) + node := h.NewNode().Init("--profile=test") + + // Set initial auto values + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Export current config + result := node.RunIPFS("config", "show") + require.Equal(t, 0, result.ExitCode()) + originalConfig := result.Stdout.String() + + // Verify auto values are in the exported config + assert.Contains(t, originalConfig, 
`"Bootstrap": [ + "auto" + ]`) + assert.Contains(t, originalConfig, `"foo.": "auto"`) + + // Modify the config string to add a new field but preserve auto values + var configMap map[string]interface{} + err := json.Unmarshal([]byte(originalConfig), &configMap) + require.NoError(t, err) + + // Add a new field + configMap["NewTestField"] = "test-value" + + // Marshal back to JSON + modifiedConfig, err := json.MarshalIndent(configMap, "", " ") + require.NoError(t, err) + + // Write config to file and replace + configFile := h.WriteToTemp(string(modifiedConfig)) + replaceResult := node.RunIPFS("config", "replace", configFile) + if replaceResult.ExitCode() != 0 { + t.Logf("Config replace failed: stdout=%s, stderr=%s", replaceResult.Stdout.String(), replaceResult.Stderr.String()) + } + require.Equal(t, 0, replaceResult.ExitCode()) + + // Verify auto values are still present after replace + result = node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + + var bootstrap []string + err = json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, bootstrap, "Bootstrap should still contain auto after config replace") + + // Verify DNS resolver config is preserved after replace + result = node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + assert.Equal(t, "auto", resolvers["foo."], "DNS resolver for foo. 
should still be auto after config replace") +} + +func testExpandAutoFiltersUnsupportedPathsDelegated(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached using delegated routing + // This tests the production scenario where delegated routing is enabled and + // daemon has fetched and cached autoconf data, and CLI commands read from that cache + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure delegated routing to use autoconf URLs + node.SetIPFSConfig("Routing.Type", "delegated") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + // Disable content providing when using delegated routing + node.SetIPFSConfig("Provide.Enabled", false) + node.SetIPFSConfig("Provide.DHT.Interval", "0") + + // Load test autoconf data with unsupported paths + autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json") + + // Create HTTP server that serves autoconf.json with unsupported paths + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Verify the autoconf URL is set correctly + result := node.RunIPFS("config", "AutoConf.URL") + require.Equal(t, 0, result.ExitCode(), "config AutoConf.URL should succeed") + t.Logf("AutoConf URL is set to: %s", result.Stdout.String()) + assert.Contains(t, result.Stdout.String(), "127.0.0.1", "AutoConf URL should contain the test server address") + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + 
buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion filters unsupported paths + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // After cache prewarming, should get URLs from autoconf that have supported paths + assert.Contains(t, expandedRouters, "https://supported.example.com/routing/v1/providers", "Should contain supported provider URL") + assert.Contains(t, expandedRouters, "https://supported.example.com/routing/v1/peers", "Should contain supported peers URL") + assert.Contains(t, expandedRouters, "https://mixed.example.com/routing/v1/providers", "Should contain mixed provider URL") + assert.Contains(t, expandedRouters, "https://mixed.example.com/routing/v1/peers", "Should contain mixed peers URL") + + // Verify unsupported URLs from autoconf are filtered out (not in result) + assert.NotContains(t, expandedRouters, "https://unsupported.example.com/example/v0/read", "Should filter out unsupported path /example/v0/read") + assert.NotContains(t, expandedRouters, "https://unsupported.example.com/api/v1/custom", "Should filter out unsupported path /api/v1/custom") + assert.NotContains(t, expandedRouters, "https://mixed.example.com/unsupported/path", "Should filter out unsupported path /unsupported/path") + + t.Logf("Filtered routers: %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion filters unsupported paths + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = 
json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // After cache prewarming, should get URLs from autoconf that have supported paths + assert.Contains(t, expandedPublishers, "https://supported.example.com/routing/v1/ipns", "Should contain supported IPNS URL") + assert.Contains(t, expandedPublishers, "https://mixed.example.com/routing/v1/ipns", "Should contain mixed IPNS URL") + + // Verify unsupported URLs from autoconf are filtered out (not in result) + assert.NotContains(t, expandedPublishers, "https://unsupported.example.com/example/v0/write", "Should filter out unsupported write path") + + t.Logf("Filtered publishers: %v", expandedPublishers) +} + +func testExpandAutoWithoutCacheDelegated(t *testing.T) { + // Test scenario: CLI without daemon ever starting (no cached autoconf) using delegated routing + // This tests the fallback scenario where delegated routing is configured but CLI commands + // cannot read from cache and must fall back to hardcoded defaults + + // Create IPFS node but DO NOT start daemon + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure delegated routing to use autoconf URLs (but no daemon to fetch them) + node.SetIPFSConfig("Routing.Type", "delegated") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + // Disable content providing when using delegated routing + node.SetIPFSConfig("Provide.Enabled", false) + node.SetIPFSConfig("Provide.DHT.Interval", "0") + + // Load test autoconf data with unsupported paths (this won't be used since no daemon) + autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json") + + // Create HTTP server that serves autoconf.json with unsupported paths + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer 
server.Close() + + // Configure autoconf for the node (but daemon never starts to fetch it) + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Test Routing.DelegatedRouters field expansion without cached autoconf + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // Without cached autoconf, should get fallback URLs from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.Contains(t, expandedRouters, "https://cid.contact/routing/v1/providers", "Should contain fallback provider URL from GetMainnetFallbackConfig()") + + t.Logf("Fallback routers (no cache): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion without cached autoconf + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // Without cached autoconf, should get fallback IPNS publishers from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.Contains(t, expandedPublishers, "https://delegated-ipfs.dev/routing/v1/ipns", "Should contain fallback IPNS URL from GetMainnetFallbackConfig()") + + t.Logf("Fallback publishers (no cache): %v", expandedPublishers) +} + +func testExpandAutoWithAutoRouting(t *testing.T) { + // Test scenario: CLI with daemon started using auto routing with NewRoutingSystem + // This tests that non-native systems (NewRoutingSystem) ARE 
delegated even with auto routing + // Only native systems like AminoDHT are handled internally with auto routing + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing with non-native system + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data with NewRoutingSystem (non-native, will be delegated) + autoConfData := loadTestDataExpand(t, "autoconf_new_routing_system.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion with auto routing + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // With auto routing and NewRoutingSystem (non-native), delegated endpoints should be populated + assert.Contains(t, expandedRouters, "https://new-routing.example.com/routing/v1/providers", "Should contain NewRoutingSystem provider URL") + 
assert.Contains(t, expandedRouters, "https://new-routing.example.com/routing/v1/peers", "Should contain NewRoutingSystem peers URL") + + t.Logf("Auto routing routers (NewRoutingSystem delegated): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion with auto routing + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // With auto routing and NewRoutingSystem (non-native), delegated publishers should be populated + assert.Contains(t, expandedPublishers, "https://new-routing.example.com/routing/v1/ipns", "Should contain NewRoutingSystem IPNS URL") + + t.Logf("Auto routing publishers (NewRoutingSystem delegated): %v", expandedPublishers) +} + +func testExpandAutoWithMixedSystems(t *testing.T) { + // Test scenario: Auto routing with both AminoDHT (native) and IPNI (delegated) systems + // This explicitly confirms that AminoDHT is NOT delegated but IPNI at cid.contact IS delegated + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data with both AminoDHT and IPNI systems + autoConfData := loadTestDataExpand(t, "autoconf_amino_and_ipni.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + 
node.SetIPFSConfig("AutoConf.Enabled", true) + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // With auto routing: AminoDHT (native) should NOT be delegated, IPNI should be delegated + assert.Contains(t, expandedRouters, "https://cid.contact/routing/v1/providers", "Should contain IPNI provider URL (delegated)") + assert.NotContains(t, expandedRouters, "https://amino-dht.example.com", "Should NOT contain AminoDHT URLs (native)") + + t.Logf("Mixed systems routers (IPNI delegated, AminoDHT native): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // IPNI system doesn't have write endpoints, so publishers should be empty + // (or contain other systems if they have write endpoints) + t.Logf("Mixed systems publishers (IPNI has no write endpoints): %v", expandedPublishers) +} + +func testExpandAutoWithFiltering(t *testing.T) { + // Test scenario: Auto routing with NewRoutingSystem and path filtering + // This tests that path filtering works for 
delegated systems even with auto routing + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data with NewRoutingSystem and mixed valid/invalid paths + autoConfData := loadTestDataExpand(t, "autoconf_new_routing_with_filtering.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion with filtering + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // Should contain supported paths from NewRoutingSystem + assert.Contains(t, expandedRouters, "https://supported-new.example.com/routing/v1/providers", "Should contain supported provider URL") + assert.Contains(t, expandedRouters, "https://supported-new.example.com/routing/v1/peers", "Should contain supported peers URL") + assert.Contains(t, 
expandedRouters, "https://mixed-new.example.com/routing/v1/providers", "Should contain mixed provider URL") + assert.Contains(t, expandedRouters, "https://mixed-new.example.com/routing/v1/peers", "Should contain mixed peers URL") + + // Should NOT contain unsupported paths + assert.NotContains(t, expandedRouters, "https://unsupported-new.example.com/custom/v0/read", "Should filter out unsupported path") + assert.NotContains(t, expandedRouters, "https://unsupported-new.example.com/api/v1/nonstandard", "Should filter out unsupported path") + assert.NotContains(t, expandedRouters, "https://mixed-new.example.com/invalid/path", "Should filter out invalid path from mixed endpoint") + + t.Logf("Filtered routers (NewRoutingSystem with auto routing): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion with filtering + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // Should contain supported IPNS paths + assert.Contains(t, expandedPublishers, "https://supported-new.example.com/routing/v1/ipns", "Should contain supported IPNS URL") + assert.Contains(t, expandedPublishers, "https://mixed-new.example.com/routing/v1/ipns", "Should contain mixed IPNS URL") + + // Should NOT contain unsupported write paths + assert.NotContains(t, expandedPublishers, "https://unsupported-new.example.com/custom/v0/write", "Should filter out unsupported write path") + + t.Logf("Filtered publishers (NewRoutingSystem with auto routing): %v", expandedPublishers) +} + +func testExpandAutoWithoutCacheAuto(t *testing.T) { + // Test scenario: CLI without daemon ever starting using auto routing (default) + // This tests the fallback scenario where auto routing is used but doesn't populate delegated config fields 
+ + // Create IPFS node but DO NOT start daemon + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing - delegated fields are set to "auto" but won't be populated + // because auto routing uses different internal mechanisms + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data (this won't be used since no daemon and auto routing doesn't use these fields) + autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json") + + // Create HTTP server (won't be contacted since no daemon) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node (but daemon never starts to fetch it) + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Test Routing.DelegatedRouters field expansion without cached autoconf + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // With auto routing, some fallback URLs are still populated from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.Contains(t, expandedRouters, "https://cid.contact/routing/v1/providers", "Should contain fallback provider URL from GetMainnetFallbackConfig()") + + t.Logf("Auto routing fallback routers (with fallbacks): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion without cached autoconf + result = node.RunIPFS("config", 
"Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // With auto routing, delegated publishers may be empty for fallback scenario + // This can vary based on which systems have write endpoints in the fallback config + t.Logf("Auto routing fallback publishers: %v", expandedPublishers) +} + +// Helper function to load test data files +func loadTestDataExpand(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: %s", filename) + + return data +} diff --git a/test/cli/autoconf/extensibility_test.go b/test/cli/autoconf/extensibility_test.go new file mode 100644 index 000000000..87939a820 --- /dev/null +++ b/test/cli/autoconf/extensibility_test.go @@ -0,0 +1,255 @@ +package autoconf + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +// TestAutoConfExtensibility_NewSystem verifies that the AutoConf system can be extended +// with new routing systems beyond the default AminoDHT and IPNI. +// +// The test verifies that: +// 1. New systems can be added via AutoConf's SystemRegistry +// 2. Native vs delegated system filtering works correctly: +// - Native systems (AminoDHT) provide bootstrap peers and are used for P2P routing +// - Delegated systems (IPNI, NewSystem) provide HTTP endpoints for delegated routing +// +// 3. The system correctly filters endpoints based on routing type +// +// Note: Only native systems contribute bootstrap peers. Delegated systems like "NewSystem" +// only provide HTTP routing endpoints, not P2P bootstrap peers. 
+func TestAutoConfExtensibility_NewSystem(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + // Setup mock autoconf server with NewSystem + var mockServer *httptest.Server + mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Create autoconf.json with NewSystem + autoconfData := map[string]interface{}{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + "IPNI": map[string]interface{}{ + "URL": "https://ipni.example.com", + "Description": "Network Indexer", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + "NewSystem": map[string]interface{}{ + "URL": "https://example.com/newsystem", + "Description": "Test system for extensibility verification", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/ip4/127.0.0.1/tcp/9999/p2p/12D3KooWPeQ4r3v6CmVmKXoFGtqEqcr3L8P6La9yH5oEWKtoLVVa", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{ + "eth.": []string{"https://dns.eth.limo/dns-query"}, + }, + "DelegatedEndpoints": map[string]interface{}{ + "https://ipni.example.com": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + mockServer.URL + "/newsystem": 
map[string]interface{}{ + "Systems": []string{"NewSystem"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "max-age=300") + _ = json.NewEncoder(w).Encode(autoconfData) + })) + defer mockServer.Close() + + // NewSystem mock server URL will be dynamically assigned + newSystemServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Simple mock server for NewSystem endpoint + response := map[string]interface{}{"Providers": []interface{}{}} + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(response) + })) + defer newSystemServer.Close() + + // Update the autoconf to point to the correct NewSystem endpoint + mockServer.Close() + mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + autoconfData := map[string]interface{}{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + "IPNI": map[string]interface{}{ + "URL": "https://ipni.example.com", + "Description": "Network Indexer", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + "NewSystem": map[string]interface{}{ + "URL": "https://example.com/newsystem", + "Description": "Test system for extensibility verification", + "NativeConfig": map[string]interface{}{ + "Bootstrap": 
[]string{ + "/ip4/127.0.0.1/tcp/9999/p2p/12D3KooWPeQ4r3v6CmVmKXoFGtqEqcr3L8P6La9yH5oEWKtoLVVa", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{ + "eth.": []string{"https://dns.eth.limo/dns-query"}, + }, + "DelegatedEndpoints": map[string]interface{}{ + "https://ipni.example.com": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + newSystemServer.URL: map[string]interface{}{ + "Systems": []string{"NewSystem"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "max-age=300") + _ = json.NewEncoder(w).Encode(autoconfData) + })) + defer mockServer.Close() + + // Create Kubo node with autoconf pointing to mock server + h := harness.NewT(t) + node := h.NewNode().Init() + + // Update config to use mock autoconf server + node.UpdateConfig(func(cfg *config.Config) { + cfg.AutoConf.URL = config.NewOptionalString(mockServer.URL) + cfg.AutoConf.Enabled = config.True + cfg.AutoConf.RefreshInterval = config.NewOptionalDuration(1 * time.Second) + cfg.Routing.Type = config.NewOptionalString("auto") // Should enable native AminoDHT + delegated others + cfg.Bootstrap = []string{"auto"} + cfg.Routing.DelegatedRouters = []string{"auto"} + }) + + // Start the daemon + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Give the daemon some time to initialize and make requests + time.Sleep(3 * time.Second) + + // Test 1: Verify bootstrap includes both AminoDHT and NewSystem peers (deduplicated) + bootstrapResult := daemon.IPFS("bootstrap", "list", "--expand-auto") + bootstrapOutput := bootstrapResult.Stdout.String() + t.Logf("Bootstrap output: %s", bootstrapOutput) + + // Should contain original DHT bootstrap peer (AminoDHT is a native system) + 
require.Contains(t, bootstrapOutput, "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", "Should contain AminoDHT bootstrap peer") + + // Note: NewSystem bootstrap peers are NOT included because only native systems + // (AminoDHT for Routing.Type="auto") contribute bootstrap peers. + // Delegated systems like NewSystem only provide HTTP routing endpoints. + + // Test 2: Verify delegated endpoints are filtered correctly + // For Routing.Type=auto, native systems=[AminoDHT], so: + // - AminoDHT endpoints should be filtered out + // - IPNI and NewSystem endpoints should be included + + // Get the expanded delegated routers using --expand-auto + routerResult := daemon.IPFS("config", "Routing.DelegatedRouters", "--expand-auto") + var expandedRouters []string + require.NoError(t, json.Unmarshal([]byte(routerResult.Stdout.String()), &expandedRouters)) + + t.Logf("Expanded delegated routers: %v", expandedRouters) + + // Verify we got exactly 2 delegated routers: IPNI and NewSystem + require.Equal(t, 2, len(expandedRouters), "Should have exactly 2 delegated routers (IPNI and NewSystem). 
Got %d: %v", len(expandedRouters), expandedRouters) + + // Convert to URLs for checking + routerURLs := expandedRouters + + // Should contain NewSystem endpoint (not native) - now with routing path + foundNewSystem := false + expectedNewSystemURL := newSystemServer.URL + "/routing/v1/providers" // Full URL with path, as returned by DelegatedRoutersWithAutoConf + for _, url := range routerURLs { + if url == expectedNewSystemURL { + foundNewSystem = true + break + } + } + require.True(t, foundNewSystem, "Should contain NewSystem endpoint (%s) for delegated routing, got: %v", expectedNewSystemURL, routerURLs) + + // Should contain ipni.example.com (IPNI is not native) + foundIPNI := false + for _, url := range routerURLs { + if strings.Contains(url, "ipni.example.com") { + foundIPNI = true + break + } + } + require.True(t, foundIPNI, "Should contain ipni.example.com endpoint for IPNI") + + // Test passes - we've verified that: + // 1. Bootstrap peers are correctly resolved from native systems only + // 2. Delegated routers include both IPNI and NewSystem endpoints + // 3. URL format is correct (base URLs with paths) + // 4. 
AutoConf extensibility works for unknown systems

	t.Log("NewSystem extensibility test passed - Kubo successfully discovered and used unknown routing system")
}
diff --git a/test/cli/autoconf/fuzz_test.go b/test/cli/autoconf/fuzz_test.go
new file mode 100644
index 000000000..440bc3a25
--- /dev/null
+++ b/test/cli/autoconf/fuzz_test.go
@@ -0,0 +1,654 @@
package autoconf

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/ipfs/boxo/autoconf"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// testAutoConfWithFallback is a helper function that tests autoconf parsing with
// fallback detection. It fetches autoconf from serverURL with a default 10s timeout
// and returns the resolved config plus whether the fallback config was used.
func testAutoConfWithFallback(t *testing.T, serverURL string, expectError bool, expectErrorMsg string) (*autoconf.Config, bool) {
	return testAutoConfWithFallbackAndTimeout(t, serverURL, expectError, expectErrorMsg, 10*time.Second)
}

// testAutoConfWithFallbackAndTimeout is a helper function that tests autoconf parsing
// with fallback detection and a custom timeout. GetCachedOrRefresh returns no error,
// so error conditions are detected indirectly: a sentinel fallback config
// (AutoConfVersion == -999) marks that the fetched payload was rejected.
func testAutoConfWithFallbackAndTimeout(t *testing.T, serverURL string, expectError bool, expectErrorMsg string, timeout time.Duration) (*autoconf.Config, bool) {
	// Use fallback detection to test error conditions with GetCachedOrRefresh
	fallbackUsed := false
	fallbackConfig := &autoconf.Config{
		AutoConfVersion: -999, // Special marker to detect fallback usage
		AutoConfSchema:  -999,
	}

	client, err := autoconf.NewClient(
		autoconf.WithUserAgent("test-agent"),
		autoconf.WithURL(serverURL),
		autoconf.WithRefreshInterval(autoconf.DefaultRefreshInterval),
		autoconf.WithFallback(func() *autoconf.Config {
			// Record that the client gave up on the remote payload.
			fallbackUsed = true
			return fallbackConfig
		}),
	)
	require.NoError(t, err)

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	result := client.GetCachedOrRefresh(ctx)

	if expectError {
		// The sentinel version proves the fallback config was returned, not a fetched one.
		require.True(t, fallbackUsed, expectErrorMsg)
		require.Equal(t, int64(-999), result.AutoConfVersion, "Should return fallback config for error case")
	} else {
		require.False(t, fallbackUsed, "Expected no fallback to be used")
		require.NotEqual(t, int64(-999), result.AutoConfVersion, "Should return fetched config for success case")
	}

	return result, fallbackUsed
}

// TestAutoConfFuzz drives all fuzz-style subtests against the autoconf parser.
func TestAutoConfFuzz(t *testing.T) {
	t.Parallel()

	t.Run("fuzz autoconf version", testFuzzAutoConfVersion)
	t.Run("fuzz bootstrap arrays", testFuzzBootstrapArrays)
	t.Run("fuzz dns resolvers", testFuzzDNSResolvers)
	t.Run("fuzz delegated routers", testFuzzDelegatedRouters)
	t.Run("fuzz delegated publishers", testFuzzDelegatedPublishers)
	t.Run("fuzz malformed json", testFuzzMalformedJSON)
	t.Run("fuzz large payloads", testFuzzLargePayloads)
}

// testFuzzAutoConfVersion exercises the AutoConfVersion field with valid and
// invalid JSON types; invalid values must cause the client to use its fallback.
func testFuzzAutoConfVersion(t *testing.T) {
	testCases := []struct {
		name        string
		version     interface{}
		expectError bool
	}{
		{"valid version", 2025071801, false},
		{"zero version", 0, true}, // Should be invalid
		{"negative version", -1, false}, // Parser accepts negative versions
		{"string version", "2025071801", true}, // Should be number
		{"float version", 2025071801.5, true},
		{"very large version", 9999999999999999, false}, // Large but valid int64
		{"null version", nil, true},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Minimal but complete autoconf document with the version under test.
			config := map[string]interface{}{
				"AutoConfVersion": tc.version,
				"AutoConfSchema":  1,
				"AutoConfTTL":     86400,
				"SystemRegistry": map[string]interface{}{
					"AminoDHT": map[string]interface{}{
						"Description": "Test AminoDHT system",
						"NativeConfig": map[string]interface{}{
							"Bootstrap": []string{
								"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
							},
						},
					},
				},
				"DNSResolvers":       map[string]interface{}{},
				"DelegatedEndpoints": map[string]interface{}{},
			}

			jsonData, err := json.Marshal(config)
			require.NoError(t, err)

			server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + // Test that our autoconf parser handles this gracefully + _, _ = testAutoConfWithFallback(t, server.URL, tc.expectError, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + }) + } +} + +func testFuzzBootstrapArrays(t *testing.T) { + type testCase struct { + name string + bootstrap interface{} + expectError bool + validate func(*testing.T, *autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid bootstrap", + bootstrap: []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"} + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Equal(t, expected, bootstrapPeers, "Bootstrap peers should match configured values") + }, + }, + { + name: "empty bootstrap", + bootstrap: []string{}, + validate: func(t *testing.T, resp *autoconf.Response) { + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Empty(t, bootstrapPeers, "Empty bootstrap should result in empty peers") + }, + }, + { + name: "null bootstrap", + bootstrap: nil, + validate: func(t *testing.T, resp *autoconf.Response) { + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Empty(t, bootstrapPeers, "Null bootstrap should result in empty peers") + }, + }, + { + name: "invalid multiaddr", + bootstrap: []string{"invalid-multiaddr"}, + expectError: true, + }, + { + name: "very long multiaddr", + bootstrap: []string{"/dnsaddr/" + strings.Repeat("a", 100) + ".com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := []string{"/dnsaddr/" + strings.Repeat("a", 100) + 
".com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"} + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Equal(t, expected, bootstrapPeers, "Very long multiaddr should be preserved") + }, + }, + { + name: "bootstrap as string", + bootstrap: "/dnsaddr/test", + expectError: true, + }, + { + name: "bootstrap as number", + bootstrap: 123, + expectError: true, + }, + { + name: "mixed types in array", + bootstrap: []interface{}{"/dnsaddr/test", 123, nil}, + expectError: true, + }, + { + name: "extremely large array", + bootstrap: make([]string, 1000), + validate: func(t *testing.T, resp *autoconf.Response) { + // Array will be filled in the loop below + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Len(t, bootstrapPeers, 1000, "Large bootstrap array should be preserved") + }, + }, + } + + // Fill the large array with valid multiaddrs + largeArray := testCases[len(testCases)-1].bootstrap.([]string) + for i := range largeArray { + largeArray[i] = fmt.Sprintf("/dnsaddr/bootstrap%d.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", i) + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": tc.bootstrap, + }, + }, + }, + "DNSResolvers": map[string]interface{}{}, + "DelegatedEndpoints": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, tc.expectError, fmt.Sprintf("Expected fallback to be 
used for %s", tc.name)) + + if !tc.expectError { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Verify structure is reasonable + bootstrapPeers := autoConf.GetBootstrapPeers("AminoDHT") + require.IsType(t, []string{}, bootstrapPeers, "Bootstrap should be []string") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzDNSResolvers(t *testing.T) { + type testCase struct { + name string + resolvers interface{} + expectError bool + validate func(*testing.T, *autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid resolvers", + resolvers: map[string][]string{".": {"https://dns.google/dns-query"}}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := map[string][]string{".": {"https://dns.google/dns-query"}} + assert.Equal(t, expected, resp.Config.DNSResolvers, "DNS resolvers should match configured values") + }, + }, + { + name: "empty resolvers", + resolvers: map[string][]string{}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DNSResolvers, "Empty resolvers should result in empty map") + }, + }, + { + name: "null resolvers", + resolvers: nil, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DNSResolvers, "Null resolvers should result in empty map") + }, + }, + { + name: "relative URL (missing scheme)", + resolvers: map[string][]string{".": {"not-a-url"}}, + expectError: true, // Should error due to strict HTTP/HTTPS validation + }, + { + name: "invalid URL format", + resolvers: map[string][]string{".": {"://invalid-missing-scheme"}}, + expectError: true, // Should error because url.Parse() fails + }, + { + name: "non-HTTP scheme", + resolvers: 
map[string][]string{".": {"ftp://example.com/dns-query"}}, + expectError: true, // Should error due to non-HTTP/HTTPS scheme + }, + { + name: "very long domain", + resolvers: map[string][]string{strings.Repeat("a", 1000) + ".com": {"https://dns.google/dns-query"}}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := map[string][]string{strings.Repeat("a", 1000) + ".com": {"https://dns.google/dns-query"}} + assert.Equal(t, expected, resp.Config.DNSResolvers, "Very long domain should be preserved") + }, + }, + { + name: "many resolvers", + resolvers: generateManyResolvers(100), + validate: func(t *testing.T, resp *autoconf.Response) { + expected := generateManyResolvers(100) + assert.Equal(t, expected, resp.Config.DNSResolvers, "Many resolvers should be preserved") + assert.Equal(t, 100, len(resp.Config.DNSResolvers), "Should have 100 resolvers") + }, + }, + { + name: "resolvers as array", + resolvers: []string{"https://dns.google/dns-query"}, + expectError: true, + }, + { + name: "nested invalid structure", + resolvers: map[string]interface{}{".": map[string]string{"invalid": "structure"}}, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{"/dnsaddr/test"}, + }, + }, + }, + "DNSResolvers": tc.resolvers, + "DelegatedEndpoints": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, 
tc.expectError, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + + if !tc.expectError { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzDelegatedRouters(t *testing.T) { + // Test various malformed delegated router configurations + type testCase struct { + name string + routers interface{} + expectError bool + validate func(*testing.T, *autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid endpoints", + routers: map[string]interface{}{ + "https://ipni.example.com": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Len(t, resp.Config.DelegatedEndpoints, 1, "Should have 1 delegated endpoint") + for url, config := range resp.Config.DelegatedEndpoints { + assert.Contains(t, url, "ipni.example.com", "Endpoint URL should contain expected domain") + assert.Contains(t, config.Systems, "IPNI", "Endpoint should have IPNI system") + assert.Contains(t, config.Read, "/routing/v1/providers", "Endpoint should have providers read path") + } + }, + }, + { + name: "empty routers", + routers: map[string]interface{}{}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DelegatedEndpoints, "Empty routers should result in empty endpoints") + }, + }, + { + name: "null routers", + routers: nil, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DelegatedEndpoints, "Null routers should result in empty endpoints") + }, + }, + { + name: "invalid nested structure", + routers: 
map[string]string{"invalid": "structure"}, + expectError: true, + }, + { + name: "invalid endpoint URLs", + routers: map[string]interface{}{ + "not-a-url": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + expectError: true, // Should error due to URL validation + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{"/dnsaddr/test"}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{}, + "DelegatedEndpoints": tc.routers, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, tc.expectError, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + + if !tc.expectError { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzDelegatedPublishers(t *testing.T) { + // DelegatedPublishers use the same autoclient library validation as DelegatedRouters + // Test that URL validation works for delegated publishers + type testCase struct { + name string + urls []string + expectErr bool + validate func(*testing.T, 
*autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid HTTPS URLs", + urls: []string{"https://delegated-ipfs.dev", "https://another-publisher.com"}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Len(t, resp.Config.DelegatedEndpoints, 2, "Should have 2 delegated endpoints") + foundURLs := make([]string, 0, len(resp.Config.DelegatedEndpoints)) + for url := range resp.Config.DelegatedEndpoints { + foundURLs = append(foundURLs, url) + } + expectedURLs := []string{"https://delegated-ipfs.dev", "https://another-publisher.com"} + for _, expectedURL := range expectedURLs { + assert.Contains(t, foundURLs, expectedURL, "Should contain configured URL: %s", expectedURL) + } + }, + }, + { + name: "invalid URL", + urls: []string{"not-a-url"}, + expectErr: true, + }, + { + name: "HTTP URL (accepted during parsing)", + urls: []string{"http://insecure-publisher.com"}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Len(t, resp.Config.DelegatedEndpoints, 1, "Should have 1 delegated endpoint") + for url := range resp.Config.DelegatedEndpoints { + assert.Equal(t, "http://insecure-publisher.com", url, "HTTP URL should be preserved during parsing") + } + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + autoConfData := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "TestSystem": map[string]interface{}{ + "Description": "Test system for fuzz testing", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{}, + "DelegatedEndpoints": map[string]interface{}{}, + } + + // Add test URLs as delegated endpoints + for _, url := range tc.urls { + autoConfData["DelegatedEndpoints"].(map[string]interface{})[url] = map[string]interface{}{ + "Systems": []string{"TestSystem"}, + 
"Read": []string{"/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + } + } + + jsonData, err := json.Marshal(autoConfData) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + // Test that our autoconf parser handles this gracefully + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, tc.expectErr, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + + if !tc.expectErr { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzMalformedJSON(t *testing.T) { + malformedJSONs := []string{ + `{`, // Incomplete JSON + `{"AutoConfVersion": }`, // Missing value + `{"AutoConfVersion": 123,}`, // Trailing comma + `{AutoConfVersion: 123}`, // Unquoted key + `{"Bootstrap": [}`, // Incomplete array + `{"Bootstrap": ["/test",]}`, // Trailing comma in array + `invalid json`, // Not JSON at all + `null`, // Just null + `[]`, // Array instead of object + `""`, // String instead of object + } + + for i, malformedJSON := range malformedJSONs { + t.Run(fmt.Sprintf("malformed_%d", i), func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(malformedJSON)) + })) + defer server.Close() + + // All malformed JSON should result in fallback usage + _, _ = testAutoConfWithFallback(t, server.URL, true, fmt.Sprintf("Expected fallback to be used for malformed JSON: %s", malformedJSON)) + }) + } +} + +func 
testFuzzLargePayloads(t *testing.T) { + // Test with very large but valid JSON payloads + largeBootstrap := make([]string, 10000) + for i := range largeBootstrap { + largeBootstrap[i] = fmt.Sprintf("/dnsaddr/bootstrap%d.example.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", i) + } + + largeDNSResolvers := make(map[string][]string) + for i := 0; i < 1000; i++ { + domain := fmt.Sprintf("domain%d.example.com", i) + largeDNSResolvers[domain] = []string{ + fmt.Sprintf("https://resolver%d.example.com/dns-query", i), + } + } + + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": largeBootstrap, + }, + }, + }, + "DNSResolvers": largeDNSResolvers, + "DelegatedEndpoints": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + t.Logf("Large payload size: %d bytes", len(jsonData)) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + // Should handle large payloads gracefully (up to reasonable limits) + autoConf, _ := testAutoConfWithFallbackAndTimeout(t, server.URL, false, "Large payload should not trigger fallback", 30*time.Second) + require.NotNil(t, autoConf, "Should return valid config") + + // Verify bootstrap entries were preserved + bootstrapPeers := autoConf.GetBootstrapPeers("AminoDHT") + require.Len(t, bootstrapPeers, 10000, "Should preserve all bootstrap entries") +} + +// Helper function to generate many DNS resolvers for testing +func generateManyResolvers(count int) map[string][]string { + resolvers := make(map[string][]string) + for i := 0; i < count; i++ { + domain := fmt.Sprintf("domain%d.example.com", i) + 
resolvers[domain] = []string{ + fmt.Sprintf("https://resolver%d.example.com/dns-query", i), + } + } + return resolvers +} diff --git a/test/cli/autoconf/ipns_test.go b/test/cli/autoconf/ipns_test.go new file mode 100644 index 000000000..043841e49 --- /dev/null +++ b/test/cli/autoconf/ipns_test.go @@ -0,0 +1,352 @@ +package autoconf + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + "github.com/ipfs/boxo/autoconf" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestAutoConfIPNS tests IPNS publishing with autoconf-resolved delegated publishers +func TestAutoConfIPNS(t *testing.T) { + t.Parallel() + + t.Run("PublishingWithWorkingEndpoint", func(t *testing.T) { + t.Parallel() + testIPNSPublishingWithWorkingEndpoint(t) + }) + + t.Run("PublishingResilience", func(t *testing.T) { + t.Parallel() + testIPNSPublishingResilience(t) + }) +} + +// testIPNSPublishingWithWorkingEndpoint verifies that IPNS delegated publishing works +// correctly when the HTTP endpoint is functioning normally and accepts requests. +// It also verifies that the PUT payload matches what can be retrieved via routing get. 
+func testIPNSPublishingWithWorkingEndpoint(t *testing.T) { + // Create mock IPNS publisher that accepts requests + publisher := newMockIPNSPublisher(t) + defer publisher.close() + + // Create node with delegated publisher + node := setupNodeWithAutoconf(t, publisher.server.URL, "auto") + defer node.StopDaemon() + + // Wait for daemon to be ready + time.Sleep(5 * time.Second) + + // Get node's peer ID + idResult := node.RunIPFS("id", "-f", "") + require.Equal(t, 0, idResult.ExitCode()) + peerID := strings.TrimSpace(idResult.Stdout.String()) + + // Get peer ID in base36 format (used for IPNS keys) + idBase36Result := node.RunIPFS("id", "--peerid-base", "base36", "-f", "") + require.Equal(t, 0, idBase36Result.ExitCode()) + peerIDBase36 := strings.TrimSpace(idBase36Result.Stdout.String()) + + // Verify autoconf resolved "auto" correctly + result := node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + var resolvedPublishers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &resolvedPublishers) + require.NoError(t, err) + expectedURL := publisher.server.URL + "/routing/v1/ipns" + assert.Contains(t, resolvedPublishers, expectedURL, "AutoConf should resolve 'auto' to mock publisher") + + // Test publishing with --allow-delegated + testCID := "bafkqablimvwgy3y" + result = node.RunIPFS("name", "publish", "--allow-delegated", "/ipfs/"+testCID) + require.Equal(t, 0, result.ExitCode(), "Publishing should succeed") + assert.Contains(t, result.Stdout.String(), "Published to") + + // Wait for async HTTP request to delegated publisher + time.Sleep(2 * time.Second) + + // Verify HTTP PUT was made to delegated publisher + publishedKeys := publisher.getPublishedKeys() + assert.NotEmpty(t, publishedKeys, "HTTP PUT request should have been made to delegated publisher") + + // Get the PUT payload that was sent to the delegated publisher + putPayload := publisher.getRecordPayload(peerIDBase36) + require.NotNil(t, putPayload, "Should have captured PUT 
payload") + require.Greater(t, len(putPayload), 0, "PUT payload should not be empty") + + // Retrieve the IPNS record using routing get + getResult := node.RunIPFS("routing", "get", "/ipns/"+peerID) + require.Equal(t, 0, getResult.ExitCode(), "Should be able to retrieve IPNS record") + getPayload := getResult.Stdout.Bytes() + + // Compare the payloads + assert.Equal(t, putPayload, getPayload, + "PUT payload sent to delegated publisher should match what routing get returns") + + // Also verify the record points to the expected content + assert.Contains(t, getResult.Stdout.String(), testCID, + "Retrieved IPNS record should reference the published CID") + + // Use ipfs name inspect to verify the IPNS record's value matches the published CID + // First write the routing get result to a file for inspection + node.WriteBytes("ipns-record", getPayload) + inspectResult := node.RunIPFS("name", "inspect", "ipns-record") + require.Equal(t, 0, inspectResult.ExitCode(), "Should be able to inspect IPNS record") + + // The inspect output should show the path we published + inspectOutput := inspectResult.Stdout.String() + assert.Contains(t, inspectOutput, "/ipfs/"+testCID, + "IPNS record value should match the published path") + + // Also verify it's a valid record with proper fields + assert.Contains(t, inspectOutput, "Value:", "Should have Value field") + assert.Contains(t, inspectOutput, "Validity:", "Should have Validity field") + assert.Contains(t, inspectOutput, "Sequence:", "Should have Sequence field") + + t.Log("Verified: PUT payload to delegated publisher matches routing get result and name inspect confirms correct path") +} + +// testIPNSPublishingResilience verifies that IPNS publishing is resilient by design. +// Publishing succeeds as long as local storage works, even when all delegated endpoints fail. +// This test documents the intentional resilient behavior, not bugs. 
+func testIPNSPublishingResilience(t *testing.T) { + testCases := []struct { + name string + routingType string // "auto" or "delegated" + description string + }{ + { + name: "AutoRouting", + routingType: "auto", + description: "auto mode uses DHT + HTTP, tolerates HTTP failures", + }, + { + name: "DelegatedRouting", + routingType: "delegated", + description: "delegated mode uses HTTP only, tolerates HTTP failures", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create publisher that always fails + publisher := newMockIPNSPublisher(t) + defer publisher.close() + publisher.responseFunc = func(peerID string, record []byte) int { + return http.StatusInternalServerError + } + + // Create node with failing endpoint + node := setupNodeWithAutoconf(t, publisher.server.URL, tc.routingType) + defer node.StopDaemon() + + // Test different publishing modes - all should succeed due to resilient design + testCID := "/ipfs/bafkqablimvwgy3y" + + // Normal publishing (should succeed despite endpoint failures) + result := node.RunIPFS("name", "publish", testCID) + assert.Equal(t, 0, result.ExitCode(), + "%s: Normal publishing should succeed (local storage works)", tc.description) + + // Publishing with --allow-offline (local only, no network) + result = node.RunIPFS("name", "publish", "--allow-offline", testCID) + assert.Equal(t, 0, result.ExitCode(), + "--allow-offline should succeed (local only)") + + // Publishing with --allow-delegated (if using auto routing) + if tc.routingType == "auto" { + result = node.RunIPFS("name", "publish", "--allow-delegated", testCID) + assert.Equal(t, 0, result.ExitCode(), + "--allow-delegated should succeed (no DHT required)") + } + + t.Logf("%s: All publishing modes succeeded despite endpoint failures (resilient design)", tc.name) + }) + } +} + +// ============================================================================ +// Helper Functions +// 
============================================================================ + +// setupNodeWithAutoconf creates an IPFS node with autoconf-configured delegated publishers +func setupNodeWithAutoconf(t *testing.T, publisherURL string, routingType string) *harness.Node { + // Create autoconf server with the publisher endpoint + autoconfData := createAutoconfJSON(publisherURL) + autoconfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprint(w, autoconfData) + })) + t.Cleanup(func() { autoconfServer.Close() }) + + // Create and configure node + h := harness.NewT(t) + node := h.NewNode().Init("--profile=test") + + // Configure autoconf + node.SetIPFSConfig("AutoConf.URL", autoconfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + node.SetIPFSConfig("Routing.Type", routingType) + + // Additional config for delegated routing mode + if routingType == "delegated" { + node.SetIPFSConfig("Provide.Enabled", false) + node.SetIPFSConfig("Provide.DHT.Interval", "0s") + } + + // Add bootstrap peers for connectivity + node.SetIPFSConfig("Bootstrap", autoconf.FallbackBootstrapPeers) + + // Start daemon + node.StartDaemon() + + return node +} + +// createAutoconfJSON generates autoconf configuration with a delegated IPNS publisher +func createAutoconfJSON(publisherURL string) string { + // Use bootstrap peers from autoconf fallbacks for consistency + bootstrapPeers, _ := json.Marshal(autoconf.FallbackBootstrapPeers) + + return fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "TestSystem": { + "Description": "Test system for IPNS publishing", + "NativeConfig": { + "Bootstrap": %s + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": { + "%s": { + "Systems": ["TestSystem"], + "Read": ["/routing/v1/ipns"], + "Write": 
["/routing/v1/ipns"] + } + } + }`, string(bootstrapPeers), publisherURL) +} + +// ============================================================================ +// Mock IPNS Publisher +// ============================================================================ + +// mockIPNSPublisher implements a simple IPNS publishing HTTP API server +type mockIPNSPublisher struct { + t *testing.T + server *httptest.Server + mu sync.Mutex + publishedKeys map[string]string // peerID -> published CID + recordPayloads map[string][]byte // peerID -> actual HTTP PUT record payload + responseFunc func(peerID string, record []byte) int // returns HTTP status code +} + +func newMockIPNSPublisher(t *testing.T) *mockIPNSPublisher { + m := &mockIPNSPublisher{ + t: t, + publishedKeys: make(map[string]string), + recordPayloads: make(map[string][]byte), + } + + // Default response function accepts all publishes + m.responseFunc = func(peerID string, record []byte) int { + return http.StatusOK + } + + mux := http.NewServeMux() + mux.HandleFunc("/routing/v1/ipns/", m.handleIPNS) + + m.server = httptest.NewServer(mux) + return m +} + +func (m *mockIPNSPublisher) handleIPNS(w http.ResponseWriter, r *http.Request) { + m.mu.Lock() + defer m.mu.Unlock() + + // Extract peer ID from path + parts := strings.Split(r.URL.Path, "/") + if len(parts) < 5 { + http.Error(w, "invalid path", http.StatusBadRequest) + return + } + + peerID := parts[4] + + if r.Method == "PUT" { + // Handle IPNS record publication + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "failed to read body", http.StatusBadRequest) + return + } + + // Get response status from response function + status := m.responseFunc(peerID, body) + + if status == http.StatusOK { + if len(body) > 0 { + // Store the actual record payload + m.recordPayloads[peerID] = make([]byte, len(body)) + copy(m.recordPayloads[peerID], body) + } + + // Mark as published + m.publishedKeys[peerID] = fmt.Sprintf("published-%d", time.Now().Unix()) + } 
+ + w.WriteHeader(status) + if status != http.StatusOK { + fmt.Fprint(w, `{"error": "publish failed"}`) + } + } else if r.Method == "GET" { + // Handle IPNS record retrieval + if record, exists := m.publishedKeys[peerID]; exists { + w.Header().Set("Content-Type", "application/vnd.ipfs.ipns-record") + fmt.Fprint(w, record) + } else { + http.Error(w, "record not found", http.StatusNotFound) + } + } else { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func (m *mockIPNSPublisher) getPublishedKeys() map[string]string { + m.mu.Lock() + defer m.mu.Unlock() + result := make(map[string]string) + for k, v := range m.publishedKeys { + result[k] = v + } + return result +} + +func (m *mockIPNSPublisher) getRecordPayload(peerID string) []byte { + m.mu.Lock() + defer m.mu.Unlock() + if payload, exists := m.recordPayloads[peerID]; exists { + result := make([]byte, len(payload)) + copy(result, payload) + return result + } + return nil +} + +func (m *mockIPNSPublisher) close() { + m.server.Close() +} diff --git a/test/cli/autoconf/routing_test.go b/test/cli/autoconf/routing_test.go new file mode 100644 index 000000000..57022e390 --- /dev/null +++ b/test/cli/autoconf/routing_test.go @@ -0,0 +1,236 @@ +package autoconf + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfDelegatedRouting(t *testing.T) { + t.Parallel() + + t.Run("delegated routing with auto router", func(t *testing.T) { + t.Parallel() + testDelegatedRoutingWithAuto(t) + }) + + t.Run("routing errors are handled properly", func(t *testing.T) { + t.Parallel() + testRoutingErrorHandling(t) + }) +} + +// mockRoutingServer implements a simple Delegated Routing HTTP API server +type mockRoutingServer struct { + t *testing.T + server *httptest.Server + mu sync.Mutex + requests []string + providerFunc 
func(cid string) []map[string]interface{} +} + +func newMockRoutingServer(t *testing.T) *mockRoutingServer { + m := &mockRoutingServer{ + t: t, + requests: []string{}, + } + + // Default provider function returns mock provider records + m.providerFunc = func(cid string) []map[string]interface{} { + return []map[string]interface{}{ + { + "Protocol": "transport-bitswap", + "Schema": "bitswap", + "ID": "12D3KooWMockProvider1", + "Addrs": []string{"/ip4/192.168.1.100/tcp/4001"}, + }, + { + "Protocol": "transport-bitswap", + "Schema": "bitswap", + "ID": "12D3KooWMockProvider2", + "Addrs": []string{"/ip4/192.168.1.101/tcp/4001"}, + }, + } + } + + mux := http.NewServeMux() + mux.HandleFunc("/routing/v1/providers/", m.handleProviders) + + m.server = httptest.NewServer(mux) + return m +} + +func (m *mockRoutingServer) handleProviders(w http.ResponseWriter, r *http.Request) { + m.mu.Lock() + defer m.mu.Unlock() + + // Extract CID from path + parts := strings.Split(r.URL.Path, "/") + if len(parts) < 5 { + http.Error(w, "invalid path", http.StatusBadRequest) + return + } + + cid := parts[4] + m.requests = append(m.requests, cid) + m.t.Logf("Routing server received providers request for CID: %s", cid) + + // Get provider records + providers := m.providerFunc(cid) + + // Return NDJSON response as per IPIP-378 + w.Header().Set("Content-Type", "application/x-ndjson") + encoder := json.NewEncoder(w) + + for _, provider := range providers { + if err := encoder.Encode(provider); err != nil { + m.t.Logf("Failed to encode provider: %v", err) + return + } + } +} + +func (m *mockRoutingServer) close() { + m.server.Close() +} + +func testDelegatedRoutingWithAuto(t *testing.T) { + // Create mock routing server + routingServer := newMockRoutingServer(t) + defer routingServer.close() + + // Create autoconf data with delegated router + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + 
"Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": { + "%s": { + "Systems": ["AminoDHT", "IPNI"], + "Read": ["/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"], + "Write": [] + } + } + }`, routingServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // Create IPFS node with auto delegated router + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + + // Test that daemon starts successfully with auto routing configuration + // The actual routing functionality requires online mode, but we can test + // that the configuration is expanded and daemon starts properly + node.StartDaemon("--offline") + defer node.StopDaemon() + + // Verify config still shows "auto" (this tests that auto values are preserved in user-facing config) + result := node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, routers, "Delegated routers config should show 'auto'") + + // Test that daemon is running and accepting commands + result = node.RunIPFS("version") + require.Equal(t, 0, result.ExitCode(), "Daemon should be running and accepting commands") + + // Test that autoconf server was contacted (indicating successful resolution) + // We can't test actual routing in offline mode, but we can verify that + // the AutoConf system expanded the "auto" placeholder successfully + // by checking that the daemon 
started without errors + t.Log("AutoConf successfully expanded delegated router configuration and daemon started") +} + +func testRoutingErrorHandling(t *testing.T) { + // Create routing server that returns no providers + routingServer := newMockRoutingServer(t) + defer routingServer.close() + + // Configure to return no providers (empty response) + routingServer.providerFunc = func(cid string) []map[string]interface{} { + return []map[string]interface{}{} + } + + // Create autoconf data + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": { + "%s": { + "Systems": ["AminoDHT", "IPNI"], + "Read": ["/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"], + "Write": [] + } + } + }`, routingServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + + // Test that daemon starts successfully even when no providers are available + node.StartDaemon("--offline") + defer node.StopDaemon() + + // Verify config shows "auto" + result := node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, routers, "Delegated routers config should show 'auto'") + + // Test that daemon is running 
and accepting commands + result = node.RunIPFS("version") + require.Equal(t, 0, result.ExitCode(), "Daemon should be running even with empty routing config") + + t.Log("AutoConf successfully handled routing configuration with empty providers") +} diff --git a/test/cli/autoconf/swarm_connect_test.go b/test/cli/autoconf/swarm_connect_test.go new file mode 100644 index 000000000..95c75d953 --- /dev/null +++ b/test/cli/autoconf/swarm_connect_test.go @@ -0,0 +1,90 @@ +package autoconf + +import ( + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestSwarmConnectWithAutoConf tests that ipfs swarm connect works properly +// when AutoConf is enabled and a daemon is running. +// +// This is a regression test for the issue where: +// - AutoConf disabled: ipfs swarm connect works +// - AutoConf enabled: ipfs swarm connect fails with "Error: connect" +// +// The issue affects CLI command fallback behavior when the HTTP API connection fails. +func TestSwarmConnectWithAutoConf(t *testing.T) { + t.Parallel() + + t.Run("AutoConf disabled - should work", func(t *testing.T) { + testSwarmConnectWithAutoConfSetting(t, false, true) // expect success + }) + + t.Run("AutoConf enabled - should work", func(t *testing.T) { + testSwarmConnectWithAutoConfSetting(t, true, true) // expect success (fix the bug!) 
+ }) +} + +func testSwarmConnectWithAutoConfSetting(t *testing.T, autoConfEnabled bool, expectSuccess bool) { + // Create IPFS node with test profile + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure AutoConf + node.SetIPFSConfig("AutoConf.Enabled", autoConfEnabled) + + // Set up bootstrap peers so the node has something to connect to + // Use the same bootstrap peers from boxo/autoconf fallbacks + node.SetIPFSConfig("Bootstrap", []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + }) + + // CRITICAL: Start the daemon first - this is the key requirement + // The daemon must be running and working properly + node.StartDaemon() + defer node.StopDaemon() + + // Give daemon time to start up completely + time.Sleep(3 * time.Second) + + // Verify daemon is responsive + result := node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Daemon should be responsive before testing swarm connect") + t.Logf("Daemon is running and responsive. AutoConf enabled: %v", autoConfEnabled) + + // Now test swarm connect to a bootstrap peer + // This should work because: + // 1. The daemon is running + // 2. The CLI should connect to the daemon via API + // 3. The daemon should handle the swarm connect request + result = node.RunIPFS("swarm", "connect", "/dnsaddr/bootstrap.libp2p.io") + + // swarm connect should work regardless of AutoConf setting + assert.Equal(t, 0, result.ExitCode(), + "swarm connect should succeed with AutoConf=%v. stderr: %s", + autoConfEnabled, result.Stderr.String()) + + // Should contain success message + output := result.Stdout.String() + assert.Contains(t, output, "success", + "swarm connect output should contain 'success' with AutoConf=%v. 
output: %s", + autoConfEnabled, output) + + // Additional diagnostic: Check if ipfs id shows addresses + // Both AutoConf enabled and disabled should show proper addresses + result = node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "ipfs id should work with AutoConf=%v", autoConfEnabled) + + idOutput := result.Stdout.String() + t.Logf("ipfs id output with AutoConf=%v: %s", autoConfEnabled, idOutput) + + // Addresses should not be null regardless of AutoConf setting + assert.Contains(t, idOutput, `"Addresses"`, "ipfs id should show Addresses field") + assert.NotContains(t, idOutput, `"Addresses": null`, + "ipfs id should not show null addresses with AutoConf=%v", autoConfEnabled) +} diff --git a/test/cli/autoconf/testdata/autoconf_amino_and_ipni.json b/test/cli/autoconf/testdata/autoconf_amino_and_ipni.json new file mode 100644 index 000000000..add246cc3 --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_amino_and_ipni.json @@ -0,0 +1,60 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + }, + "IPNI": { + "URL": "https://cid.contact", + "Description": "Network Indexer - content routing database for large storage providers", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://amino-dht.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/providers", + 
"/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://cid.contact": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/autoconf_new_routing_system.json b/test/cli/autoconf/testdata/autoconf_new_routing_system.json new file mode 100644 index 000000000..697e5cc8f --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_new_routing_system.json @@ -0,0 +1,38 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "NewRoutingSystem": { + "URL": "https://new-routing.example.com", + "Description": "New routing system for testing delegation with auto routing", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://new-routing.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/autoconf_new_routing_with_filtering.json b/test/cli/autoconf/testdata/autoconf_new_routing_with_filtering.json new file mode 100644 index 000000000..982f545aa --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_new_routing_with_filtering.json @@ -0,0 +1,59 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "NewRoutingSystem": { + "URL": "https://new-routing.example.com", + "Description": "New routing system for testing path filtering with auto routing", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } + }, + "DNSResolvers": { + "eth.": [ + 
"https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://supported-new.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://unsupported-new.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/custom/v0/read", + "/api/v1/nonstandard" + ], + "Write": [ + "/custom/v0/write" + ] + }, + "https://mixed-new.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/routing/v1/providers", + "/invalid/path", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/autoconf_with_unsupported_paths.json b/test/cli/autoconf/testdata/autoconf_with_unsupported_paths.json new file mode 100644 index 000000000..e7a45a1da --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_with_unsupported_paths.json @@ -0,0 +1,64 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://supported.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://unsupported.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/example/v0/read", + "/api/v1/custom" + ], + "Write": [ + "/example/v0/write" + ] + }, 
+ "https://mixed.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/providers", + "/unsupported/path", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} diff --git a/test/cli/autoconf/testdata/updated_autoconf.json b/test/cli/autoconf/testdata/updated_autoconf.json new file mode 100644 index 000000000..44b7f1ed9 --- /dev/null +++ b/test/cli/autoconf/testdata/updated_autoconf.json @@ -0,0 +1,87 @@ +{ + "AutoConfVersion": 2025072902, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + }, + "IPNI": { + "URL": "https://ipni.example.com", + "Description": "Network Indexer - content routing database for large storage providers", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query", + "https://dns.eth.link/dns-query" + ], + "test.": [ + "https://test.resolver/dns-query" + ] + }, + "DelegatedEndpoints": { + 
"https://ipni.example.com": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + }, + "https://routing.example.com": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + }, + "https://delegated-ipfs.dev": { + "Systems": ["AminoDHT", "IPNI"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://ipns.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/valid_autoconf.json b/test/cli/autoconf/testdata/valid_autoconf.json new file mode 100644 index 000000000..4469c33c2 --- /dev/null +++ b/test/cli/autoconf/testdata/valid_autoconf.json @@ -0,0 +1,68 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + }, + "IPNI": { + "URL": 
"https://ipni.example.com", + "Description": "Network Indexer - content routing database for large storage providers", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query", + "https://dns.eth.link/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://ipni.example.com": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + }, + "https://delegated-ipfs.dev": { + "Systems": ["AminoDHT", "IPNI"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/validation_test.go b/test/cli/autoconf/validation_test.go new file mode 100644 index 000000000..e906fe175 --- /dev/null +++ b/test/cli/autoconf/validation_test.go @@ -0,0 +1,144 @@ +package autoconf + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +func TestAutoConfValidation(t *testing.T) { + t.Parallel() + + t.Run("invalid autoconf JSON prevents caching", func(t *testing.T) { + t.Parallel() + testInvalidAutoConfJSONPreventsCaching(t) + }) + + t.Run("malformed multiaddr in autoconf", func(t *testing.T) { + t.Parallel() + testMalformedMultiaddrInAutoConf(t) + }) + + t.Run("malformed URL in autoconf", func(t *testing.T) { + t.Parallel() + testMalformedURLInAutoConf(t) + }) +} + +func testInvalidAutoConfJSONPreventsCaching(t *testing.T) { + // Create server that serves invalid autoconf JSON + invalidAutoConfData := `{ + "AutoConfVersion": 123, + "AutoConfSchema": 1, + "SystemRegistry": { + "AminoDHT": { + "NativeConfig": { + "Bootstrap": [ + "invalid-multiaddr-that-should-fail" + ] + } + } + } + }` + + requestCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount++ + t.Logf("Invalid 
autoconf server request #%d: %s %s", requestCount, r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"invalid-config-123"`) + _, _ = w.Write([]byte(invalidAutoConfData)) + })) + defer server.Close() + + // Create IPFS node and try to start daemon with invalid autoconf + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon to trigger autoconf fetch - this should start but log validation errors + node.StartDaemon() + defer node.StopDaemon() + + // Give autoconf some time to attempt fetch and fail validation + // The daemon should still start but autoconf should fail + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with invalid autoconf") + + // Verify server was called (autoconf was attempted even though validation failed) + assert.Greater(t, requestCount, 0, "Invalid autoconf server should have been called") +} + +func testMalformedMultiaddrInAutoConf(t *testing.T) { + // Create server that serves autoconf with malformed multiaddr + invalidAutoConfData := `{ + "AutoConfVersion": 456, + "AutoConfSchema": 1, + "SystemRegistry": { + "AminoDHT": { + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "not-a-valid-multiaddr" + ] + } + } + } + }` + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(invalidAutoConfData)) + })) + defer server.Close() + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon to trigger autoconf 
fetch - daemon should start but autoconf validation should fail + node.StartDaemon() + defer node.StopDaemon() + + // Daemon should still be functional even with invalid autoconf + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with invalid autoconf") +} + +func testMalformedURLInAutoConf(t *testing.T) { + // Create server that serves autoconf with malformed URL + invalidAutoConfData := `{ + "AutoConfVersion": 789, + "AutoConfSchema": 1, + "DNSResolvers": { + "eth.": ["https://valid.example.com"], + "bad.": ["://malformed-url-missing-scheme"] + } + }` + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(invalidAutoConfData)) + })) + defer server.Close() + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Start daemon to trigger autoconf fetch - daemon should start but autoconf validation should fail + node.StartDaemon() + defer node.StopDaemon() + + // Daemon should still be functional even with invalid autoconf + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with invalid autoconf") +} diff --git a/test/cli/backup_bootstrap_test.go b/test/cli/backup_bootstrap_test.go index 017499f3d..eff00048a 100644 --- a/test/cli/backup_bootstrap_test.go +++ b/test/cli/backup_bootstrap_test.go @@ -39,7 +39,9 @@ func TestBackupBootstrapPeers(t *testing.T) { // Start 1 and 2. 2 does not know anyone yet. 
nodes[1].StartDaemon() + defer nodes[1].StopDaemon() nodes[2].StartDaemon() + defer nodes[2].StopDaemon() assert.Len(t, nodes[1].Peers(), 0) assert.Len(t, nodes[2].Peers(), 0) @@ -51,6 +53,7 @@ func TestBackupBootstrapPeers(t *testing.T) { // Start 0, wait a bit. Should connect to 1, and then discover 2 via the // backup bootstrap peers. nodes[0].StartDaemon() + defer nodes[0].StopDaemon() time.Sleep(time.Millisecond * 500) // Check if they're all connected. diff --git a/test/cli/basic_commands_test.go b/test/cli/basic_commands_test.go index 603a03d9d..d9d66d1c3 100644 --- a/test/cli/basic_commands_test.go +++ b/test/cli/basic_commands_test.go @@ -62,7 +62,7 @@ func TestIPFSVersionDeps(t *testing.T) { res = strings.TrimSpace(res) lines := SplitLines(res) - assert.Equal(t, "github.com/ipfs/kubo@(devel)", lines[0]) + assert.True(t, strings.HasPrefix(lines[0], "github.com/ipfs/kubo@v")) for _, depLine := range lines[1:] { split := strings.Split(depLine, " => ") @@ -70,6 +70,10 @@ func TestIPFSVersionDeps(t *testing.T) { splitModVers := strings.Split(moduleVersion, "@") modPath := splitModVers[0] modVers := splitModVers[1] + // Skip local replace paths (starting with "./") + if strings.HasPrefix(modPath, "./") { + continue + } assert.NoError(t, gomod.Check(modPath, modVers), "path: %s, version: %s", modPath, modVers) } } @@ -112,7 +116,7 @@ func TestAllRootCommandsAreMentionedInHelpText(t *testing.T) { // a few base commands are not expected to be in the help message // but we default to requiring them to be in the help message, so that we - // have to make an conscious decision to exclude them + // have to make a conscious decision to exclude them notInHelp := map[string]bool{ "object": true, "shutdown": true, diff --git a/test/cli/bitswap_config_test.go b/test/cli/bitswap_config_test.go new file mode 100644 index 000000000..5ee59ea56 --- /dev/null +++ b/test/cli/bitswap_config_test.go @@ -0,0 +1,186 @@ +package cli + +import ( + "strings" + "testing" + "time" + + 
"github.com/ipfs/boxo/bitswap/network/bsnet" + "github.com/ipfs/go-test/random" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +func TestBitswapConfig(t *testing.T) { + t.Parallel() + + // Create test data that will be shared between nodes + testData := random.Bytes(100) + + t.Run("server enabled (default)", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + provider := h.NewNode().Init().StartDaemon() + defer provider.StopDaemon() + requester := h.NewNode().Init().StartDaemon() + defer requester.StopDaemon() + + hash := provider.IPFSAddStr(string(testData)) + requester.Connect(provider) + + res := requester.IPFS("cat", hash) + assert.Equal(t, testData, res.Stdout.Bytes(), "retrieved data should match original") + }) + + t.Run("server disabled", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + + provider := h.NewNode().Init() + provider.SetIPFSConfig("Bitswap.ServerEnabled", false) + provider = provider.StartDaemon() + defer provider.StopDaemon() + + requester := h.NewNode().Init().StartDaemon() + defer requester.StopDaemon() + + hash := provider.IPFSAddStr(string(testData)) + requester.Connect(provider) + + // If the data was available, it would be retrieved immediately. + // Therefore, after the timeout, we can assume the data is not available + // i.e. 
the server is disabled + timeout := time.After(3 * time.Second) + dataChan := make(chan []byte) + + go func() { + res := requester.RunIPFS("cat", hash) + dataChan <- res.Stdout.Bytes() + }() + + select { + case data := <-dataChan: + assert.NotEqual(t, testData, data, "retrieved data should not match original") + case <-timeout: + t.Log("Test passed: operation timed out after 3 seconds as expected") + } + }) + + t.Run("client still works when server disabled", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + + requester := h.NewNode().Init() + requester.SetIPFSConfig("Bitswap.ServerEnabled", false) + requester.StartDaemon() + defer requester.StopDaemon() + + provider := h.NewNode().Init().StartDaemon() + defer provider.StopDaemon() + hash := provider.IPFSAddStr(string(testData)) + requester.Connect(provider) + + // Even when the server is disabled, the client should be able to retrieve data + res := requester.RunIPFS("cat", hash) + assert.Equal(t, testData, res.Stdout.Bytes(), "retrieved data should match original") + }) + + t.Run("bitswap over libp2p disabled", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + + requester := h.NewNode().Init() + requester.UpdateConfig(func(cfg *config.Config) { + cfg.Bitswap.Libp2pEnabled = config.False + cfg.Bitswap.ServerEnabled = config.False + cfg.HTTPRetrieval.Enabled = config.True + }) + requester.StartDaemon() + defer requester.StopDaemon() + + provider := h.NewNode().Init().StartDaemon() + defer provider.StopDaemon() + hash := provider.IPFSAddStr(string(testData)) + + requester.Connect(provider) + res := requester.RunIPFS("cat", hash) + assert.Equal(t, []byte{}, res.Stdout.Bytes(), "cat should not return any data") + assert.Contains(t, res.Stderr.String(), "Error: ipld: could not find") + + // Verify that basic operations still work with bitswap disabled + res = requester.IPFS("id") + assert.Equal(t, 0, res.ExitCode(), "basic IPFS operations should work") + res = requester.IPFS("bitswap", "stat") + 
assert.Equal(t, 0, res.ExitCode(), "bitswap stat should work even with bitswap disabled") + res = requester.IPFS("bitswap", "wantlist") + assert.Equal(t, 0, res.ExitCode(), "bitswap wantlist should work even with bitswap disabled") + + // Verify local operations still work + hashNew := requester.IPFSAddStr("random") + res = requester.IPFS("cat", hashNew) + assert.Equal(t, []byte("random"), res.Stdout.Bytes(), "cat should return the added data") + }) + + // Disabling Bitswap.Libp2pEnabled should remove /ipfs/bitswap* protocols from `ipfs id` + t.Run("disabling bitswap over libp2p removes it from identify protocol list", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + + provider := h.NewNode().Init() + provider.UpdateConfig(func(cfg *config.Config) { + cfg.Bitswap.Libp2pEnabled = config.False + cfg.Bitswap.ServerEnabled = config.False + cfg.HTTPRetrieval.Enabled = config.True + }) + provider = provider.StartDaemon() + defer provider.StopDaemon() + requester := h.NewNode().Init().StartDaemon() + defer requester.StopDaemon() + requester.Connect(provider) + + // read libp2p identify from remote peer, and print protocols + res := requester.IPFS("id", "-f", "", provider.PeerID().String()) + protocols := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + + // No bitswap protocols should be present + for _, proto := range protocols { + assert.NotContains(t, proto, bsnet.ProtocolBitswap, "bitswap protocol %s should not be advertised when server is disabled", proto) + assert.NotContains(t, proto, bsnet.ProtocolBitswapNoVers, "bitswap protocol %s should not be advertised when server is disabled", proto) + assert.NotContains(t, proto, bsnet.ProtocolBitswapOneOne, "bitswap protocol %s should not be advertised when server is disabled", proto) + assert.NotContains(t, proto, bsnet.ProtocolBitswapOneZero, "bitswap protocol %s should not be advertised when server is disabled", proto) + } + }) + + // HTTPRetrieval uses bitswap engine, we need it + t.Run("errors 
when both HTTP and libp2p are disabled", func(t *testing.T) { + t.Parallel() + + // init Kubo repo + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.HTTPRetrieval.Enabled = config.False + cfg.Bitswap.Libp2pEnabled = config.False + cfg.Bitswap.ServerEnabled = config.Default + }) + res := node.RunIPFS("daemon") + assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Bitswap.Libp2pEnabled and HTTPRetrieval.Enabled are both disabled, unable to initialize Bitswap") + assert.Equal(t, 1, res.ExitCode()) + }) + + // HTTPRetrieval uses bitswap engine, we need it + t.Run("errors when user set conflicting HTTP and libp2p flags", func(t *testing.T) { + t.Parallel() + + // init Kubo repo + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.HTTPRetrieval.Enabled = config.False + cfg.Bitswap.Libp2pEnabled = config.False + cfg.Bitswap.ServerEnabled = config.True // bad user config: can't enable server when libp2p is down + }) + res := node.RunIPFS("daemon") + assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Bitswap.Libp2pEnabled and HTTPRetrieval.Enabled are both disabled, unable to initialize Bitswap") + assert.Equal(t, 1, res.ExitCode()) + }) +} diff --git a/test/cli/bootstrap_auto_test.go b/test/cli/bootstrap_auto_test.go new file mode 100644 index 000000000..e3959ece7 --- /dev/null +++ b/test/cli/bootstrap_auto_test.go @@ -0,0 +1,202 @@ +package cli + +import ( + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBootstrapCommandsWithAutoPlaceholder(t *testing.T) { + t.Parallel() + + t.Run("bootstrap add default", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap add default' works correctly + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) // Start 
with empty bootstrap + + // Add default bootstrap peers via "auto" placeholder + result := node.RunIPFS("bootstrap", "add", "default") + require.Equal(t, 0, result.ExitCode(), "bootstrap add default should succeed") + + output := result.Stdout.String() + t.Logf("Bootstrap add default output: %s", output) + assert.Contains(t, output, "added auto", "bootstrap add default should report adding 'auto'") + + // Verify bootstrap list shows "auto" + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list after add default: %s", listOutput) + assert.Contains(t, listOutput, "auto", "bootstrap list should show 'auto' placeholder") + }) + + t.Run("bootstrap add auto explicitly", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap add auto' works correctly + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + + // Add "auto" placeholder explicitly + result := node.RunIPFS("bootstrap", "add", "auto") + require.Equal(t, 0, result.ExitCode(), "bootstrap add auto should succeed") + + output := result.Stdout.String() + t.Logf("Bootstrap add auto output: %s", output) + assert.Contains(t, output, "added auto", "bootstrap add auto should report adding 'auto'") + + // Verify bootstrap list shows "auto" + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list after add auto: %s", listOutput) + assert.Contains(t, listOutput, "auto", "bootstrap list should show 'auto' placeholder") + }) + + t.Run("bootstrap add default converts to auto", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap add default' adds "auto" to the bootstrap list + node := 
harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + node.SetIPFSConfig("AutoConf.Enabled", true) // Enable AutoConf to allow adding "auto" + + // Add default bootstrap peers + result := node.RunIPFS("bootstrap", "add", "default") + require.Equal(t, 0, result.ExitCode(), "bootstrap add default should succeed") + assert.Contains(t, result.Stdout.String(), "added auto", "should report adding 'auto'") + + // Verify bootstrap list shows "auto" + var bootstrap []string + node.GetIPFSConfig("Bootstrap", &bootstrap) + require.Equal(t, []string{"auto"}, bootstrap, "Bootstrap should contain ['auto']") + }) + + t.Run("bootstrap add default fails when AutoConf disabled", func(t *testing.T) { + t.Parallel() + // Test that adding default/auto fails when AutoConf is disabled + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + node.SetIPFSConfig("AutoConf.Enabled", false) // Disable AutoConf + + // Try to add default - should fail + result := node.RunIPFS("bootstrap", "add", "default") + require.NotEqual(t, 0, result.ExitCode(), "bootstrap add default should fail when AutoConf disabled") + assert.Contains(t, result.Stderr.String(), "AutoConf is disabled", "should mention AutoConf is disabled") + + // Try to add auto - should also fail + result = node.RunIPFS("bootstrap", "add", "auto") + require.NotEqual(t, 0, result.ExitCode(), "bootstrap add auto should fail when AutoConf disabled") + assert.Contains(t, result.Stderr.String(), "AutoConf is disabled", "should mention AutoConf is disabled") + }) + + t.Run("bootstrap rm with auto placeholder", func(t *testing.T) { + t.Parallel() + // Test that selective removal fails properly when "auto" is present + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) // Start with auto + 
+ // Try to remove a specific peer - should fail with helpful error + result := node.RunIPFS("bootstrap", "rm", "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN") + require.NotEqual(t, 0, result.ExitCode(), "bootstrap rm of specific peer should fail when 'auto' is present") + + output := result.Stderr.String() + t.Logf("Bootstrap rm error output: %s", output) + assert.Contains(t, output, "cannot remove individual bootstrap peers when using 'auto' placeholder", + "should provide helpful error message about auto placeholder") + assert.Contains(t, output, "disable AutoConf", + "should suggest disabling AutoConf as solution") + assert.Contains(t, output, "ipfs bootstrap rm --all", + "should suggest using rm --all as alternative") + }) + + t.Run("bootstrap rm --all with auto placeholder", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap rm --all' works with "auto" placeholder + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) // Start with auto + + // Remove all bootstrap peers + result := node.RunIPFS("bootstrap", "rm", "--all") + require.Equal(t, 0, result.ExitCode(), "bootstrap rm --all should succeed with auto placeholder") + + output := result.Stdout.String() + t.Logf("Bootstrap rm --all output: %s", output) + assert.Contains(t, output, "removed auto", "bootstrap rm --all should report removing 'auto'") + + // Verify bootstrap list is now empty + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list after rm --all: %s", listOutput) + assert.Empty(t, listOutput, "bootstrap list should be empty after rm --all") + + // Test the rm all subcommand too + node.SetIPFSConfig("Bootstrap", []string{"auto"}) // Reset to auto + + result = node.RunIPFS("bootstrap", "rm", "all") + 
require.Equal(t, 0, result.ExitCode(), "bootstrap rm all should succeed with auto placeholder") + + output = result.Stdout.String() + t.Logf("Bootstrap rm all output: %s", output) + assert.Contains(t, output, "removed auto", "bootstrap rm all should report removing 'auto'") + }) + + t.Run("bootstrap mixed auto and specific peers", func(t *testing.T) { + t.Parallel() + // Test that bootstrap commands work when mixing "auto" with specific peers + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + + // Add a specific peer first + specificPeer := "/ip4/127.0.0.1/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" + result := node.RunIPFS("bootstrap", "add", specificPeer) + require.Equal(t, 0, result.ExitCode(), "bootstrap add specific peer should succeed") + + // Add auto placeholder + result = node.RunIPFS("bootstrap", "add", "auto") + require.Equal(t, 0, result.ExitCode(), "bootstrap add auto should succeed") + + // Verify bootstrap list shows both + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list with mixed peers: %s", listOutput) + assert.Contains(t, listOutput, "auto", "bootstrap list should contain 'auto' placeholder") + assert.Contains(t, listOutput, specificPeer, "bootstrap list should contain specific peer") + + // Try to remove the specific peer - should fail because auto is present + result = node.RunIPFS("bootstrap", "rm", specificPeer) + require.NotEqual(t, 0, result.ExitCode(), "bootstrap rm of specific peer should fail when 'auto' is present") + + output := result.Stderr.String() + assert.Contains(t, output, "cannot remove individual bootstrap peers when using 'auto' placeholder", + "should provide helpful error message about auto placeholder") + + // Remove all should 
work and remove both auto and specific peer + result = node.RunIPFS("bootstrap", "rm", "--all") + require.Equal(t, 0, result.ExitCode(), "bootstrap rm --all should succeed") + + output = result.Stdout.String() + t.Logf("Bootstrap rm --all output with mixed peers: %s", output) + // Should report removing both the specific peer and auto + assert.Contains(t, output, "removed", "should report removing peers") + + // Verify bootstrap list is now empty + listResult = node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput = listResult.Stdout.String() + assert.Empty(t, listOutput, "bootstrap list should be empty after rm --all") + }) +} diff --git a/test/cli/cid_test.go b/test/cli/cid_test.go new file mode 100644 index 000000000..5e44b0db6 --- /dev/null +++ b/test/cli/cid_test.go @@ -0,0 +1,609 @@ +package cli + +import ( + "fmt" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +func TestCidCommands(t *testing.T) { + t.Parallel() + + t.Run("base32", testCidBase32) + t.Run("format", testCidFormat) + t.Run("bases", testCidBases) + t.Run("codecs", testCidCodecs) + t.Run("hashes", testCidHashes) +} + +// testCidBase32 tests 'ipfs cid base32' subcommand +// Includes regression tests for https://github.com/ipfs/kubo/issues/9007 +func testCidBase32(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("converts valid CIDs to base32", func(t *testing.T) { + t.Run("CIDv0 to base32", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa\n", res.Stdout.String()) + }) + + t.Run("CIDv1 base58 to base32", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j") + assert.Equal(t, 0, res.ExitCode()) + 
assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa\n", res.Stdout.String()) + }) + + t.Run("already base32 CID remains unchanged", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa\n", res.Stdout.String()) + }) + + t.Run("multiple valid CIDs", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", + "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo", + "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Empty(t, res.Stderr.String()) + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.Equal(t, 2, len(lines)) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa", lines[0]) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa", lines[1]) + }) + }) + + t.Run("error handling", func(t *testing.T) { + // Regression tests for https://github.com/ipfs/kubo/issues/9007 + t.Run("returns error code 1 for single invalid CID", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "invalid-cid") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "invalid-cid: invalid cid") + assert.Contains(t, res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for mixed valid and invalid CIDs", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo", "invalid-cid") + assert.Equal(t, 1, res.ExitCode()) + // Valid CID should be converted and printed to stdout + assert.Contains(t, res.Stdout.String(), "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + // Invalid CID error should be printed to stderr + assert.Contains(t, res.Stderr.String(), "invalid-cid: invalid cid") + assert.Contains(t, 
res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for stdin with invalid CIDs", func(t *testing.T) { + input := "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo\nbad-cid\nbafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa" + res := node.RunPipeToIPFS(strings.NewReader(input), "cid", "base32") + assert.Equal(t, 1, res.ExitCode()) + // Valid CIDs should be converted + assert.Contains(t, res.Stdout.String(), "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + // Invalid CID error should be in stderr + assert.Contains(t, res.Stderr.String(), "bad-cid: invalid cid") + }) + }) +} + +// testCidFormat tests 'ipfs cid format' subcommand +// Includes regression tests for https://github.com/ipfs/kubo/issues/9007 +func testCidFormat(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("formats CIDs with various options", func(t *testing.T) { + t.Run("default format preserves CID", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo\n", res.Stdout.String()) + }) + + t.Run("convert to CIDv1 with base58btc", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "-v", "1", "-b", "base58btc", + "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j\n", res.Stdout.String()) + }) + + t.Run("convert to CIDv0", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "-v", "0", + "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo\n", res.Stdout.String()) + }) + + t.Run("change codec to raw", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "--mc", "raw", "-b", "base32", + 
"bafybeievd6mwe6vcwnkwo3eizs3h7w3a34opszbyfxziqdxguhjw7imdve") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "bafkreievd6mwe6vcwnkwo3eizs3h7w3a34opszbyfxziqdxguhjw7imdve\n", res.Stdout.String()) + }) + + t.Run("multiple valid CIDs with format options", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "-v", "1", "-b", "base58btc", + "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo", + "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Empty(t, res.Stderr.String()) + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.Equal(t, 2, len(lines)) + assert.Equal(t, "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j", lines[0]) + assert.Equal(t, "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j", lines[1]) + }) + }) + + t.Run("error handling", func(t *testing.T) { + // Regression tests for https://github.com/ipfs/kubo/issues/9007 + t.Run("returns error code 1 for single invalid CID", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "not-a-cid") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "not-a-cid: invalid cid") + assert.Contains(t, res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for mixed valid and invalid CIDs", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "not-a-cid", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 1, res.ExitCode()) + // Valid CID should be printed to stdout + assert.Contains(t, res.Stdout.String(), "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + // Invalid CID error should be printed to stderr + assert.Contains(t, res.Stderr.String(), "not-a-cid: invalid cid") + assert.Contains(t, res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for stdin with invalid CIDs", func(t *testing.T) { + input := "invalid\nQmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo" + res := 
node.RunPipeToIPFS(strings.NewReader(input), "cid", "format", "-v", "1", "-b", "base58btc") + assert.Equal(t, 1, res.ExitCode()) + // Valid CID should be converted + assert.Contains(t, res.Stdout.String(), "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j") + // Invalid CID error should be in stderr + assert.Contains(t, res.Stderr.String(), "invalid: invalid cid") + }) + }) +} + +// testCidBases tests 'ipfs cid bases' subcommand +func testCidBases(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("lists available bases", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove support + // for multibase encodings. If a new base is intentionally added or removed, + // this test should be updated accordingly. + expectedBases := []string{ + "identity", + "base2", + "base16", + "base16upper", + "base32", + "base32upper", + "base32pad", + "base32padupper", + "base32hex", + "base32hexupper", + "base32hexpad", + "base32hexpadupper", + "base36", + "base36upper", + "base58btc", + "base58flickr", + "base64", + "base64pad", + "base64url", + "base64urlpad", + "base256emoji", + } + + res := node.RunIPFS("cid", "bases") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases", expectedBases, lines) + }) + + t.Run("with --prefix flag shows single letter prefixes", func(t *testing.T) { + // Regression test to catch any changes to the output format or supported bases + expectedLines := []string{ + "identity", + "0 base2", + "b base32", + "B base32upper", + "c base32pad", + "C base32padupper", + "f base16", + "F base16upper", + "k base36", + "K base36upper", + "m base64", + "M base64pad", + "t base32hexpad", + "T base32hexpadupper", + "u base64url", + "U base64urlpad", + "v base32hex", + "V base32hexupper", + "z base58btc", + "Z base58flickr", + "🚀 base256emoji", + } + + res := node.RunIPFS("cid", "bases", "--prefix") + assert.Equal(t, 
0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases --prefix output", expectedLines, lines) + }) + + t.Run("with --numeric flag shows numeric codes", func(t *testing.T) { + // Regression test to catch any changes to the output format or supported bases + expectedLines := []string{ + "0 identity", + "48 base2", + "98 base32", + "66 base32upper", + "99 base32pad", + "67 base32padupper", + "102 base16", + "70 base16upper", + "107 base36", + "75 base36upper", + "109 base64", + "77 base64pad", + "116 base32hexpad", + "84 base32hexpadupper", + "117 base64url", + "85 base64urlpad", + "118 base32hex", + "86 base32hexupper", + "122 base58btc", + "90 base58flickr", + "128640 base256emoji", + } + + res := node.RunIPFS("cid", "bases", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases --numeric output", expectedLines, lines) + }) + + t.Run("with both --prefix and --numeric flags", func(t *testing.T) { + // Regression test to catch any changes to the output format or supported bases + expectedLines := []string{ + "0 identity", + "0 48 base2", + "b 98 base32", + "B 66 base32upper", + "c 99 base32pad", + "C 67 base32padupper", + "f 102 base16", + "F 70 base16upper", + "k 107 base36", + "K 75 base36upper", + "m 109 base64", + "M 77 base64pad", + "t 116 base32hexpad", + "T 84 base32hexpadupper", + "u 117 base64url", + "U 85 base64urlpad", + "v 118 base32hex", + "V 86 base32hexupper", + "z 122 base58btc", + "Z 90 base58flickr", + "🚀 128640 base256emoji", + } + + res := node.RunIPFS("cid", "bases", "--prefix", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases --prefix --numeric output", expectedLines, lines) + }) +} + +// testCidCodecs tests 'ipfs cid codecs' subcommand +func testCidCodecs(t *testing.T) { + t.Parallel() + 
node := harness.NewT(t).NewNode() + + t.Run("lists available codecs", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // IPLD codecs. If a codec is intentionally added or removed, + // this test should be updated accordingly. + expectedCodecs := []string{ + "cbor", + "raw", + "dag-pb", + "dag-cbor", + "libp2p-key", + "git-raw", + "torrent-info", + "torrent-file", + "blake3-hashseq", + "leofcoin-block", + "leofcoin-tx", + "leofcoin-pr", + "dag-jose", + "dag-cose", + "eth-block", + "eth-block-list", + "eth-tx-trie", + "eth-tx", + "eth-tx-receipt-trie", + "eth-tx-receipt", + "eth-state-trie", + "eth-account-snapshot", + "eth-storage-trie", + "eth-receipt-log-trie", + "eth-receipt-log", + "bitcoin-block", + "bitcoin-tx", + "bitcoin-witness-commitment", + "zcash-block", + "zcash-tx", + "stellar-block", + "stellar-tx", + "decred-block", + "decred-tx", + "dash-block", + "dash-tx", + "swarm-manifest", + "swarm-feed", + "beeson", + "dag-json", + "swhid-1-snp", + "json", + "rdfc-1", + "json-jcs", + } + + res := node.RunIPFS("cid", "codecs") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "codecs", expectedCodecs, lines) + }) + + t.Run("with --numeric flag shows codec numbers", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // IPLD codecs. If a codec is intentionally added or removed, + // this test should be updated accordingly. 
+ expectedLines := []string{ + "81 cbor", + "85 raw", + "112 dag-pb", + "113 dag-cbor", + "114 libp2p-key", + "120 git-raw", + "123 torrent-info", + "124 torrent-file", + "128 blake3-hashseq", + "129 leofcoin-block", + "130 leofcoin-tx", + "131 leofcoin-pr", + "133 dag-jose", + "134 dag-cose", + "144 eth-block", + "145 eth-block-list", + "146 eth-tx-trie", + "147 eth-tx", + "148 eth-tx-receipt-trie", + "149 eth-tx-receipt", + "150 eth-state-trie", + "151 eth-account-snapshot", + "152 eth-storage-trie", + "153 eth-receipt-log-trie", + "154 eth-receipt-log", + "176 bitcoin-block", + "177 bitcoin-tx", + "178 bitcoin-witness-commitment", + "192 zcash-block", + "193 zcash-tx", + "208 stellar-block", + "209 stellar-tx", + "224 decred-block", + "225 decred-tx", + "240 dash-block", + "241 dash-tx", + "250 swarm-manifest", + "251 swarm-feed", + "252 beeson", + "297 dag-json", + "496 swhid-1-snp", + "512 json", + "46083 rdfc-1", + "46593 json-jcs", + } + + res := node.RunIPFS("cid", "codecs", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "codecs --numeric output", expectedLines, lines) + }) + + t.Run("with --supported flag lists only supported codecs", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally change the list + // of supported codecs. If a codec is intentionally added or removed from + // support, this test should be updated accordingly. 
+ expectedSupportedCodecs := []string{ + "cbor", + "dag-cbor", + "dag-jose", + "dag-json", + "dag-pb", + "git-raw", + "json", + "libp2p-key", + "raw", + } + + res := node.RunIPFS("cid", "codecs", "--supported") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "supported codecs", expectedSupportedCodecs, lines) + }) + + t.Run("with both --supported and --numeric flags", func(t *testing.T) { + // Regression test to catch any changes to supported codecs or output format + expectedLines := []string{ + "81 cbor", + "85 raw", + "112 dag-pb", + "113 dag-cbor", + "114 libp2p-key", + "120 git-raw", + "133 dag-jose", + "297 dag-json", + "512 json", + } + + res := node.RunIPFS("cid", "codecs", "--supported", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "codecs --supported --numeric output", expectedLines, lines) + }) +} + +// testCidHashes tests 'ipfs cid hashes' subcommand +func testCidHashes(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("lists available hashes", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // support for hash functions. If a hash function is intentionally added + // or removed, this test should be updated accordingly. 
+ expectedHashes := []string{ + "identity", + "sha1", + "sha2-256", + "sha2-512", + "sha3-512", + "sha3-384", + "sha3-256", + "sha3-224", + "shake-256", + "keccak-224", + "keccak-256", + "keccak-384", + "keccak-512", + "blake3", + "dbl-sha2-256", + } + + // Also expect all blake2b variants (160-512 in steps of 8) + for i := 160; i <= 512; i += 8 { + expectedHashes = append(expectedHashes, fmt.Sprintf("blake2b-%d", i)) + } + + // Also expect all blake2s variants (160-256 in steps of 8) + for i := 160; i <= 256; i += 8 { + expectedHashes = append(expectedHashes, fmt.Sprintf("blake2s-%d", i)) + } + + res := node.RunIPFS("cid", "hashes") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "hash functions", expectedHashes, lines) + }) + + t.Run("with --numeric flag shows hash function codes", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // support for hash functions. If a hash function is intentionally added + // or removed, this test should be updated accordingly. 
+ expectedLines := []string{ + "0 identity", + "17 sha1", + "18 sha2-256", + "19 sha2-512", + "20 sha3-512", + "21 sha3-384", + "22 sha3-256", + "23 sha3-224", + "25 shake-256", + "26 keccak-224", + "27 keccak-256", + "28 keccak-384", + "29 keccak-512", + "30 blake3", + "86 dbl-sha2-256", + } + + // Add all blake2b variants (160-512 in steps of 8) + for i := 160; i <= 512; i += 8 { + expectedLines = append(expectedLines, fmt.Sprintf("%d blake2b-%d", 45568+i/8, i)) + } + + // Add all blake2s variants (160-256 in steps of 8) + for i := 160; i <= 256; i += 8 { + expectedLines = append(expectedLines, fmt.Sprintf("%d blake2s-%d", 45632+i/8, i)) + } + + res := node.RunIPFS("cid", "hashes", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "hashes --numeric output", expectedLines, lines) + }) +} + +// assertExactSet compares expected vs actual items and reports clear errors for any differences. +// This is used as a regression test to ensure we don't accidentally add or remove support. +// Both expected and actual strings are trimmed of whitespace before comparison for maintainability. 
+func assertExactSet(t *testing.T, itemType string, expected []string, actual []string) { + t.Helper() + + // Normalize by trimming whitespace + normalizedExpected := make([]string, len(expected)) + for i, item := range expected { + normalizedExpected[i] = strings.TrimSpace(item) + } + + normalizedActual := make([]string, len(actual)) + for i, item := range actual { + normalizedActual[i] = strings.TrimSpace(item) + } + + expectedSet := make(map[string]bool) + for _, item := range normalizedExpected { + expectedSet[item] = true + } + + actualSet := make(map[string]bool) + for _, item := range normalizedActual { + actualSet[item] = true + } + + var missing []string + for _, item := range normalizedExpected { + if !actualSet[item] { + missing = append(missing, item) + } + } + + var unexpected []string + for _, item := range normalizedActual { + if !expectedSet[item] { + unexpected = append(unexpected, item) + } + } + + if len(missing) > 0 { + t.Errorf("Missing expected %s: %q", itemType, missing) + } + if len(unexpected) > 0 { + t.Errorf("Unexpected %s found: %q", itemType, unexpected) + } + + assert.Equal(t, len(expected), len(actual), + "Expected %d %s but got %d", len(expected), itemType, len(actual)) +} diff --git a/test/cli/cli_https_test.go b/test/cli/cli_https_test.go new file mode 100644 index 000000000..e128a1916 --- /dev/null +++ b/test/cli/cli_https_test.go @@ -0,0 +1,46 @@ +package cli + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +func TestCLIWithRemoteHTTPS(t *testing.T) { + tests := []struct{ addrSuffix string }{{"https"}, {"tls/http"}} + for _, tt := range tests { + t.Run("with "+tt.addrSuffix+" multiaddr", func(t *testing.T) { + + // Create HTTPS test server + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.TLS == nil { + t.Error("Mocked Kubo RPC received plain HTTP 
request instead of HTTPS TLS Handshake") + } + _, _ = w.Write([]byte("OK")) + })) + defer server.Close() + + serverURL, _ := url.Parse(server.URL) + _, port, _ := net.SplitHostPort(serverURL.Host) + + // Create Kubo repo + node := harness.NewT(t).NewNode().Init() + + // Attempt to talk to remote Kubo RPC endpoint over HTTPS + resp := node.RunIPFS("id", "--api", fmt.Sprintf("/ip4/127.0.0.1/tcp/%s/%s", port, tt.addrSuffix)) + + // Expect HTTPS error (confirming TLS and https:// were used, and not Cleartext HTTP) + require.Error(t, resp.Err) + require.Contains(t, resp.Stderr.String(), "Error: tls: failed to verify certificate: x509: certificate signed by unknown authority") + + node.StopDaemon() + + }) + } +} diff --git a/test/cli/commands_without_repo_test.go b/test/cli/commands_without_repo_test.go new file mode 100644 index 000000000..55469adae --- /dev/null +++ b/test/cli/commands_without_repo_test.go @@ -0,0 +1,130 @@ +package cli + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +func TestCommandsWithoutRepo(t *testing.T) { + t.Run("cid", func(t *testing.T) { + t.Run("base32", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "base32", "QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "bafybeibxm2nsadl3fnxv2sxcxmxaco2jl53wpeorjdzidjwf5aqdg7wa6u\n" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("format", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "format", "-v", "1", "QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "zdj7WZAAFKPvYPPzyJLso2hhxo8a7ZACFQ4DvvfrNXTHidofr\n" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("bases", func(t 
*testing.T) { + cmd := exec.Command("ipfs", "cid", "bases") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "base32") { + t.Fatalf("expected base32 in output, got: %s", stdout) + } + }) + + t.Run("codecs", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "codecs") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "dag-pb") { + t.Fatalf("expected dag-pb in output, got: %s", stdout) + } + }) + + t.Run("hashes", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "hashes") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "sha2-256") { + t.Fatalf("expected sha2-256 in output, got: %s", stdout) + } + }) + }) + + t.Run("multibase", func(t *testing.T) { + t.Run("list", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "list") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "base32") { + t.Fatalf("expected base32 in output, got: %s", stdout) + } + }) + + t.Run("encode", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "encode", "-b", "base32") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + cmd.Stdin = strings.NewReader("hello\n") + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "bnbswy3dpbi" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("decode", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "decode") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + cmd.Stdin = strings.NewReader("bnbswy3dpbi") + stdout, err := cmd.Output() + if err != nil { 
+ t.Fatal(err) + } + expected := "hello\n" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("transcode", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "transcode", "-b", "base64") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + cmd.Stdin = strings.NewReader("bnbswy3dpbi") + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "maGVsbG8K" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + }) +} diff --git a/test/cli/config_secrets_test.go b/test/cli/config_secrets_test.go new file mode 100644 index 000000000..b3e3cdc26 --- /dev/null +++ b/test/cli/config_secrets_test.go @@ -0,0 +1,164 @@ +package cli + +import ( + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/tidwall/sjson" +) + +func TestConfigSecrets(t *testing.T) { + t.Parallel() + + t.Run("Identity.PrivKey protection", func(t *testing.T) { + t.Parallel() + + t.Run("Identity.PrivKey is concealed in config show", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Read the actual config file to get the real PrivKey + configFile := node.ReadFile(node.ConfigFile()) + assert.Contains(t, configFile, "PrivKey") + + // config show should NOT contain the PrivKey + configShow := node.RunIPFS("config", "show").Stdout.String() + assert.NotContains(t, configShow, "PrivKey") + }) + + t.Run("Identity.PrivKey cannot be read via ipfs config", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Attempting to read Identity.PrivKey should fail + res := node.RunIPFS("config", "Identity.PrivKey") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "cannot show or change private key") + }) + + t.Run("Identity.PrivKey cannot be read via ipfs config Identity", func(t *testing.T) { + t.Parallel() + node := 
harness.NewT(t).NewNode().Init() + + // Attempting to read Identity section should fail (it contains PrivKey) + res := node.RunIPFS("config", "Identity") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "cannot show or change private key") + }) + + t.Run("Identity.PrivKey cannot be set via config replace", func(t *testing.T) { + t.Parallel() + // Key rotation must be done in offline mode via the dedicated `ipfs key rotate` command. + // This test ensures PrivKey cannot be changed via config replace. + node := harness.NewT(t).NewNode().Init() + + configShow := node.RunIPFS("config", "show").Stdout.String() + + // Try to inject a PrivKey via config replace + configJSON := MustVal(sjson.Set(configShow, "Identity.PrivKey", "CAASqAkwggSkAgEAAo")) + node.WriteBytes("new-config", []byte(configJSON)) + res := node.RunIPFS("config", "replace", "new-config") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "setting private key") + }) + + t.Run("Identity.PrivKey is preserved when re-injecting config", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Read the original config file + originalConfig := node.ReadFile(node.ConfigFile()) + assert.Contains(t, originalConfig, "PrivKey") + + // Extract the PrivKey value for comparison + var origPrivKey string + assert.Contains(t, originalConfig, "PrivKey") + // Simple extraction - find the PrivKey line + for _, line := range strings.Split(originalConfig, "\n") { + if strings.Contains(line, "\"PrivKey\":") { + origPrivKey = line + break + } + } + assert.NotEmpty(t, origPrivKey) + + // Get config show output (which should NOT contain PrivKey) + configShow := node.RunIPFS("config", "show").Stdout.String() + assert.NotContains(t, configShow, "PrivKey") + + // Re-inject the config via config replace + node.WriteBytes("config-show", []byte(configShow)) + node.IPFS("config", "replace", "config-show") + + // The PrivKey should still be in the 
config file + newConfig := node.ReadFile(node.ConfigFile()) + assert.Contains(t, newConfig, "PrivKey") + + // Verify the PrivKey line is the same + var newPrivKey string + for _, line := range strings.Split(newConfig, "\n") { + if strings.Contains(line, "\"PrivKey\":") { + newPrivKey = line + break + } + } + assert.Equal(t, origPrivKey, newPrivKey, "PrivKey should be preserved") + }) + }) + + t.Run("TLS security validation", func(t *testing.T) { + t.Parallel() + + t.Run("AutoConf.TLSInsecureSkipVerify defaults to false", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Check the default value in a fresh init + res := node.RunIPFS("config", "AutoConf.TLSInsecureSkipVerify") + // Field may not exist (exit code 1) or be false/empty (exit code 0) + // Both are acceptable as they mean "not true" + output := res.Stdout.String() + assert.NotContains(t, output, "true", "default should not be true") + }) + + t.Run("AutoConf.TLSInsecureSkipVerify can be set to true", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Set to true + node.IPFS("config", "AutoConf.TLSInsecureSkipVerify", "true", "--json") + + // Verify it was set + res := node.RunIPFS("config", "AutoConf.TLSInsecureSkipVerify") + assert.Equal(t, 0, res.ExitCode()) + assert.Contains(t, res.Stdout.String(), "true") + }) + + t.Run("HTTPRetrieval.TLSInsecureSkipVerify defaults to false", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Check the default value in a fresh init + res := node.RunIPFS("config", "HTTPRetrieval.TLSInsecureSkipVerify") + // Field may not exist (exit code 1) or be false/empty (exit code 0) + // Both are acceptable as they mean "not true" + output := res.Stdout.String() + assert.NotContains(t, output, "true", "default should not be true") + }) + + t.Run("HTTPRetrieval.TLSInsecureSkipVerify can be set to true", func(t *testing.T) { + t.Parallel() + node := 
harness.NewT(t).NewNode().Init() + + // Set to true + node.IPFS("config", "HTTPRetrieval.TLSInsecureSkipVerify", "true", "--json") + + // Verify it was set + res := node.RunIPFS("config", "HTTPRetrieval.TLSInsecureSkipVerify") + assert.Equal(t, 0, res.ExitCode()) + assert.Contains(t, res.Stdout.String(), "true") + }) + }) +} diff --git a/test/cli/content_blocking_test.go b/test/cli/content_blocking_test.go index 6598354d1..513de5e59 100644 --- a/test/cli/content_blocking_test.go +++ b/test/cli/content_blocking_test.go @@ -76,6 +76,7 @@ func TestContentBlocking(t *testing.T) { // Start daemon, it should pick up denylist from $IPFS_PATH/denylists/test.deny node.StartDaemon() // we need online mode for GatewayOverLibp2p tests + t.Cleanup(func() { node.StopDaemon() }) client := node.GatewayClient() // First, confirm gateway works @@ -308,7 +309,7 @@ func TestContentBlocking(t *testing.T) { // trustless gateway exposed over libp2p // when Experimental.GatewayOverLibp2p=true // (https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#http-gateway-over-libp2p) - // NOTE: this type fo gateway is hardcoded to be NoFetch: it does not fetch + // NOTE: this type of gateway is hardcoded to be NoFetch: it does not fetch // data that is not in local store, so we only need to run it once: a // simple smoke-test for allowed CID and blockedCID. 
t.Run("GatewayOverLibp2p", func(t *testing.T) { diff --git a/test/cli/content_routing_http_test.go b/test/cli/content_routing_http_test.go index aea5c41ca..b6e045383 100644 --- a/test/cli/content_routing_http_test.go +++ b/test/cli/content_routing_http_test.go @@ -1,69 +1,20 @@ package cli import ( - "context" "net/http" "net/http/httptest" "os/exec" - "sync" "testing" "time" - "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/routing/http/server" - "github.com/ipfs/boxo/routing/http/types" - "github.com/ipfs/boxo/routing/http/types/iter" - "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/routing" + "github.com/ipfs/kubo/test/cli/testutils/httprouting" "github.com/stretchr/testify/assert" ) -type fakeHTTPContentRouter struct { - m sync.Mutex - provideBitswapCalls int - findProvidersCalls int - findPeersCalls int -} - -func (r *fakeHTTPContentRouter) FindProviders(ctx context.Context, key cid.Cid, limit int) (iter.ResultIter[types.Record], error) { - r.m.Lock() - defer r.m.Unlock() - r.findProvidersCalls++ - return iter.FromSlice([]iter.Result[types.Record]{}), nil -} - -// nolint deprecated -func (r *fakeHTTPContentRouter) ProvideBitswap(ctx context.Context, req *server.BitswapWriteProvideRequest) (time.Duration, error) { - r.m.Lock() - defer r.m.Unlock() - r.provideBitswapCalls++ - return 0, nil -} - -func (r *fakeHTTPContentRouter) FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[*types.PeerRecord], error) { - r.m.Lock() - defer r.m.Unlock() - r.findPeersCalls++ - return iter.FromSlice([]iter.Result[*types.PeerRecord]{}), nil -} - -func (r *fakeHTTPContentRouter) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) { - return nil, routing.ErrNotSupported -} - -func (r *fakeHTTPContentRouter) PutIPNS(ctx context.Context, name 
ipns.Name, rec *ipns.Record) error { - return routing.ErrNotSupported -} - -func (r *fakeHTTPContentRouter) numFindProvidersCalls() int { - r.m.Lock() - defer r.m.Unlock() - return r.findProvidersCalls -} - // userAgentRecorder records the user agent of every HTTP request type userAgentRecorder struct { delegate http.Handler @@ -76,20 +27,23 @@ func (r *userAgentRecorder) ServeHTTP(w http.ResponseWriter, req *http.Request) } func TestContentRoutingHTTP(t *testing.T) { - cr := &fakeHTTPContentRouter{} + mockRouter := &httprouting.MockHTTPContentRouter{} // run the content routing HTTP server - userAgentRecorder := &userAgentRecorder{delegate: server.Handler(cr)} + userAgentRecorder := &userAgentRecorder{delegate: server.Handler(mockRouter)} server := httptest.NewServer(userAgentRecorder) t.Cleanup(func() { server.Close() }) // setup the node node := harness.NewT(t).NewNode().Init() - node.Runner.Env["IPFS_HTTP_ROUTERS"] = server.URL + node.UpdateConfig(func(cfg *config.Config) { + // setup Kubo node to use mocked HTTP Router + cfg.Routing.DelegatedRouters = []string{server.URL} + }) node.StartDaemon() // compute a random CID - randStr := string(testutils.RandomBytes(100)) + randStr := string(random.Bytes(100)) res := node.PipeStrToIPFS(randStr, "add", "-qn") wantCIDStr := res.Stdout.Trimmed() @@ -107,7 +61,7 @@ func TestContentRoutingHTTP(t *testing.T) { // verify the content router was called assert.Eventually(t, func() bool { - return cr.numFindProvidersCalls() > 0 + return mockRouter.NumFindProvidersCalls() > 0 }, time.Minute, 10*time.Millisecond) assert.NotEmpty(t, userAgentRecorder.userAgents) diff --git a/test/cli/daemon_test.go b/test/cli/daemon_test.go index 7a8c583a2..f87a21651 100644 --- a/test/cli/daemon_test.go +++ b/test/cli/daemon_test.go @@ -1,10 +1,20 @@ package cli import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "net/http" "os/exec" "testing" + "time" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" + 
"github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/stretchr/testify/require" ) func TestDaemon(t *testing.T) { @@ -22,4 +32,125 @@ func TestDaemon(t *testing.T) { node.StopDaemon() }) + + t.Run("daemon shuts down gracefully with active operations", func(t *testing.T) { + t.Parallel() + + // Start daemon with multiple components active via config + node := harness.NewT(t).NewNode().Init() + + // Enable experimental features and pubsub via config + node.UpdateConfig(func(cfg *config.Config) { + cfg.Pubsub.Enabled = config.True // Instead of --enable-pubsub-experiment + cfg.Experimental.P2pHttpProxy = true // Enable P2P HTTP proxy + cfg.Experimental.GatewayOverLibp2p = true // Enable gateway over libp2p + }) + + node.StartDaemon("--enable-gc") + + // Start background operations to simulate real daemon workload: + // 1. "ipfs add" simulates content onboarding/ingestion work + // 2. Gateway request simulates content retrieval and gateway processing work + + // Background operation 1: Continuous add of random data to simulate onboarding + addDone := make(chan struct{}) + go func() { + defer close(addDone) + + // Start the add command asynchronously + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"add", "--progress=false", "-"}, + RunFunc: (*exec.Cmd).Start, + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdin(&infiniteReader{}), + }, + }) + + // Wait for command to finish (when daemon stops) + if res.Cmd != nil { + _ = res.Cmd.Wait() // Ignore error, expect command to be killed during shutdown + } + }() + + // Background operation 2: Gateway CAR request to simulate retrieval work + gatewayDone := make(chan struct{}) + go func() { + defer close(gatewayDone) + + // First add a file sized to ensure gateway request takes ~1 minute + largeData := make([]byte, 512*1024) // 512KB of data + _, _ = rand.Read(largeData) // Always succeeds for crypto/rand + testCID := 
node.IPFSAdd(bytes.NewReader(largeData)) + + // Get gateway address from config + cfg := node.ReadConfig() + gatewayMaddr, err := multiaddr.NewMultiaddr(cfg.Addresses.Gateway[0]) + if err != nil { + return + } + gatewayAddr, err := manet.ToNetAddr(gatewayMaddr) + if err != nil { + return + } + + // Request CAR but slow reading to simulate heavy gateway load + gatewayURL := fmt.Sprintf("http://%s/ipfs/%s?format=car", gatewayAddr, testCID) + + client := &http.Client{Timeout: 90 * time.Second} + resp, err := client.Get(gatewayURL) + if err == nil { + defer resp.Body.Close() + // Read response slowly: 512KB ÷ 1KB × 125ms = ~64 seconds (1+ minute) total + // This ensures operation is still active when we shutdown at 2 seconds + buf := make([]byte, 1024) // 1KB buffer + for { + if _, err := io.ReadFull(resp.Body, buf); err != nil { + return + } + time.Sleep(125 * time.Millisecond) // 125ms delay = ~64s total for 512KB + } + } + }() + + // Let operations run for 2 seconds to ensure they're active + time.Sleep(2 * time.Second) + + // Trigger graceful shutdown + shutdownStart := time.Now() + node.StopDaemon() + shutdownDuration := time.Since(shutdownStart) + + // Verify clean shutdown: + // - Daemon should stop within reasonable time (not hang) + require.Less(t, shutdownDuration, 10*time.Second, "daemon should shut down within 10 seconds") + + // Wait for background operations to complete (with timeout) + select { + case <-addDone: + // Good, add operation terminated + case <-time.After(5 * time.Second): + t.Error("add operation did not terminate within 5 seconds after daemon shutdown") + } + + select { + case <-gatewayDone: + // Good, gateway operation terminated + case <-time.After(5 * time.Second): + t.Error("gateway operation did not terminate within 5 seconds after daemon shutdown") + } + + // Verify we can restart with same repo (no lock issues) + node.StartDaemon() + node.StopDaemon() + }) +} + +// infiniteReader provides an infinite stream of random data +type 
infiniteReader struct{} + +func (r *infiniteReader) Read(p []byte) (n int, err error) { + _, _ = rand.Read(p) // Always succeeds for crypto/rand + time.Sleep(50 * time.Millisecond) // Rate limit to simulate steady stream + return len(p), nil } diff --git a/test/cli/dag_test.go b/test/cli/dag_test.go index 1a3defc3c..38457318a 100644 --- a/test/cli/dag_test.go +++ b/test/cli/dag_test.go @@ -5,10 +5,13 @@ import ( "io" "os" "testing" + "time" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -44,6 +47,8 @@ func TestDag(t *testing.T) { t.Run("ipfs dag stat --enc=json", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + // Import fixture r, err := os.Open(fixtureFile) assert.Nil(t, err) @@ -88,6 +93,7 @@ func TestDag(t *testing.T) { t.Run("ipfs dag stat", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() r, err := os.Open(fixtureFile) assert.NoError(t, err) defer r.Close() @@ -102,3 +108,200 @@ func TestDag(t *testing.T) { assert.Equal(t, content, stat.Stdout.Bytes()) }) } + +func TestDagImportFastProvide(t *testing.T) { + t.Parallel() + + t.Run("fast-provide-root disabled via config: verify skipped in logs", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.False + }) + + // Start daemon with debug logging + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + // Import CAR file + r, err := os.Open(fixtureFile) + require.NoError(t, err) + defer r.Close() + err = node.IPFSDagImport(r, 
fixtureCid) + require.NoError(t, err) + + // Verify fast-provide-root was disabled + daemonLog := node.Daemon.Stderr.String() + require.Contains(t, daemonLog, "fast-provide-root: skipped") + }) + + t.Run("fast-provide-root enabled with wait=false: verify async provide", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + // Use default config (FastProvideRoot=true, FastProvideWait=false) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + // Import CAR file + r, err := os.Open(fixtureFile) + require.NoError(t, err) + defer r.Close() + err = node.IPFSDagImport(r, fixtureCid) + require.NoError(t, err) + + daemonLog := node.Daemon.Stderr + // Should see async mode started + require.Contains(t, daemonLog.String(), "fast-provide-root: enabled") + require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously") + require.Contains(t, daemonLog.String(), fixtureCid) // Should log the specific CID being provided + + // Wait for async completion or failure (slightly more than DefaultFastProvideTimeout) + // In test environment with no DHT peers, this will fail with "failed to find any peer in table" + timeout := config.DefaultFastProvideTimeout + time.Second + completedOrFailed := waitForLogMessage(daemonLog, "async provide completed", timeout) || + waitForLogMessage(daemonLog, "async provide failed", timeout) + require.True(t, completedOrFailed, "async provide should complete or fail within timeout") + }) + + t.Run("fast-provide-root enabled with wait=true: verify sync provide", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideWait = config.True + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + 
harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + // Import CAR file - use Run instead of IPFSDagImport to handle expected error + r, err := os.Open(fixtureFile) + require.NoError(t, err) + defer r.Close() + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"dag", "import", "--pin-roots=false"}, + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdin(r), + }, + }) + // In sync mode (wait=true), provide errors propagate and fail the command. + // Test environment uses 'test' profile with no bootstrappers, and CI has + // insufficient peers for proper DHT puts, so we expect this to fail with + // "failed to find any peer in table" error from the DHT. + require.Equal(t, 1, res.ExitCode()) + require.Contains(t, res.Stderr.String(), "Error: fast-provide: failed to find any peer in table") + + daemonLog := node.Daemon.Stderr.String() + // Should see sync mode started + require.Contains(t, daemonLog, "fast-provide-root: enabled") + require.Contains(t, daemonLog, "fast-provide-root: providing synchronously") + require.Contains(t, daemonLog, fixtureCid) // Should log the specific CID being provided + require.Contains(t, daemonLog, "sync provide failed") // Verify the failure was logged + }) + + t.Run("fast-provide-wait ignored when root disabled", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.False + cfg.Import.FastProvideWait = config.True + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + // Import CAR file + r, err := os.Open(fixtureFile) + require.NoError(t, err) + defer r.Close() + err = node.IPFSDagImport(r, 
fixtureCid) + require.NoError(t, err) + + daemonLog := node.Daemon.Stderr.String() + require.Contains(t, daemonLog, "fast-provide-root: skipped") + // Note: dag import doesn't log wait-flag-ignored like add does + }) + + t.Run("CLI flag overrides config: flag=true overrides config=false", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.False + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + // Import CAR file with flag override + r, err := os.Open(fixtureFile) + require.NoError(t, err) + defer r.Close() + err = node.IPFSDagImport(r, fixtureCid, "--fast-provide-root=true") + require.NoError(t, err) + + daemonLog := node.Daemon.Stderr + // Flag should enable it despite config saying false + require.Contains(t, daemonLog.String(), "fast-provide-root: enabled") + require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously") + require.Contains(t, daemonLog.String(), fixtureCid) // Should log the specific CID being provided + }) + + t.Run("CLI flag overrides config: flag=false overrides config=true", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Import.FastProvideRoot = config.True + }) + + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithEnv(map[string]string{ + "GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug", + }), + }, + }, "") + defer node.StopDaemon() + + // Import CAR file with flag override + r, err := os.Open(fixtureFile) + require.NoError(t, err) + defer r.Close() + err = node.IPFSDagImport(r, fixtureCid, "--fast-provide-root=false") + require.NoError(t, err) + + daemonLog := 
node.Daemon.Stderr.String() + // Flag should disable it despite config saying true + require.Contains(t, daemonLog, "fast-provide-root: skipped") + }) +} diff --git a/test/cli/delegated_routing_v1_http_proxy_test.go b/test/cli/delegated_routing_v1_http_proxy_test.go index 1d80ae50a..2b82a2714 100644 --- a/test/cli/delegated_routing_v1_http_proxy_test.go +++ b/test/cli/delegated_routing_v1_http_proxy_test.go @@ -4,9 +4,9 @@ import ( "testing" "github.com/ipfs/boxo/ipns" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,9 +15,11 @@ func TestRoutingV1Proxy(t *testing.T) { t.Parallel() setupNodes := func(t *testing.T) harness.Nodes { - nodes := harness.NewT(t).NewNodes(2).Init() + nodes := harness.NewT(t).NewNodes(3).Init() - // Node 0 uses DHT and exposes the Routing API. + // Node 0 uses DHT and exposes the Routing API. For the DHT + // to actually work there will need to be another DHT-enabled + // node. nodes[0].UpdateConfig(func(cfg *config.Config) { cfg.Gateway.ExposeRoutingAPI = config.True cfg.Discovery.MDNS.Enabled = false @@ -49,6 +51,19 @@ func TestRoutingV1Proxy(t *testing.T) { }) nodes[1].StartDaemon() + // This is the second DHT node. Only used so that the DHT is + // operative. + nodes[2].UpdateConfig(func(cfg *config.Config) { + cfg.Gateway.ExposeRoutingAPI = config.True + cfg.Discovery.MDNS.Enabled = false + cfg.Routing.Type = config.NewOptionalString("dht") + }) + nodes[2].StartDaemon() + + t.Cleanup(func() { + nodes.StopDaemons() + }) + // Connect them. 
nodes.Connect() @@ -59,7 +74,9 @@ func TestRoutingV1Proxy(t *testing.T) { t.Parallel() nodes := setupNodes(t) - cidStr := nodes[0].IPFSAddStr(testutils.RandomStr(1000)) + cidStr := nodes[0].IPFSAddStr(string(random.Bytes(1000))) + // Reprovide as initialProviderDelay still ongoing + waitUntilProvidesComplete(t, nodes[0]) res := nodes[1].IPFS("routing", "findprovs", cidStr) assert.Equal(t, nodes[0].PeerID().String(), res.Stdout.Trimmed()) @@ -96,7 +113,7 @@ func TestRoutingV1Proxy(t *testing.T) { require.Error(t, res.ExitErr) // Publish record on Node 0. - path := "/ipfs/" + nodes[0].IPFSAddStr(testutils.RandomStr(1000)) + path := "/ipfs/" + nodes[0].IPFSAddStr(string(random.Bytes(1000))) nodes[0].IPFS("name", "publish", "--allow-offline", path) // Get record on Node 1 (no DHT). @@ -119,7 +136,7 @@ func TestRoutingV1Proxy(t *testing.T) { require.Error(t, res.ExitErr) // Publish name. - path := "/ipfs/" + nodes[0].IPFSAddStr(testutils.RandomStr(1000)) + path := "/ipfs/" + nodes[0].IPFSAddStr(string(random.Bytes(1000))) nodes[0].IPFS("name", "publish", "--allow-offline", path) // Resolve IPNS name @@ -133,7 +150,7 @@ func TestRoutingV1Proxy(t *testing.T) { // Publish something on Node 1 (no DHT). nodeName := "/ipns/" + ipns.NameFromPeer(nodes[1].PeerID()).String() - path := "/ipfs/" + nodes[1].IPFSAddStr(testutils.RandomStr(1000)) + path := "/ipfs/" + nodes[1].IPFSAddStr(string(random.Bytes(1000))) nodes[1].IPFS("name", "publish", "--allow-offline", path) // Retrieve through Node 0. 
diff --git a/test/cli/delegated_routing_v1_http_server_test.go b/test/cli/delegated_routing_v1_http_server_test.go index f2bd98cb7..503dba39b 100644 --- a/test/cli/delegated_routing_v1_http_server_test.go +++ b/test/cli/delegated_routing_v1_http_server_test.go @@ -2,9 +2,12 @@ package cli import ( "context" + "strings" "testing" + "time" "github.com/google/uuid" + "github.com/ipfs/boxo/autoconf" "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/routing/http/client" "github.com/ipfs/boxo/routing/http/types" @@ -14,6 +17,7 @@ import ( "github.com/ipfs/kubo/test/cli/harness" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestRoutingV1Server(t *testing.T) { @@ -28,6 +32,7 @@ func TestRoutingV1Server(t *testing.T) { }) }) nodes.StartDaemons().Connect() + t.Cleanup(func() { nodes.StopDaemons() }) return nodes } @@ -38,6 +43,7 @@ func TestRoutingV1Server(t *testing.T) { text := "hello world " + uuid.New().String() cidStr := nodes[2].IPFSAddStr(text) _ = nodes[3].IPFSAddStr(text) + waitUntilProvidesComplete(t, nodes[3]) cid, err := cid.Decode(cidStr) assert.NoError(t, err) @@ -128,6 +134,7 @@ func TestRoutingV1Server(t *testing.T) { cfg.Routing.Type = config.NewOptionalString("dht") }) node.StartDaemon() + defer node.StopDaemon() // Put IPNS record in lonely node. It should be accepted as it is a valid record. 
c, err = client.New(node.GatewayURL()) @@ -142,4 +149,135 @@ func TestRoutingV1Server(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "/ipfs/"+cidStr, value.String()) }) + + t.Run("GetClosestPeers returns error when DHT is disabled", func(t *testing.T) { + t.Parallel() + + // Test various routing types that don't support DHT + routingTypes := []string{"none", "delegated", "custom"} + for _, routingType := range routingTypes { + t.Run("routing_type="+routingType, func(t *testing.T) { + t.Parallel() + + // Create node with specified routing type (DHT disabled) + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Gateway.ExposeRoutingAPI = config.True + cfg.Routing.Type = config.NewOptionalString(routingType) + + // For custom routing type, we need to provide minimal valid config + // otherwise daemon startup will fail + if routingType == "custom" { + // Configure a minimal HTTP router (no DHT) + cfg.Routing.Routers = map[string]config.RouterParser{ + "http-only": { + Router: config.Router{ + Type: config.RouterTypeHTTP, + Parameters: config.HTTPRouterParams{ + Endpoint: "https://delegated-ipfs.dev", + }, + }, + }, + } + cfg.Routing.Methods = map[config.MethodName]config.Method{ + config.MethodNameProvide: {RouterName: "http-only"}, + config.MethodNameFindProviders: {RouterName: "http-only"}, + config.MethodNameFindPeers: {RouterName: "http-only"}, + config.MethodNameGetIPNS: {RouterName: "http-only"}, + config.MethodNamePutIPNS: {RouterName: "http-only"}, + } + } + + // For delegated routing type, ensure we have at least one HTTP router + // to avoid daemon startup failure + if routingType == "delegated" { + // Use a minimal delegated router configuration + cfg.Routing.DelegatedRouters = []string{"https://delegated-ipfs.dev"} + // Delegated routing doesn't support providing, must be disabled + cfg.Provide.Enabled = config.False + } + }) + node.StartDaemon() + defer node.StopDaemon() + + c, err := 
client.New(node.GatewayURL()) + require.NoError(t, err) + + // Try to get closest peers - should fail gracefully with an error. + // Use 60-second timeout (server has 30s routing timeout). + testCid, err := cid.Decode("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + _, err = c.GetClosestPeers(ctx, testCid) + require.Error(t, err) + // All these routing types should indicate DHT is not available + // The exact error message may vary based on implementation details + errStr := err.Error() + assert.True(t, + strings.Contains(errStr, "not supported") || + strings.Contains(errStr, "not available") || + strings.Contains(errStr, "500"), + "Expected error indicating DHT not available for routing type %s, got: %s", routingType, errStr) + }) + } + }) + + t.Run("GetClosestPeers returns peers", func(t *testing.T) { + t.Parallel() + + routingTypes := []string{"auto", "autoclient", "dht", "dhtclient"} + for _, routingType := range routingTypes { + t.Run("routing_type="+routingType, func(t *testing.T) { + t.Parallel() + + // Single node with DHT and real bootstrap peers + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + cfg.Gateway.ExposeRoutingAPI = config.True + cfg.Routing.Type = config.NewOptionalString(routingType) + // Set real bootstrap peers from boxo/autoconf + cfg.Bootstrap = autoconf.FallbackBootstrapPeers + }) + node.StartDaemon() + defer node.StopDaemon() + + c, err := client.New(node.GatewayURL()) + require.NoError(t, err) + + // Query for closest peers to our own peer ID + key := peer.ToCid(node.PeerID()) + + // Wait for WAN DHT routing table to be populated. + // The server has a 30-second routing timeout, so we use 60 seconds + // per request to allow for network latency while preventing hangs. + // Total wait time is 2 minutes (locally passes in under 1 minute). 
+ var records []*types.PeerRecord + require.EventuallyWithT(t, func(ct *assert.CollectT) { + ctx, cancel := context.WithTimeout(t.Context(), 60*time.Second) + defer cancel() + resultsIter, err := c.GetClosestPeers(ctx, key) + if !assert.NoError(ct, err) { + return + } + records, err = iter.ReadAllResults(resultsIter) + assert.NoError(ct, err) + }, 2*time.Minute, 5*time.Second) + + // Verify we got some peers back from WAN DHT + require.NotEmpty(t, records, "should return peers close to own peerid") + + // Per IPIP-0476, GetClosestPeers returns at most 20 peers + assert.LessOrEqual(t, len(records), 20, "IPIP-0476 limits GetClosestPeers to 20 peers") + + // Verify structure of returned records + for _, record := range records { + assert.Equal(t, types.SchemaPeer, record.Schema) + assert.NotNil(t, record.ID) + assert.NotEmpty(t, record.Addrs, "peer record should have addresses") + } + }) + } + }) } diff --git a/test/cli/dht_autoclient_test.go b/test/cli/dht_autoclient_test.go index 39aa5b258..75e1cc241 100644 --- a/test/cli/dht_autoclient_test.go +++ b/test/cli/dht_autoclient_test.go @@ -4,8 +4,8 @@ import ( "bytes" "testing" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" ) @@ -16,10 +16,11 @@ func TestDHTAutoclient(t *testing.T) { node.IPFS("config", "Routing.Type", "autoclient") }) nodes.StartDaemons().Connect() + t.Cleanup(func() { nodes.StopDaemons() }) t.Run("file added on node in client mode is retrievable from node in client mode", func(t *testing.T) { t.Parallel() - randomBytes := testutils.RandomBytes(1000) + randomBytes := random.Bytes(1000) randomBytes = append(randomBytes, '\r') hash := nodes[8].IPFSAdd(bytes.NewReader(randomBytes)) @@ -29,7 +30,7 @@ func TestDHTAutoclient(t *testing.T) { t.Run("file added on node in server mode is retrievable from all nodes", func(t *testing.T) { t.Parallel() - randomBytes := testutils.RandomBytes(1000) + 
randomBytes := random.Bytes(1000) hash := nodes[0].IPFSAdd(bytes.NewReader(randomBytes)) for i := 0; i < 10; i++ { diff --git a/test/cli/dht_opt_prov_test.go b/test/cli/dht_opt_prov_test.go index f7b492066..291d48c54 100644 --- a/test/cli/dht_opt_prov_test.go +++ b/test/cli/dht_opt_prov_test.go @@ -3,9 +3,9 @@ package cli import ( "testing" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" ) @@ -17,11 +17,14 @@ func TestDHTOptimisticProvide(t *testing.T) { nodes[0].UpdateConfig(func(cfg *config.Config) { cfg.Experimental.OptimisticProvide = true + // Optimistic provide only works with the legacy provider. + cfg.Provide.DHT.SweepEnabled = config.False }) nodes.StartDaemons().Connect() + defer nodes.StopDaemons() - hash := nodes[0].IPFSAddStr(testutils.RandomStr(100)) + hash := nodes[0].IPFSAddStr(string(random.Bytes(100))) nodes[0].IPFS("routing", "provide", hash) res := nodes[1].IPFS("routing", "findprovs", "--num-providers=1", hash) diff --git a/test/cli/files_test.go b/test/cli/files_test.go new file mode 100644 index 000000000..4760c23aa --- /dev/null +++ b/test/cli/files_test.go @@ -0,0 +1,355 @@ +package cli + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFilesCp(t *testing.T) { + t.Parallel() + + t.Run("files cp with valid UnixFS succeeds", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Create simple text file + data := "testing files cp command" + cid := node.IPFSAddStr(data) + + // Copy form IPFS => MFS + res := node.IPFS("files", "cp", fmt.Sprintf("/ipfs/%s", cid), "/valid-file") + assert.NoError(t, res.Err) + + // verification + catRes := node.IPFS("files", "read", 
"/valid-file") + assert.Equal(t, data, catRes.Stdout.Trimmed()) + }) + + t.Run("files cp with unsupported DAG node type fails", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // MFS UnixFS is limited to dag-pb or raw, so we create a dag-cbor node to test this + jsonData := `{"data": "not a UnixFS node"}` + tempFile := filepath.Join(node.Dir, "test.json") + err := os.WriteFile(tempFile, []byte(jsonData), 0644) + require.NoError(t, err) + cid := node.IPFS("dag", "put", "--input-codec=json", "--store-codec=dag-cbor", tempFile).Stdout.Trimmed() + + // copy without --force + res := node.RunIPFS("files", "cp", fmt.Sprintf("/ipfs/%s", cid), "/invalid-file") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "Error: cp: source must be a valid UnixFS (dag-pb or raw codec)") + }) + + t.Run("files cp with invalid UnixFS data structure fails", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Create an invalid proto file + data := []byte{0xDE, 0xAD, 0xBE, 0xEF} // Invalid protobuf data + tempFile := filepath.Join(node.Dir, "invalid-proto.bin") + err := os.WriteFile(tempFile, data, 0644) + require.NoError(t, err) + + res := node.IPFS("block", "put", "--format=raw", tempFile) + require.NoError(t, res.Err) + + // we manually changed codec from raw to dag-pb to test "bad dag-pb" scenario + cid := "bafybeic7pdbte5heh6u54vszezob3el6exadoiw4wc4ne7ny2x7kvajzkm" + + // should fail because node cannot be read as a valid dag-pb + cpResNoForce := node.RunIPFS("files", "cp", fmt.Sprintf("/ipfs/%s", cid), "/invalid-proto") + assert.NotEqual(t, 0, cpResNoForce.ExitErr.ExitCode()) + assert.Contains(t, cpResNoForce.Stderr.String(), "Error") + }) + + t.Run("files cp with raw node succeeds", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() 
+ + // Create a raw node + data := "raw data" + tempFile := filepath.Join(node.Dir, "raw.bin") + err := os.WriteFile(tempFile, []byte(data), 0644) + require.NoError(t, err) + + res := node.IPFS("block", "put", "--format=raw", tempFile) + require.NoError(t, res.Err) + cid := res.Stdout.Trimmed() + + // Copy from IPFS to MFS (raw nodes should work without --force) + cpRes := node.IPFS("files", "cp", fmt.Sprintf("/ipfs/%s", cid), "/raw-file") + assert.NoError(t, cpRes.Err) + + // Verify the file was copied correctly + catRes := node.IPFS("files", "read", "/raw-file") + assert.Equal(t, data, catRes.Stdout.Trimmed()) + }) + + t.Run("files cp creates intermediate directories with -p", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Create a simple text file and add it to IPFS + data := "hello parent directories" + tempFile := filepath.Join(node.Dir, "parent-test.txt") + err := os.WriteFile(tempFile, []byte(data), 0644) + require.NoError(t, err) + + cid := node.IPFS("add", "-Q", tempFile).Stdout.Trimmed() + + // Copy from IPFS to MFS with parent flag + res := node.IPFS("files", "cp", "-p", fmt.Sprintf("/ipfs/%s", cid), "/parent/dir/file") + assert.NoError(t, res.Err) + + // Verify the file and directories were created + lsRes := node.IPFS("files", "ls", "/parent/dir") + assert.Contains(t, lsRes.Stdout.String(), "file") + + catRes := node.IPFS("files", "read", "/parent/dir/file") + assert.Equal(t, data, catRes.Stdout.Trimmed()) + }) +} + +func TestFilesRm(t *testing.T) { + t.Parallel() + + t.Run("files rm with --flush=false returns error", func(t *testing.T) { + // Test that files rm rejects --flush=false so user does not assume disabling flush works + // (rm ignored it before, better to explicitly error) + // See https://github.com/ipfs/kubo/issues/10842 + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Create a file to remove + 
node.IPFS("files", "mkdir", "/test-dir") + + // Try to remove with --flush=false, should error + res := node.RunIPFS("files", "rm", "-r", "--flush=false", "/test-dir") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "files rm always flushes for safety") + assert.Contains(t, res.Stderr.String(), "cannot be set to false") + + // Verify the directory still exists (wasn't removed due to error) + lsRes := node.IPFS("files", "ls", "/") + assert.Contains(t, lsRes.Stdout.String(), "test-dir") + }) + + t.Run("files rm with --flush=true works", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Create a file to remove + node.IPFS("files", "mkdir", "/test-dir") + + // Remove with explicit --flush=true, should work + res := node.IPFS("files", "rm", "-r", "--flush=true", "/test-dir") + assert.NoError(t, res.Err) + + // Verify the directory was removed + lsRes := node.IPFS("files", "ls", "/") + assert.NotContains(t, lsRes.Stdout.String(), "test-dir") + }) + + t.Run("files rm without flush flag works (default behavior)", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Create a file to remove + node.IPFS("files", "mkdir", "/test-dir") + + // Remove without flush flag (should use default which is true) + res := node.IPFS("files", "rm", "-r", "/test-dir") + assert.NoError(t, res.Err) + + // Verify the directory was removed + lsRes := node.IPFS("files", "ls", "/") + assert.NotContains(t, lsRes.Stdout.String(), "test-dir") + }) +} + +func TestFilesNoFlushLimit(t *testing.T) { + t.Parallel() + + t.Run("reaches default limit of 256 operations", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Perform 256 operations with --flush=false (should succeed) + for i := 0; i < 256; i++ { + res := node.IPFS("files", "mkdir", 
"--flush=false", fmt.Sprintf("/dir%d", i)) + assert.NoError(t, res.Err, "operation %d should succeed", i+1) + } + + // 257th operation should fail + res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir256") + require.NotNil(t, res.ExitErr, "command should have failed") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "reached limit of 256 unflushed MFS operations") + assert.Contains(t, res.Stderr.String(), "run 'ipfs files flush'") + assert.Contains(t, res.Stderr.String(), "use --flush=true") + assert.Contains(t, res.Stderr.String(), "increase Internal.MFSNoFlushLimit") + }) + + t.Run("custom limit via config", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Set custom limit to 5 + node.UpdateConfig(func(cfg *config.Config) { + limit := config.NewOptionalInteger(5) + cfg.Internal.MFSNoFlushLimit = limit + }) + + node.StartDaemon() + defer node.StopDaemon() + + // Perform 5 operations (should succeed) + for i := 0; i < 5; i++ { + res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i)) + assert.NoError(t, res.Err, "operation %d should succeed", i+1) + } + + // 6th operation should fail + res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir5") + require.NotNil(t, res.ExitErr, "command should have failed") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "reached limit of 5 unflushed MFS operations") + }) + + t.Run("flush=true resets counter", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Set limit to 3 for faster testing + node.UpdateConfig(func(cfg *config.Config) { + limit := config.NewOptionalInteger(3) + cfg.Internal.MFSNoFlushLimit = limit + }) + + node.StartDaemon() + defer node.StopDaemon() + + // Do 2 operations with --flush=false + node.IPFS("files", "mkdir", "--flush=false", "/dir1") + node.IPFS("files", "mkdir", "--flush=false", "/dir2") + + // Operation with 
--flush=true should reset counter + node.IPFS("files", "mkdir", "--flush=true", "/dir3") + + // Now we should be able to do 3 more operations with --flush=false + for i := 4; i <= 6; i++ { + res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i)) + assert.NoError(t, res.Err, "operation after flush should succeed") + } + + // 4th operation after reset should fail + res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir7") + require.NotNil(t, res.ExitErr, "command should have failed") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "reached limit of 3 unflushed MFS operations") + }) + + t.Run("explicit flush command resets counter", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Set limit to 3 for faster testing + node.UpdateConfig(func(cfg *config.Config) { + limit := config.NewOptionalInteger(3) + cfg.Internal.MFSNoFlushLimit = limit + }) + + node.StartDaemon() + defer node.StopDaemon() + + // Do 2 operations with --flush=false + node.IPFS("files", "mkdir", "--flush=false", "/dir1") + node.IPFS("files", "mkdir", "--flush=false", "/dir2") + + // Explicit flush should reset counter + node.IPFS("files", "flush") + + // Now we should be able to do 3 more operations + for i := 3; i <= 5; i++ { + res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i)) + assert.NoError(t, res.Err, "operation after flush should succeed") + } + + // 4th operation should fail + res := node.RunIPFS("files", "mkdir", "--flush=false", "/dir6") + require.NotNil(t, res.ExitErr, "command should have failed") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "reached limit of 3 unflushed MFS operations") + }) + + t.Run("limit=0 disables the feature", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Set limit to 0 (disabled) + node.UpdateConfig(func(cfg *config.Config) { + limit := 
config.NewOptionalInteger(0) + cfg.Internal.MFSNoFlushLimit = limit + }) + + node.StartDaemon() + defer node.StopDaemon() + + // Should be able to do many operations without error + for i := 0; i < 300; i++ { + res := node.IPFS("files", "mkdir", "--flush=false", fmt.Sprintf("/dir%d", i)) + assert.NoError(t, res.Err, "operation %d should succeed with limit disabled", i+1) + } + }) + + t.Run("different MFS commands count towards limit", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Set limit to 5 for testing + node.UpdateConfig(func(cfg *config.Config) { + limit := config.NewOptionalInteger(5) + cfg.Internal.MFSNoFlushLimit = limit + }) + + node.StartDaemon() + defer node.StopDaemon() + + // Mix of different MFS operations (5 operations to hit the limit) + node.IPFS("files", "mkdir", "--flush=false", "/testdir") + // Create a file first, then copy it + testCid := node.IPFSAddStr("test content") + node.IPFS("files", "cp", "--flush=false", fmt.Sprintf("/ipfs/%s", testCid), "/testfile") + node.IPFS("files", "cp", "--flush=false", "/testfile", "/testfile2") + node.IPFS("files", "mv", "--flush=false", "/testfile2", "/testfile3") + node.IPFS("files", "mkdir", "--flush=false", "/anotherdir") + + // 6th operation should fail + res := node.RunIPFS("files", "mkdir", "--flush=false", "/another") + require.NotNil(t, res.ExitErr, "command should have failed") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "reached limit of 5 unflushed MFS operations") + }) +} diff --git a/test/cli/fixtures/TestDagStatExpectedOutput.txt b/test/cli/fixtures/TestDagStatExpectedOutput.txt index 9e709f4a2..87bc405a1 100644 --- a/test/cli/fixtures/TestDagStatExpectedOutput.txt +++ b/test/cli/fixtures/TestDagStatExpectedOutput.txt @@ -4,9 +4,9 @@ bafyreibmdfd7c5db4kls4ty57zljfhqv36gi43l6txl44pi423wwmeskwy 2 53 bafyreie3njilzdi4ixumru4nzgecsnjtu7fzfcwhg7e6s4s5i7cnbslvn4 2 53 Summary -Total Size: 99 +Total Size: 99 (99 B) 
Unique Blocks: 3 -Shared Size: 7 +Shared Size: 7 (7 B) Ratio: 1.070707 diff --git a/test/cli/fuse_test.go b/test/cli/fuse_test.go new file mode 100644 index 000000000..6182a069a --- /dev/null +++ b/test/cli/fuse_test.go @@ -0,0 +1,166 @@ +package cli + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/require" +) + +func TestFUSE(t *testing.T) { + testutils.RequiresFUSE(t) + t.Parallel() + + t.Run("mount and unmount work correctly", func(t *testing.T) { + t.Parallel() + + // Create a node and start daemon + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + // Create mount directories in the node's working directory + nodeDir := node.Dir + ipfsMount := filepath.Join(nodeDir, "ipfs") + ipnsMount := filepath.Join(nodeDir, "ipns") + mfsMount := filepath.Join(nodeDir, "mfs") + + err := os.MkdirAll(ipfsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(ipnsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(mfsMount, 0755) + require.NoError(t, err) + + // Ensure any existing mounts are cleaned up first + failOnError := false // mount points might not exist from previous runs + doUnmount(t, ipfsMount, failOnError) + doUnmount(t, ipnsMount, failOnError) + doUnmount(t, mfsMount, failOnError) + + // Test mount operation + result := node.IPFS("mount", "-f", ipfsMount, "-n", ipnsMount, "-m", mfsMount) + + // Verify mount output + expectedOutput := "IPFS mounted at: " + ipfsMount + "\n" + + "IPNS mounted at: " + ipnsMount + "\n" + + "MFS mounted at: " + mfsMount + "\n" + require.Equal(t, expectedOutput, result.Stdout.String()) + + // Test basic MFS functionality via FUSE mount + testFile := filepath.Join(mfsMount, "testfile") + testContent := "hello fuse world" + + // Create file via FUSE mount + err = os.WriteFile(testFile, []byte(testContent), 0644) + require.NoError(t, err) + + // Verify file 
appears in MFS via IPFS commands + result = node.IPFS("files", "ls", "/") + require.Contains(t, result.Stdout.String(), "testfile") + + // Read content back via MFS FUSE mount + readContent, err := os.ReadFile(testFile) + require.NoError(t, err) + require.Equal(t, testContent, string(readContent)) + + // Get the CID of the MFS file + result = node.IPFS("files", "stat", "/testfile", "--format=") + fileCID := strings.TrimSpace(result.Stdout.String()) + require.NotEmpty(t, fileCID, "should have a CID for the MFS file") + + // Read the same content via IPFS FUSE mount using the CID + ipfsFile := filepath.Join(ipfsMount, fileCID) + ipfsContent, err := os.ReadFile(ipfsFile) + require.NoError(t, err) + require.Equal(t, testContent, string(ipfsContent), "content should match between MFS and IPFS mounts") + + // Verify both FUSE mounts return identical data + require.Equal(t, readContent, ipfsContent, "MFS and IPFS FUSE mounts should return identical data") + + // Test that mount directories cannot be removed while mounted + err = os.Remove(ipfsMount) + require.Error(t, err, "should not be able to remove mounted directory") + + // Stop daemon - this should trigger automatic unmount via context cancellation + node.StopDaemon() + + // Daemon shutdown should handle unmount synchronously via context.AfterFunc + + // Verify directories can now be removed (indicating successful unmount) + err = os.Remove(ipfsMount) + require.NoError(t, err, "should be able to remove directory after unmount") + err = os.Remove(ipnsMount) + require.NoError(t, err, "should be able to remove directory after unmount") + err = os.Remove(mfsMount) + require.NoError(t, err, "should be able to remove directory after unmount") + }) + + t.Run("explicit unmount works", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + // Create mount directories + nodeDir := node.Dir + ipfsMount := filepath.Join(nodeDir, "ipfs") + ipnsMount := filepath.Join(nodeDir, 
"ipns") + mfsMount := filepath.Join(nodeDir, "mfs") + + err := os.MkdirAll(ipfsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(ipnsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(mfsMount, 0755) + require.NoError(t, err) + + // Clean up any existing mounts + failOnError := false // mount points might not exist from previous runs + doUnmount(t, ipfsMount, failOnError) + doUnmount(t, ipnsMount, failOnError) + doUnmount(t, mfsMount, failOnError) + + // Mount + node.IPFS("mount", "-f", ipfsMount, "-n", ipnsMount, "-m", mfsMount) + + // Explicit unmount via platform-specific command + failOnError = true // test that explicit unmount works correctly + doUnmount(t, ipfsMount, failOnError) + doUnmount(t, ipnsMount, failOnError) + doUnmount(t, mfsMount, failOnError) + + // Verify directories can be removed after explicit unmount + err = os.Remove(ipfsMount) + require.NoError(t, err) + err = os.Remove(ipnsMount) + require.NoError(t, err) + err = os.Remove(mfsMount) + require.NoError(t, err) + + node.StopDaemon() + }) +} + +// doUnmount performs platform-specific unmount, similar to sharness do_umount +// failOnError: if true, unmount errors cause test failure; if false, errors are ignored (useful for cleanup) +func doUnmount(t *testing.T, mountPoint string, failOnError bool) { + t.Helper() + var cmd *exec.Cmd + if runtime.GOOS == "linux" { + // fusermount -u: unmount filesystem (strict - fails if busy) + cmd = exec.Command("fusermount", "-u", mountPoint) + } else { + cmd = exec.Command("umount", mountPoint) + } + + err := cmd.Run() + if err != nil && failOnError { + t.Fatalf("failed to unmount %s: %v", mountPoint, err) + } +} diff --git a/test/cli/gateway_limits_test.go b/test/cli/gateway_limits_test.go new file mode 100644 index 000000000..990eabb1a --- /dev/null +++ b/test/cli/gateway_limits_test.go @@ -0,0 +1,134 @@ +package cli + +import ( + "net/http" + "testing" + "time" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + 
"github.com/stretchr/testify/assert" +) + +// TestGatewayLimits tests the gateway request limiting and timeout features. +// These are basic integration tests that verify the configuration works. +// For comprehensive tests, see: +// - github.com/ipfs/boxo/gateway/middleware_retrieval_timeout_test.go +// - github.com/ipfs/boxo/gateway/middleware_ratelimit_test.go +func TestGatewayLimits(t *testing.T) { + t.Parallel() + + t.Run("RetrievalTimeout", func(t *testing.T) { + t.Parallel() + + // Create a node with a short retrieval timeout + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + // Set a 1 second timeout for retrieval + cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(1 * time.Second) + }) + node.StartDaemon() + defer node.StopDaemon() + + // Add content that can be retrieved quickly + cid := node.IPFSAddStr("test content") + + client := node.GatewayClient() + + // Normal request should succeed (content is local) + resp := client.Get("/ipfs/" + cid) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "test content", resp.Body) + + // Request for non-existent content should timeout + // Using a CID that has no providers (generated with ipfs add -n) + nonExistentCID := "bafkreif6lrhgz3fpiwypdk65qrqiey7svgpggruhbylrgv32l3izkqpsc4" + + // Create a client with longer timeout than the gateway's retrieval timeout + // to ensure we get the gateway's 504 response + clientWithTimeout := &harness.HTTPClient{ + Client: &http.Client{ + Timeout: 5 * time.Second, + }, + BaseURL: client.BaseURL, + } + + resp = clientWithTimeout.Get("/ipfs/" + nonExistentCID) + assert.Equal(t, http.StatusGatewayTimeout, resp.StatusCode, "Expected 504 Gateway Timeout for stuck retrieval") + assert.Contains(t, resp.Body, "Unable to retrieve content within timeout period") + }) + + t.Run("MaxConcurrentRequests", func(t *testing.T) { + t.Parallel() + + // Create a node with a low concurrent request limit + node := 
harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + // Allow only 1 concurrent request to make test deterministic + cfg.Gateway.MaxConcurrentRequests = config.NewOptionalInteger(1) + // Set retrieval timeout so blocking requests don't hang forever + cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(2 * time.Second) + }) + node.StartDaemon() + defer node.StopDaemon() + + // Add some content - use a non-existent CID that will block during retrieval + // to ensure we can control timing + blockingCID := "bafkreif6lrhgz3fpiwypdk65qrqiey7svgpggruhbylrgv32l3izkqpsc4" + normalCID := node.IPFSAddStr("test content for concurrent request limiting") + + client := node.GatewayClient() + + // First, verify single request succeeds + resp := client.Get("/ipfs/" + normalCID) + assert.Equal(t, http.StatusOK, resp.StatusCode) + + // Now test deterministic 429 response: + // Start a blocking request that will occupy the single slot, + // then make another request that MUST get 429 + + blockingStarted := make(chan bool) + blockingDone := make(chan bool) + + // Start a request that will block (searching for non-existent content) + go func() { + blockingStarted <- true + // This will block until timeout looking for providers + client.Get("/ipfs/" + blockingCID) + blockingDone <- true + }() + + // Wait for blocking request to start and occupy the slot + <-blockingStarted + time.Sleep(1 * time.Second) // Ensure it has acquired the semaphore + + // This request MUST get 429 because the slot is occupied + resp = client.Get("/ipfs/" + normalCID + "?must-get-429=true") + assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode, "Second request must get 429 when slot is occupied") + + // Verify 429 response headers + retryAfter := resp.Headers.Get("Retry-After") + assert.NotEmpty(t, retryAfter, "Retry-After header must be set on 429 response") + assert.Equal(t, "60", retryAfter, "Retry-After must be 60 seconds") + + cacheControl := 
resp.Headers.Get("Cache-Control") + assert.Equal(t, "no-store", cacheControl, "Cache-Control must be no-store on 429 response") + + assert.Contains(t, resp.Body, "Too many requests", "429 response must contain error message") + + // Clean up: wait for blocking request to timeout (it will timeout due to gateway retrieval timeout) + select { + case <-blockingDone: + // Good, it completed + case <-time.After(10 * time.Second): + // Give it more time if needed + } + + // Wait a bit more to ensure slot is fully released + time.Sleep(1 * time.Second) + + // After blocking request completes, new request should succeed + resp = client.Get("/ipfs/" + normalCID + "?after-limit-cleared=true") + assert.Equal(t, http.StatusOK, resp.StatusCode, "Request must succeed after slot is freed") + }) +} diff --git a/test/cli/gateway_range_test.go b/test/cli/gateway_range_test.go index 2d8ce1a3e..9efe08710 100644 --- a/test/cli/gateway_range_test.go +++ b/test/cli/gateway_range_test.go @@ -27,6 +27,7 @@ func TestGatewayHAMTDirectory(t *testing.T) { // Start node h := harness.NewT(t) node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline") + defer node.StopDaemon() client := node.GatewayClient() // Import fixtures @@ -56,6 +57,7 @@ func TestGatewayHAMTRanges(t *testing.T) { // Start node h := harness.NewT(t) node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline") + t.Cleanup(func() { node.StopDaemon() }) client := node.GatewayClient() // Import fixtures diff --git a/test/cli/gateway_test.go b/test/cli/gateway_test.go index 0a10782f9..b80d2d700 100644 --- a/test/cli/gateway_test.go +++ b/test/cli/gateway_test.go @@ -1,6 +1,7 @@ package cli import ( + "bufio" "context" "encoding/json" "fmt" @@ -11,6 +12,7 @@ import ( "strconv" "strings" "testing" + "time" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" @@ -26,6 +28,7 @@ func TestGateway(t *testing.T) { t.Parallel() h := harness.NewT(t) node := 
h.NewNode().Init().StartDaemon("--offline") + t.Cleanup(func() { node.StopDaemon() }) cid := node.IPFSAddStr("Hello Worlds!") peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36) @@ -232,36 +235,13 @@ func TestGateway(t *testing.T) { cfg.API.HTTPHeaders = map[string][]string{header: values} }) node.StartDaemon() + defer node.StopDaemon() resp := node.APIClient().DisableRedirects().Get("/webui/") assert.Equal(t, resp.Headers.Values(header), values) assert.Contains(t, []int{302, 301}, resp.StatusCode) }) - t.Run("GET /logs returns logs", func(t *testing.T) { - t.Parallel() - apiClient := node.APIClient() - reqURL := apiClient.BuildURL("/logs") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil) - require.NoError(t, err) - - resp, err := apiClient.Client.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - - // read the first line of the output and parse its JSON - dec := json.NewDecoder(resp.Body) - event := struct{ Event string }{} - err = dec.Decode(&event) - require.NoError(t, err) - - assert.Equal(t, "log API client connected", event.Event) - }) - t.Run("POST /api/v0/version succeeds", func(t *testing.T) { t.Parallel() resp := node.APIClient().Post("/api/v0/version", nil) @@ -279,6 +259,7 @@ func TestGateway(t *testing.T) { t.Run("pprof", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + t.Cleanup(func() { node.StopDaemon() }) apiClient := node.APIClient() t.Run("mutex", func(t *testing.T) { t.Parallel() @@ -322,6 +303,7 @@ func TestGateway(t *testing.T) { t.Parallel() h := harness.NewT(t) node := h.NewNode().Init().StartDaemon() + t.Cleanup(func() { node.StopDaemon() }) h.WriteFile("index/index.html", "

") cid := node.IPFS("add", "-Q", "-r", filepath.Join(h.Dir, "index")).Stderr.Trimmed() @@ -389,6 +371,7 @@ func TestGateway(t *testing.T) { cfg.Addresses.Gateway = config.Strings{"/ip4/127.0.0.1/tcp/32563"} }) node.StartDaemon() + defer node.StopDaemon() b, err := os.ReadFile(filepath.Join(node.Dir, "gateway")) require.NoError(t, err) @@ -410,6 +393,7 @@ func TestGateway(t *testing.T) { assert.NoError(t, err) nodes.StartDaemons().Connect() + t.Cleanup(func() { nodes.StopDaemons() }) t.Run("not present", func(t *testing.T) { cidFoo := node2.IPFSAddStr("foo") @@ -482,6 +466,7 @@ func TestGateway(t *testing.T) { } }) node.StartDaemon() + defer node.StopDaemon() cidFoo := node.IPFSAddStr("foo") client := node.GatewayClient() @@ -531,6 +516,7 @@ func TestGateway(t *testing.T) { node := harness.NewT(t).NewNode().Init() node.StartDaemon() + defer node.StopDaemon() client := node.GatewayClient() res := client.Get("/ipfs/invalid-thing", func(r *http.Request) { @@ -548,6 +534,7 @@ func TestGateway(t *testing.T) { cfg.Gateway.DisableHTMLErrors = config.True }) node.StartDaemon() + defer node.StopDaemon() client := node.GatewayClient() res := client.Get("/ipfs/invalid-thing", func(r *http.Request) { @@ -558,3 +545,48 @@ func TestGateway(t *testing.T) { }) }) } + +// TestLogs tests that GET /logs returns log messages. This test is separate +// because it requires setting the server's log level to "info" which may +// change the output expected by other tests. 
+func TestLogs(t *testing.T) { + h := harness.NewT(t) + + t.Setenv("GOLOG_LOG_LEVEL", "info") + + node := h.NewNode().Init().StartDaemon("--offline") + defer node.StopDaemon() + cid := node.IPFSAddStr("Hello Worlds!") + + peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36) + assert.NoError(t, err) + + client := node.GatewayClient() + client.TemplateData = map[string]string{ + "CID": cid, + "PeerID": peerID, + } + + apiClient := node.APIClient() + reqURL := apiClient.BuildURL("/logs") + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil) + require.NoError(t, err) + + resp, err := apiClient.Client.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + var found bool + scanner := bufio.NewScanner(resp.Body) + for scanner.Scan() { + if strings.Contains(scanner.Text(), "log API client connected") { + found = true + break + } + } + assert.True(t, found) +} diff --git a/test/cli/harness/ipfs.go b/test/cli/harness/ipfs.go index 8537e2aa2..2f7a8f18e 100644 --- a/test/cli/harness/ipfs.go +++ b/test/cli/harness/ipfs.go @@ -76,6 +76,17 @@ func (n *Node) IPFSAddStr(content string, args ...string) string { return n.IPFSAdd(strings.NewReader(content), args...) } +// IPFSAddDeterministic produces a CID of a file of a certain size, filled with deterministically generated bytes based on some seed. +// This ensures deterministic CID on the other end, that can be used in tests. +func (n *Node) IPFSAddDeterministic(size string, seed string, args ...string) string { + log.Debugf("node %d adding %s of deterministic pseudo-random data with seed %q and args: %v", n.ID, size, seed, args) + reader, err := DeterministicRandomReader(size, seed) + if err != nil { + panic(err) + } + return n.IPFSAdd(reader, args...) 
+} + func (n *Node) IPFSAdd(content io.Reader, args ...string) string { log.Debugf("node %d adding with args: %v", n.ID, args) fullArgs := []string{"add", "-q"} @@ -90,6 +101,34 @@ func (n *Node) IPFSAdd(content io.Reader, args ...string) string { return out } +func (n *Node) IPFSBlockPut(content io.Reader, args ...string) string { + log.Debugf("node %d block put with args: %v", n.ID, args) + fullArgs := []string{"block", "put"} + fullArgs = append(fullArgs, args...) + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: fullArgs, + CmdOpts: []CmdOpt{RunWithStdin(content)}, + }) + out := strings.TrimSpace(res.Stdout.String()) + log.Debugf("block put result: %q", out) + return out +} + +func (n *Node) IPFSDAGPut(content io.Reader, args ...string) string { + log.Debugf("node %d dag put with args: %v", n.ID, args) + fullArgs := []string{"dag", "put"} + fullArgs = append(fullArgs, args...) + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: fullArgs, + CmdOpts: []CmdOpt{RunWithStdin(content)}, + }) + out := strings.TrimSpace(res.Stdout.String()) + log.Debugf("dag put result: %q", out) + return out +} + func (n *Node) IPFSDagImport(content io.Reader, cid string, args ...string) error { log.Debugf("node %d dag import with args: %v", n.ID, args) fullArgs := []string{"dag", "import", "--pin-roots=false"} @@ -108,3 +147,15 @@ func (n *Node) IPFSDagImport(content io.Reader, cid string, args ...string) erro }) return res.Err } + +/* +func (n *Node) IPFSDagExport(cid string, car *os.File) error { + log.Debugf("node %d dag export of %s to %q with args: %v", n.ID, cid, car.Name()) + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: []string{"dag", "export", cid}, + CmdOpts: []CmdOpt{RunWithStdout(car)}, + }) + return res.Err +} +*/ diff --git a/test/cli/harness/node.go b/test/cli/harness/node.go index af389e0ba..0315e81df 100644 --- a/test/cli/harness/node.go +++ b/test/cli/harness/node.go @@ -54,6 +54,42 @@ func BuildNode(ipfsBin, baseDir 
string, id int) *Node { env := environToMap(os.Environ()) env["IPFS_PATH"] = dir + // If using "ipfs" binary name, provide helpful binary information + if ipfsBin == "ipfs" { + // Check if cmd/ipfs/ipfs exists (simple relative path check) + localBinary := "cmd/ipfs/ipfs" + localExists := false + if _, err := os.Stat(localBinary); err == nil { + localExists = true + if abs, err := filepath.Abs(localBinary); err == nil { + localBinary = abs + } + } + + // Check if ipfs is available in PATH + pathBinary, pathErr := exec.LookPath("ipfs") + + // Handle different scenarios + if pathErr != nil { + // No ipfs in PATH + if localExists { + fmt.Printf("WARNING: No 'ipfs' found in PATH, but local binary exists at %s\n", localBinary) + fmt.Printf("Consider adding it to PATH or run: export PATH=\"$(pwd)/cmd/ipfs:$PATH\"\n") + } else { + fmt.Printf("ERROR: No 'ipfs' binary found in PATH and no local build at cmd/ipfs/ipfs\n") + fmt.Printf("Run 'make build' first or install ipfs and add it to PATH\n") + panic("ipfs binary not available") + } + } else { + // ipfs found in PATH + if localExists && localBinary != pathBinary { + fmt.Printf("NOTE: Local binary at %s differs from PATH binary at %s\n", localBinary, pathBinary) + fmt.Printf("Consider adding the local binary to PATH if you want to use the version built by 'make build'\n") + } + // If they match or no local binary, no message needed + } + } + return &Node{ ID: id, Dir: dir, @@ -209,6 +245,14 @@ func (n *Node) Init(ipfsArgs ...string) *Node { cfg.Swarm.DisableNatPortMap = true cfg.Discovery.MDNS.Enabled = n.EnableMDNS cfg.Routing.LoopbackAddressesOnLanDHT = config.True + // Telemetry disabled by default in tests. 
+ cfg.Plugins = config.Plugins{ + Plugins: map[string]config.Plugin{ + "telemetry": config.Plugin{ + Disabled: true, + }, + }, + } }) return n } @@ -457,28 +501,60 @@ func (n *Node) IsAlive() bool { } func (n *Node) SwarmAddrs() []multiaddr.Multiaddr { - res := n.Runner.MustRun(RunRequest{ + res := n.Runner.Run(RunRequest{ Path: n.IPFSBin, Args: []string{"swarm", "addrs", "local"}, }) + if res.ExitCode() != 0 { + // If swarm command fails (e.g., daemon not online), return empty slice + log.Debugf("Node %d: swarm addrs local failed (exit %d): %s", n.ID, res.ExitCode(), res.Stderr.String()) + return []multiaddr.Multiaddr{} + } out := strings.TrimSpace(res.Stdout.String()) + if out == "" { + log.Debugf("Node %d: swarm addrs local returned empty output", n.ID) + return []multiaddr.Multiaddr{} + } + log.Debugf("Node %d: swarm addrs local output: %s", n.ID, out) outLines := strings.Split(out, "\n") var addrs []multiaddr.Multiaddr for _, addrStr := range outLines { + addrStr = strings.TrimSpace(addrStr) + if addrStr == "" { + continue + } ma, err := multiaddr.NewMultiaddr(addrStr) if err != nil { panic(err) } addrs = append(addrs, ma) } + log.Debugf("Node %d: parsed %d swarm addresses", n.ID, len(addrs)) return addrs } +// SwarmAddrsWithTimeout waits for swarm addresses to be available +func (n *Node) SwarmAddrsWithTimeout(timeout time.Duration) []multiaddr.Multiaddr { + start := time.Now() + for time.Since(start) < timeout { + addrs := n.SwarmAddrs() + if len(addrs) > 0 { + return addrs + } + time.Sleep(100 * time.Millisecond) + } + return []multiaddr.Multiaddr{} +} + func (n *Node) SwarmAddrsWithPeerIDs() []multiaddr.Multiaddr { + return n.SwarmAddrsWithPeerIDsTimeout(5 * time.Second) +} + +func (n *Node) SwarmAddrsWithPeerIDsTimeout(timeout time.Duration) []multiaddr.Multiaddr { ipfsProtocol := multiaddr.ProtocolWithCode(multiaddr.P_IPFS).Name peerID := n.PeerID() var addrs []multiaddr.Multiaddr - for _, ma := range n.SwarmAddrs() { + for _, ma := range 
n.SwarmAddrsWithTimeout(timeout) { // add the peer ID to the multiaddr if it doesn't have it _, err := ma.ValueForProtocol(multiaddr.P_IPFS) if errors.Is(err, multiaddr.ErrProtocolNotFound) { @@ -496,33 +572,97 @@ func (n *Node) SwarmAddrsWithPeerIDs() []multiaddr.Multiaddr { func (n *Node) SwarmAddrsWithoutPeerIDs() []multiaddr.Multiaddr { var addrs []multiaddr.Multiaddr for _, ma := range n.SwarmAddrs() { - var components []multiaddr.Multiaddr - multiaddr.ForEach(ma, func(c multiaddr.Component) bool { + i := 0 + for _, c := range ma { if c.Protocol().Code == multiaddr.P_IPFS { - return true + continue } - components = append(components, &c) - return true - }) - ma = multiaddr.Join(components...) - addrs = append(addrs, ma) + ma[i] = c + i++ + } + ma = ma[:i] + if len(ma) > 0 { + addrs = append(addrs, ma) + } } return addrs } func (n *Node) Connect(other *Node) *Node { - n.Runner.MustRun(RunRequest{ + // Get the peer addresses to connect to + addrs := other.SwarmAddrsWithPeerIDs() + if len(addrs) == 0 { + // If no addresses available, skip connection + log.Debugf("No swarm addresses available for connection") + return n + } + // Use Run instead of MustRun to avoid panics on connection failures + res := n.Runner.Run(RunRequest{ Path: n.IPFSBin, - Args: []string{"swarm", "connect", other.SwarmAddrsWithPeerIDs()[0].String()}, + Args: []string{"swarm", "connect", addrs[0].String()}, }) + if res.ExitCode() != 0 { + log.Debugf("swarm connect failed: %s", res.Stderr.String()) + } return n } +// ConnectAndWait connects to another node and waits for the connection to be established +func (n *Node) ConnectAndWait(other *Node, timeout time.Duration) error { + // Get the peer addresses to connect to - wait up to half the timeout for addresses + addrs := other.SwarmAddrsWithPeerIDsTimeout(timeout / 2) + if len(addrs) == 0 { + return fmt.Errorf("no swarm addresses available for node %d after waiting %v", other.ID, timeout/2) + } + + otherPeerID := other.PeerID() + + // Try to 
connect + res := n.Runner.Run(RunRequest{ + Path: n.IPFSBin, + Args: []string{"swarm", "connect", addrs[0].String()}, + }) + if res.ExitCode() != 0 { + return fmt.Errorf("swarm connect failed: %s", res.Stderr.String()) + } + + // Wait for connection to be established + start := time.Now() + for time.Since(start) < timeout { + peers := n.Peers() + for _, peerAddr := range peers { + if peerID, err := peerAddr.ValueForProtocol(multiaddr.P_P2P); err == nil { + if peerID == otherPeerID.String() { + return nil // Connection established + } + } + } + time.Sleep(100 * time.Millisecond) + } + + return fmt.Errorf("timeout waiting for connection to node %d (peer %s)", other.ID, otherPeerID) +} + func (n *Node) Peers() []multiaddr.Multiaddr { - res := n.Runner.MustRun(RunRequest{ + // Wait for daemon to be ready if it's supposed to be running + if n.Daemon != nil && n.Daemon.Cmd != nil && n.Daemon.Cmd.Process != nil { + // Give daemon a short time to become ready + for i := 0; i < 10; i++ { + if n.IsAlive() { + break + } + time.Sleep(100 * time.Millisecond) + } + } + res := n.Runner.Run(RunRequest{ Path: n.IPFSBin, Args: []string{"swarm", "peers"}, }) + if res.ExitCode() != 0 { + // If swarm peers fails (e.g., daemon not online), return empty slice + log.Debugf("swarm peers failed: %s", res.Stderr.String()) + return []multiaddr.Multiaddr{} + } var addrs []multiaddr.Multiaddr for _, line := range res.Stdout.Lines() { ma, err := multiaddr.NewMultiaddr(line) diff --git a/test/cli/harness/nodes.go b/test/cli/harness/nodes.go index 113289e3c..8a5451e03 100644 --- a/test/cli/harness/nodes.go +++ b/test/cli/harness/nodes.go @@ -5,7 +5,6 @@ import ( . "github.com/ipfs/kubo/test/cli/testutils" "github.com/multiformats/go-multiaddr" - "golang.org/x/sync/errgroup" ) // Nodes is a collection of Kubo nodes along with operations on groups of nodes. 
@@ -17,37 +16,28 @@ func (n Nodes) Init(args ...string) Nodes { } func (n Nodes) ForEachPar(f func(*Node)) { - group := &errgroup.Group{} + var wg sync.WaitGroup for _, node := range n { + wg.Add(1) node := node - group.Go(func() error { + go func() { + defer wg.Done() f(node) - return nil - }) - } - err := group.Wait() - if err != nil { - panic(err) + }() } + wg.Wait() } func (n Nodes) Connect() Nodes { - wg := sync.WaitGroup{} for i, node := range n { for j, otherNode := range n { if i == j { continue } - node := node - otherNode := otherNode - wg.Add(1) - go func() { - defer wg.Done() - node.Connect(otherNode) - }() + // Do not connect in parallel, because that can cause TLS handshake problems on some platforms. + node.Connect(otherNode) } } - wg.Wait() for _, node := range n { firstPeer := node.Peers()[0] if _, err := firstPeer.ValueForProtocol(multiaddr.P_P2P); err != nil { diff --git a/test/cli/harness/pbinspect.go b/test/cli/harness/pbinspect.go new file mode 100644 index 000000000..6abddb61f --- /dev/null +++ b/test/cli/harness/pbinspect.go @@ -0,0 +1,54 @@ +package harness + +import ( + "bytes" + "encoding/json" +) + +// InspectPBNode uses dag-json output of 'ipfs dag get' to inspect +// "Logical Format" of DAG-PB as defined in +// https://web.archive.org/web/20250403194752/https://ipld.io/specs/codecs/dag-pb/spec/#logical-format +// (mainly used for inspecting Links without depending on any libraries) +func (n *Node) InspectPBNode(cid string) (PBNode, error) { + log.Debugf("node %d dag get %s as dag-json", n.ID, cid) + + var root PBNode + var dagJsonOutput bytes.Buffer + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: []string{"dag", "get", "--output-codec=dag-json", cid}, + CmdOpts: []CmdOpt{RunWithStdout(&dagJsonOutput)}, + }) + if res.Err != nil { + return root, res.Err + } + + err := json.Unmarshal(dagJsonOutput.Bytes(), &root) + if err != nil { + return root, err + } + return root, nil + +} + +// Define structs to match the JSON for 
+type PBHash struct { + Slash string `json:"/"` +} + +type PBLink struct { + Hash PBHash `json:"Hash"` + Name string `json:"Name"` + Tsize int `json:"Tsize"` +} + +type PBData struct { + Slash struct { + Bytes string `json:"bytes"` + } `json:"/"` +} + +type PBNode struct { + Data PBData `json:"Data"` + Links []PBLink `json:"Links"` +} diff --git a/test/cli/harness/peering.go b/test/cli/harness/peering.go index 7680eaf57..445c2cf26 100644 --- a/test/cli/harness/peering.go +++ b/test/cli/harness/peering.go @@ -3,6 +3,8 @@ package harness import ( "fmt" "math/rand" + "net" + "sync" "testing" "github.com/ipfs/kubo/config" @@ -13,9 +15,39 @@ type Peering struct { To int } +var ( + allocatedPorts = make(map[int]struct{}) + portMutex sync.Mutex +) + func NewRandPort() int { - n := rand.Int() - return 3000 + (n % 1000) + portMutex.Lock() + defer portMutex.Unlock() + + for i := 0; i < 100; i++ { + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + continue + } + port := l.Addr().(*net.TCPAddr).Port + l.Close() + + if _, used := allocatedPorts[port]; !used { + allocatedPorts[port] = struct{}{} + return port + } + } + + // Fallback to random port if we can't get a unique one from the OS + for i := 0; i < 1000; i++ { + port := 30000 + rand.Intn(10000) + if _, used := allocatedPorts[port]; !used { + allocatedPorts[port] = struct{}{} + return port + } + } + + panic("failed to allocate unique port after 1100 attempts") } func CreatePeerNodes(t *testing.T, n int, peerings []Peering) (*Harness, Nodes) { diff --git a/test/cli/harness/run.go b/test/cli/harness/run.go index 8ca85eb63..077af6ca5 100644 --- a/test/cli/harness/run.go +++ b/test/cli/harness/run.go @@ -3,6 +3,7 @@ package harness import ( "fmt" "io" + "os" "os/exec" "strings" ) @@ -60,8 +61,27 @@ func environToMap(environ []string) map[string]string { func (r *Runner) Run(req RunRequest) *RunResult { cmd := exec.Command(req.Path, req.Args...) 
- stdout := &Buffer{} - stderr := &Buffer{} + var stdout io.Writer + var stderr io.Writer + outbuf := &Buffer{} + errbuf := &Buffer{} + + if r.Verbose { + or, ow := io.Pipe() + errr, errw := io.Pipe() + stdout = io.MultiWriter(outbuf, ow) + stderr = io.MultiWriter(errbuf, errw) + go func() { + _, _ = io.Copy(os.Stdout, or) + }() + go func() { + _, _ = io.Copy(os.Stderr, errr) + }() + } else { + stdout = outbuf + stderr = errbuf + } + cmd.Stdout = stdout cmd.Stderr = stderr cmd.Dir = r.Dir @@ -83,8 +103,8 @@ func (r *Runner) Run(req RunRequest) *RunResult { err := req.RunFunc(cmd) result := RunResult{ - Stdout: stdout, - Stderr: stderr, + Stdout: outbuf, + Stderr: errbuf, Cmd: cmd, Err: err, } diff --git a/test/cli/http_gateway_over_libp2p_test.go b/test/cli/http_gateway_over_libp2p_test.go index f8cfe0071..58ab0217b 100644 --- a/test/cli/http_gateway_over_libp2p_test.go +++ b/test/cli/http_gateway_over_libp2p_test.go @@ -32,6 +32,7 @@ func TestGatewayOverLibp2p(t *testing.T) { p2pProxyNode := nodes[1] nodes.StartDaemons().Connect() + defer nodes.StopDaemons() // Add data to the gateway node cidDataOnGatewayNode := cid.MustParse(gwNode.IPFSAddStr("Hello Worlds2!")) @@ -65,6 +66,7 @@ func TestGatewayOverLibp2p(t *testing.T) { // Enable the experimental feature and reconnect the nodes gwNode.IPFS("config", "--json", "Experimental.GatewayOverLibp2p", "true") gwNode.StopDaemon().StartDaemon() + t.Cleanup(func() { gwNode.StopDaemon() }) nodes.Connect() // Note: the bare HTTP requests here assume that the gateway is mounted at `/` diff --git a/test/cli/http_retrieval_client_test.go b/test/cli/http_retrieval_client_test.go new file mode 100644 index 000000000..32628bfce --- /dev/null +++ b/test/cli/http_retrieval_client_test.go @@ -0,0 +1,146 @@ +package cli + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + + "github.com/ipfs/boxo/routing/http/server" + "github.com/ipfs/boxo/routing/http/types" + 
"github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/ipfs/kubo/test/cli/testutils/httprouting" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/assert" +) + +func TestHTTPRetrievalClient(t *testing.T) { + t.Parallel() + + // many moving pieces here, show more when debug is needed + debug := os.Getenv("DEBUG") == "true" + + // usee local /routing/v1/providers/{cid} and + // /ipfs/{cid} HTTP servers to confirm HTTP-only retrieval works end-to-end. + t.Run("works end-to-end with an HTTP-only provider", func(t *testing.T) { + // setup mocked HTTP Router to handle /routing/v1/providers/cid + mockRouter := &httprouting.MockHTTPContentRouter{Debug: debug} + delegatedRoutingServer := httptest.NewServer(server.Handler(mockRouter)) + t.Cleanup(func() { delegatedRoutingServer.Close() }) + + // init Kubo repo + node := harness.NewT(t).NewNode().Init() + + node.UpdateConfig(func(cfg *config.Config) { + // explicitly enable http client + cfg.HTTPRetrieval.Enabled = config.True + // allow NewMockHTTPProviderServer to use self-signed TLS cert + cfg.HTTPRetrieval.TLSInsecureSkipVerify = config.True + // setup client-only routing which asks both HTTP + DHT + // cfg.Routing.Type = config.NewOptionalString("autoclient") + // setup Kubo node to use mocked HTTP Router + cfg.Routing.DelegatedRouters = []string{delegatedRoutingServer.URL} + }) + + // compute a random CID + randStr := string(random.Bytes(100)) + res := node.PipeStrToIPFS(randStr, "add", "-qn", "--cid-version", "1") // -n means dont add to local repo, just produce CID + wantCIDStr := res.Stdout.Trimmed() + testCid := cid.MustParse(wantCIDStr) + + // setup mock HTTP provider + httpProviderServer := NewMockHTTPProviderServer(testCid, randStr, debug) + t.Cleanup(func() { httpProviderServer.Close() }) + httpHost, httpPort, err := splitHostPort(httpProviderServer.URL) + 
assert.NoError(t, err) + + // setup /routing/v1/providers/cid result that points at our mocked HTTP provider + mockHTTPProviderPeerID := "12D3KooWCjfPiojcCUmv78Wd1NJzi4Mraj1moxigp7AfQVQvGLwH" // static, it does not matter, we only care about multiaddr + mockHTTPMultiaddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%s/tls/http", httpHost, httpPort)) + mpid, _ := peer.Decode(mockHTTPProviderPeerID) + mockRouter.AddProvider(testCid, &types.PeerRecord{ + Schema: types.SchemaPeer, + ID: &mpid, + Addrs: []types.Multiaddr{{Multiaddr: mockHTTPMultiaddr}}, + // no explicit Protocols, ensure multiaddr alone is enough + }) + + // Start Kubo + node.StartDaemon() + defer node.StopDaemon() + + if debug { + fmt.Printf("delegatedRoutingServer.URL: %s\n", delegatedRoutingServer.URL) + fmt.Printf("httpProviderServer.URL: %s\n", httpProviderServer.URL) + fmt.Printf("httpProviderServer.Multiaddr: %s\n", mockHTTPMultiaddr) + fmt.Printf("testCid: %s\n", testCid) + } + + // Now, make Kubo to read testCid. it was not added to local blockstore, so it has only one provider -- a HTTP server. + + // First, confirm delegatedRoutingServer returned HTTP provider + findprovsRes := node.IPFS("routing", "findprovs", testCid.String()) + assert.Equal(t, mockHTTPProviderPeerID, findprovsRes.Stdout.Trimmed()) + + // Ok, now attempt retrieval. + // If there was no timeout and returned bytes match expected body, HTTP routing and retrieval worked end-to-end. 
+ catRes := node.IPFS("cat", testCid.String()) + assert.Equal(t, randStr, catRes.Stdout.Trimmed()) + }) +} + +// NewMockHTTPProviderServer pretends to be http provider that supports +// block response https://specs.ipfs.tech/http-gateways/trustless-gateway/#block-responses-application-vnd-ipld-raw +func NewMockHTTPProviderServer(c cid.Cid, body string, debug bool) *httptest.Server { + expectedPathPrefix := "/ipfs/" + c.String() + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if debug { + fmt.Printf("NewMockHTTPProviderServer GET %s\n", req.URL.Path) + } + if strings.HasPrefix(req.URL.Path, expectedPathPrefix) { + w.Header().Set("Content-Type", "application/vnd.ipld.raw") + w.WriteHeader(http.StatusOK) + if req.Method == "GET" { + _, err := w.Write([]byte(body)) + if err != nil { + fmt.Fprintf(os.Stderr, "NewMockHTTPProviderServer GET %s error: %v\n", req.URL.Path, err) + } + } + } else if strings.HasPrefix(req.URL.Path, "/ipfs/bafkqaaa") { + // This is probe from https://specs.ipfs.tech/http-gateways/trustless-gateway/#dedicated-probe-paths + w.Header().Set("Content-Type", "application/vnd.ipld.raw") + w.WriteHeader(http.StatusOK) + } else { + http.Error(w, "Not Found", http.StatusNotFound) + } + }) + + // Make it HTTP/2 with self-signed TLS cert + srv := httptest.NewUnstartedServer(handler) + srv.EnableHTTP2 = true + srv.StartTLS() + return srv +} + +func splitHostPort(httpUrl string) (ipAddr string, port string, err error) { + u, err := url.Parse(httpUrl) + if err != nil { + return "", "", err + } + if u.Scheme == "" || u.Host == "" { + return "", "", fmt.Errorf("invalid URL format: missing scheme or host") + } + ipAddr, port, err = net.SplitHostPort(u.Host) + if err != nil { + return "", "", fmt.Errorf("failed to split host and port from %q: %w", u.Host, err) + } + return ipAddr, port, nil +} diff --git a/test/cli/identity_cid_test.go b/test/cli/identity_cid_test.go new file mode 100644 index 000000000..61a464ac5 --- /dev/null 
+++ b/test/cli/identity_cid_test.go @@ -0,0 +1,310 @@ +package cli + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/ipfs/boxo/verifcid" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIdentityCIDOverflowProtection(t *testing.T) { + t.Parallel() + + t.Run("ipfs add --hash=identity with small data succeeds", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // small data that fits in identity CID + smallData := "small data" + tempFile := filepath.Join(node.Dir, "small.txt") + err := os.WriteFile(tempFile, []byte(smallData), 0644) + require.NoError(t, err) + + res := node.IPFS("add", "--hash=identity", tempFile) + assert.NoError(t, res.Err) + cid := strings.Fields(res.Stdout.String())[1] + + // verify it's actually using identity hash + res = node.IPFS("cid", "format", "-f", "%h", cid) + assert.NoError(t, res.Err) + assert.Equal(t, "identity", res.Stdout.Trimmed()) + }) + + t.Run("ipfs add --hash=identity with large data fails", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // data larger than verifcid.DefaultMaxIdentityDigestSize + largeData := strings.Repeat("x", verifcid.DefaultMaxIdentityDigestSize+50) + tempFile := filepath.Join(node.Dir, "large.txt") + err := os.WriteFile(tempFile, []byte(largeData), 0644) + require.NoError(t, err) + + res := node.RunIPFS("add", "--hash=identity", tempFile) + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + // should error with digest too large message + assert.Contains(t, res.Stderr.String(), "digest too large") + }) + + t.Run("ipfs add --inline with valid --inline-limit succeeds", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + smallData := "small inline 
data" + tempFile := filepath.Join(node.Dir, "inline.txt") + err := os.WriteFile(tempFile, []byte(smallData), 0644) + require.NoError(t, err) + + // use limit just under the maximum + limit := verifcid.DefaultMaxIdentityDigestSize - 10 + res := node.IPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", limit), tempFile) + assert.NoError(t, res.Err) + cid := strings.Fields(res.Stdout.String())[1] + + // verify the CID is using identity hash (inline) + res = node.IPFS("cid", "format", "-f", "%h", cid) + assert.NoError(t, res.Err) + assert.Equal(t, "identity", res.Stdout.Trimmed()) + + // verify the codec (may be dag-pb or raw depending on kubo version) + res = node.IPFS("cid", "format", "-f", "%c", cid) + assert.NoError(t, res.Err) + // Accept either raw or dag-pb as both are valid for inline data + codec := res.Stdout.Trimmed() + assert.True(t, codec == "raw" || codec == "dag-pb", "expected raw or dag-pb codec, got %s", codec) + }) + + t.Run("ipfs add --inline with excessive --inline-limit fails", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + smallData := "data" + tempFile := filepath.Join(node.Dir, "inline2.txt") + err := os.WriteFile(tempFile, []byte(smallData), 0644) + require.NoError(t, err) + + excessiveLimit := verifcid.DefaultMaxIdentityDigestSize + 50 + res := node.RunIPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", excessiveLimit), tempFile) + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), fmt.Sprintf("inline-limit %d exceeds maximum allowed size of %d bytes", excessiveLimit, verifcid.DefaultMaxIdentityDigestSize)) + }) + + t.Run("ipfs files write --hash=identity appending to identity CID switches to configured hash", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // create initial small file with identity CID + initialData := "initial" + tempFile := 
filepath.Join(node.Dir, "initial.txt") + err := os.WriteFile(tempFile, []byte(initialData), 0644) + require.NoError(t, err) + + res := node.IPFS("add", "--hash=identity", tempFile) + assert.NoError(t, res.Err) + cid1 := strings.Fields(res.Stdout.String())[1] + + // verify initial CID uses identity + res = node.IPFS("cid", "format", "-f", "%h", cid1) + assert.NoError(t, res.Err) + assert.Equal(t, "identity", res.Stdout.Trimmed()) + + // copy to MFS + res = node.IPFS("files", "cp", fmt.Sprintf("/ipfs/%s", cid1), "/identity-file") + assert.NoError(t, res.Err) + + // append data that would exceed identity CID limit + appendData := strings.Repeat("a", verifcid.DefaultMaxIdentityDigestSize) + appendFile := filepath.Join(node.Dir, "append.txt") + err = os.WriteFile(appendFile, []byte(appendData), 0644) + require.NoError(t, err) + + // append to the end of the file + // get the current data size + res = node.IPFS("files", "stat", "--format", "", "/identity-file") + assert.NoError(t, res.Err) + size := res.Stdout.Trimmed() + // this should succeed because DagModifier in boxo handles the overflow + res = node.IPFS("files", "write", "--hash=identity", "--offset="+size, "/identity-file", appendFile) + assert.NoError(t, res.Err) + + // check that the file now uses non-identity hash + res = node.IPFS("files", "stat", "--hash", "/identity-file") + assert.NoError(t, res.Err) + newCid := res.Stdout.Trimmed() + + // verify new CID does NOT use identity + res = node.IPFS("cid", "format", "-f", "%h", newCid) + assert.NoError(t, res.Err) + assert.NotEqual(t, "identity", res.Stdout.Trimmed()) + + // verify it switched to a cryptographic hash + assert.Equal(t, config.DefaultHashFunction, res.Stdout.Trimmed()) + }) + + t.Run("ipfs files write --hash=identity with small write creates identity CID", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // create a small file with identity hash directly in MFS + smallData 
:= "small" + tempFile := filepath.Join(node.Dir, "small.txt") + err := os.WriteFile(tempFile, []byte(smallData), 0644) + require.NoError(t, err) + + // write to MFS with identity hash + res := node.IPFS("files", "write", "--create", "--hash=identity", "/mfs-identity", tempFile) + assert.NoError(t, res.Err) + + // verify using identity CID + res = node.IPFS("files", "stat", "--hash", "/mfs-identity") + assert.NoError(t, res.Err) + cid := res.Stdout.Trimmed() + + // verify CID uses identity hash + res = node.IPFS("cid", "format", "-f", "%h", cid) + assert.NoError(t, res.Err) + assert.Equal(t, "identity", res.Stdout.Trimmed()) + + // verify content + res = node.IPFS("files", "read", "/mfs-identity") + assert.NoError(t, res.Err) + assert.Equal(t, smallData, res.Stdout.Trimmed()) + }) + + t.Run("raw node with identity CID converts to UnixFS when appending", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // create raw block with identity CID + rawData := "raw" + tempFile := filepath.Join(node.Dir, "raw.txt") + err := os.WriteFile(tempFile, []byte(rawData), 0644) + require.NoError(t, err) + + res := node.IPFS("block", "put", "--format=raw", "--mhtype=identity", tempFile) + assert.NoError(t, res.Err) + rawCid := res.Stdout.Trimmed() + + // verify initial CID uses identity hash and raw codec + res = node.IPFS("cid", "format", "-f", "%h", rawCid) + assert.NoError(t, res.Err) + assert.Equal(t, "identity", res.Stdout.Trimmed()) + + res = node.IPFS("cid", "format", "-f", "%c", rawCid) + assert.NoError(t, res.Err) + assert.Equal(t, "raw", res.Stdout.Trimmed()) + + // copy to MFS + res = node.IPFS("files", "cp", fmt.Sprintf("/ipfs/%s", rawCid), "/raw-identity") + assert.NoError(t, res.Err) + + // append data + appendData := "appended" + appendFile := filepath.Join(node.Dir, "append-raw.txt") + err = os.WriteFile(appendFile, []byte(appendData), 0644) + require.NoError(t, err) + + // get current data size 
for appending + res = node.IPFS("files", "stat", "--format", "", "/raw-identity") + assert.NoError(t, res.Err) + size := res.Stdout.Trimmed() + res = node.IPFS("files", "write", "--hash=identity", "--offset="+size, "/raw-identity", appendFile) + assert.NoError(t, res.Err) + + // verify content + res = node.IPFS("files", "read", "/raw-identity") + assert.NoError(t, res.Err) + assert.Equal(t, rawData+appendData, res.Stdout.Trimmed()) + + // check that it's now a UnixFS structure (dag-pb) + res = node.IPFS("files", "stat", "--hash", "/raw-identity") + assert.NoError(t, res.Err) + newCid := res.Stdout.Trimmed() + + res = node.IPFS("cid", "format", "-f", "%c", newCid) + assert.NoError(t, res.Err) + assert.Equal(t, "dag-pb", res.Stdout.Trimmed()) + + res = node.IPFS("files", "stat", "/raw-identity") + assert.NoError(t, res.Err) + assert.Contains(t, res.Stdout.String(), "Type: file") + }) + + t.Run("ipfs add --inline-limit at exactly max size succeeds", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // create small data that will be inlined + smallData := "test data for inline" + tempFile := filepath.Join(node.Dir, "exact.txt") + err := os.WriteFile(tempFile, []byte(smallData), 0644) + require.NoError(t, err) + + // exactly at the limit should succeed + res := node.IPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", verifcid.DefaultMaxIdentityDigestSize), tempFile) + assert.NoError(t, res.Err) + cid := strings.Fields(res.Stdout.String())[1] + + // verify it uses identity hash (inline) since data is small enough + res = node.IPFS("cid", "format", "-f", "%h", cid) + assert.NoError(t, res.Err) + assert.Equal(t, "identity", res.Stdout.Trimmed()) + }) + + t.Run("ipfs add --inline-limit one byte over max fails", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + smallData := "test" + tempFile := filepath.Join(node.Dir, 
"oneover.txt") + err := os.WriteFile(tempFile, []byte(smallData), 0644) + require.NoError(t, err) + + // one byte over should fail + overLimit := verifcid.DefaultMaxIdentityDigestSize + 1 + res := node.RunIPFS("add", "--inline", fmt.Sprintf("--inline-limit=%d", overLimit), tempFile) + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), fmt.Sprintf("inline-limit %d exceeds maximum allowed size of %d bytes", overLimit, verifcid.DefaultMaxIdentityDigestSize)) + }) + + t.Run("ipfs add --inline with data larger than limit uses configured hash", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // data larger than inline limit + largeData := strings.Repeat("y", 100) + tempFile := filepath.Join(node.Dir, "toolarge.txt") + err := os.WriteFile(tempFile, []byte(largeData), 0644) + require.NoError(t, err) + + // set inline limit smaller than data + res := node.IPFS("add", "--inline", "--inline-limit=50", tempFile) + assert.NoError(t, res.Err) + cid := strings.Fields(res.Stdout.String())[1] + + // verify it's NOT using identity hash (data too large for inline) + res = node.IPFS("cid", "format", "-f", "%h", cid) + assert.NoError(t, res.Err) + assert.NotEqual(t, "identity", res.Stdout.Trimmed()) + + // should use configured hash + assert.Equal(t, config.DefaultHashFunction, res.Stdout.Trimmed()) + }) +} diff --git a/test/cli/init_test.go b/test/cli/init_test.go index 217ec64c3..dee844608 100644 --- a/test/cli/init_test.go +++ b/test/cli/init_test.go @@ -155,6 +155,7 @@ func TestInit(t *testing.T) { t.Run("ipfs init should not run while daemon is running", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("init") assert.NotEqual(t, 0, res.ExitErr.ExitCode()) assert.Contains(t, res.Stderr.String(), "Error: ipfs daemon is running. 
please stop it to run this command") diff --git a/test/cli/ipfswatch_test.go b/test/cli/ipfswatch_test.go new file mode 100644 index 000000000..cd6859176 --- /dev/null +++ b/test/cli/ipfswatch_test.go @@ -0,0 +1,164 @@ +//go:build !plan9 + +package cli + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "testing" + "time" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +func TestIPFSWatch(t *testing.T) { + t.Parallel() + + // Build ipfswatch binary once before running parallel subtests. + // This avoids race conditions and duplicate builds. + h := harness.NewT(t) + repoRoot := filepath.Dir(filepath.Dir(filepath.Dir(h.IPFSBin))) + ipfswatchBin := filepath.Join(repoRoot, "cmd", "ipfswatch", "ipfswatch") + + if _, err := os.Stat(ipfswatchBin); os.IsNotExist(err) { + // -C changes to repo root so go.mod is found + cmd := exec.Command("go", "build", "-C", repoRoot, "-o", ipfswatchBin, "./cmd/ipfswatch") + out, err := cmd.CombinedOutput() + require.NoError(t, err, "failed to build ipfswatch: %s", string(out)) + } + + t.Run("ipfswatch adds watched files to IPFS", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode().Init() + + // Create a temp directory to watch + watchDir := filepath.Join(h.Dir, "watch") + err := os.MkdirAll(watchDir, 0o755) + require.NoError(t, err) + + // Start ipfswatch in background + result := node.Runner.Run(harness.RunRequest{ + Path: ipfswatchBin, + Args: []string{"--repo", node.Dir, "--path", watchDir}, + RunFunc: harness.RunFuncStart, + }) + require.NoError(t, result.Err, "ipfswatch should start without error") + defer func() { + if result.Cmd.Process != nil { + _ = result.Cmd.Process.Kill() + _, _ = result.Cmd.Process.Wait() + } + }() + + // Wait for ipfswatch to initialize + time.Sleep(2 * time.Second) + + // Check for startup errors + stderrStr := result.Stderr.String() + require.NotContains(t, stderrStr, "unknown datastore 
type", "ipfswatch should recognize datastore plugins") + + // Create a test file with unique content based on timestamp + testContent := fmt.Sprintf("ipfswatch test content generated at %s", time.Now().Format(time.RFC3339Nano)) + testFile := filepath.Join(watchDir, "test.txt") + err = os.WriteFile(testFile, []byte(testContent), 0o644) + require.NoError(t, err) + + // Wait for ipfswatch to process the file and extract CID from log + // Log format: "added %s... key: %s" + cidPattern := regexp.MustCompile(`added .*/test\.txt\.\.\. key: (\S+)`) + var cid string + deadline := time.Now().Add(10 * time.Second) + for time.Now().Before(deadline) { + stderrStr = result.Stderr.String() + if matches := cidPattern.FindStringSubmatch(stderrStr); len(matches) > 1 { + cid = matches[1] + break + } + time.Sleep(100 * time.Millisecond) + } + require.NotEmpty(t, cid, "ipfswatch should have added test.txt and logged the CID, got stderr: %s", stderrStr) + + // Kill ipfswatch to release the repo lock + if result.Cmd.Process != nil { + if err = result.Cmd.Process.Signal(os.Interrupt); err != nil { + _ = result.Cmd.Process.Kill() + } + _, _ = result.Cmd.Process.Wait() + } + + // Verify the content matches by reading it back via ipfs cat + catRes := node.RunIPFS("cat", "--offline", cid) + require.Equal(t, 0, catRes.Cmd.ProcessState.ExitCode(), + "ipfs cat should succeed, cid=%s, stderr: %s", cid, catRes.Stderr.String()) + require.Equal(t, testContent, catRes.Stdout.String(), + "content read from IPFS should match what was written") + }) + + t.Run("ipfswatch loads datastore plugins for pebbleds", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode().Init() + + // Configure pebbleds as the datastore + node.UpdateConfig(func(cfg *config.Config) { + cfg.Datastore.Spec = map[string]interface{}{ + "type": "mount", + "mounts": []interface{}{ + map[string]interface{}{ + "mountpoint": "/blocks", + "path": "blocks", + "prefix": "flatfs.datastore", + "shardFunc": 
"/repo/flatfs/shard/v1/next-to-last/2", + "sync": true, + "type": "flatfs", + }, + map[string]interface{}{ + "mountpoint": "/", + "path": "datastore", + "prefix": "pebble.datastore", + "type": "pebbleds", + }, + }, + } + }) + + // Re-initialize datastore directory for pebbleds + // (the repo was initialized with levelds, need to remove it) + dsPath := filepath.Join(node.Dir, "datastore") + err := os.RemoveAll(dsPath) + require.NoError(t, err) + err = os.MkdirAll(dsPath, 0o755) + require.NoError(t, err) + + // Create a temp directory to watch + watchDir := filepath.Join(h.Dir, "watch") + err = os.MkdirAll(watchDir, 0o755) + require.NoError(t, err) + + // Start ipfswatch in background + result := node.Runner.Run(harness.RunRequest{ + Path: ipfswatchBin, + Args: []string{"--repo", node.Dir, "--path", watchDir}, + RunFunc: harness.RunFuncStart, + }) + require.NoError(t, result.Err, "ipfswatch should start without error") + defer func() { + if result.Cmd.Process != nil { + _ = result.Cmd.Process.Kill() + _, _ = result.Cmd.Process.Wait() + } + }() + + // Wait for ipfswatch to initialize and check for errors + time.Sleep(3 * time.Second) + + stderrStr := result.Stderr.String() + require.NotContains(t, stderrStr, "unknown datastore type", "ipfswatch should recognize pebbleds datastore plugin") + }) +} diff --git a/test/cli/log_level_test.go b/test/cli/log_level_test.go new file mode 100644 index 000000000..4858f2657 --- /dev/null +++ b/test/cli/log_level_test.go @@ -0,0 +1,826 @@ +package cli + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "os/exec" + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + . 
"github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLogLevel(t *testing.T) { + + t.Run("CLI", func(t *testing.T) { + t.Run("level '*' shows all subsystems", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := getExpectedSubsystems(t, node) + + res := node.IPFS("log", "level", "*") + assert.NoError(t, res.Err) + assert.Empty(t, res.Stderr.Lines()) + + actualSubsystems := parseCLIOutput(t, res.Stdout.String()) + + // Should show all subsystems plus the (default) entry + assert.GreaterOrEqual(t, len(actualSubsystems), len(expectedSubsystems)) + + validateAllSubsystemsPresentCLI(t, expectedSubsystems, actualSubsystems, "CLI output") + + // Should have the (default) entry + _, hasDefault := actualSubsystems["(default)"] + assert.True(t, hasDefault, "Should have '(default)' entry") + }) + + t.Run("level 'all' shows all subsystems (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := getExpectedSubsystems(t, node) + + res := node.IPFS("log", "level", "all") + assert.NoError(t, res.Err) + assert.Empty(t, res.Stderr.Lines()) + + actualSubsystems := parseCLIOutput(t, res.Stdout.String()) + + // Should show all subsystems plus the (default) entry + assert.GreaterOrEqual(t, len(actualSubsystems), len(expectedSubsystems)) + + validateAllSubsystemsPresentCLI(t, expectedSubsystems, actualSubsystems, "CLI output") + + // Should have the (default) entry + _, hasDefault := actualSubsystems["(default)"] + assert.True(t, hasDefault, "Should have '(default)' entry") + }) + + t.Run("get level for specific subsystem", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + node.IPFS("log", "level", "core", "debug") + res := node.IPFS("log", 
"level", "core") + assert.NoError(t, res.Err) + assert.Empty(t, res.Stderr.Lines()) + + output := res.Stdout.String() + lines := SplitLines(output) + + assert.Equal(t, 1, len(lines)) + + line := strings.TrimSpace(lines[0]) + assert.Equal(t, "debug", line) + }) + + t.Run("get level with no args returns default level", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + res1 := node.IPFS("log", "level", "*", "fatal") + assert.NoError(t, res1.Err) + assert.Empty(t, res1.Stderr.Lines()) + + res := node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, 0, len(res.Stderr.Lines())) + + output := res.Stdout.String() + lines := SplitLines(output) + + assert.Equal(t, 1, len(lines)) + + line := strings.TrimSpace(lines[0]) + assert.Equal(t, "fatal", line) + }) + + t.Run("get level reflects runtime log level changes", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon("--offline") + defer node.StopDaemon() + + node.IPFS("log", "level", "core", "debug") + res := node.IPFS("log", "level", "core") + assert.NoError(t, res.Err) + + output := res.Stdout.String() + lines := SplitLines(output) + + assert.Equal(t, 1, len(lines)) + + line := strings.TrimSpace(lines[0]) + assert.Equal(t, "debug", line) + }) + + t.Run("get level with non-existent subsystem returns error", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + res := node.RunIPFS("log", "level", "non-existent-subsystem") + assert.Error(t, res.Err) + assert.NotEqual(t, 0, len(res.Stderr.Lines())) + }) + + t.Run("set level to 'default' keyword", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // First set a specific subsystem to a different level + res1 := node.IPFS("log", "level", "core", "debug") + assert.NoError(t, res1.Err) + assert.Contains(t, 
res1.Stdout.String(), "Changed log level of 'core' to 'debug'") + + // Verify it was set to debug + res2 := node.IPFS("log", "level", "core") + assert.NoError(t, res2.Err) + assert.Equal(t, "debug", strings.TrimSpace(res2.Stdout.String())) + + // Get the current default level (should be 'error' since unchanged) + res3 := node.IPFS("log", "level") + assert.NoError(t, res3.Err) + defaultLevel := strings.TrimSpace(res3.Stdout.String()) + assert.Equal(t, "error", defaultLevel, "Default level should be 'error' when unchanged") + + // Now set the subsystem back to default + res4 := node.IPFS("log", "level", "core", "default") + assert.NoError(t, res4.Err) + assert.Contains(t, res4.Stdout.String(), "Changed log level of 'core' to") + + // Verify it's now at the default level (should be 'error') + res5 := node.IPFS("log", "level", "core") + assert.NoError(t, res5.Err) + assert.Equal(t, "error", strings.TrimSpace(res5.Stdout.String())) + }) + + t.Run("set all subsystems with 'all' changes default (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Initial state - default should be 'error' + res := node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Set one subsystem to a different level + res = node.IPFS("log", "level", "core", "debug") + assert.NoError(t, res.Err) + + // Default should still be 'error' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Now use 'all' to set everything to 'info' + res = node.IPFS("log", "level", "all", "info") + assert.NoError(t, res.Err) + assert.Contains(t, res.Stdout.String(), "Changed log level of '*' to 'info'") + + // Default should now be 'info' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Core 
should also be 'info' (overwritten by 'all') + res = node.IPFS("log", "level", "core") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Any other subsystem should also be 'info' + res = node.IPFS("log", "level", "dht") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + }) + + t.Run("set all subsystems with '*' changes default", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Initial state - default should be 'error' + res := node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Set one subsystem to a different level + res = node.IPFS("log", "level", "core", "debug") + assert.NoError(t, res.Err) + + // Default should still be 'error' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Now use '*' to set everything to 'info' + res = node.IPFS("log", "level", "*", "info") + assert.NoError(t, res.Err) + assert.Contains(t, res.Stdout.String(), "Changed log level of '*' to 'info'") + + // Default should now be 'info' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Core should also be 'info' (overwritten by '*') + res = node.IPFS("log", "level", "core") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Any other subsystem should also be 'info' + res = node.IPFS("log", "level", "dht") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + }) + + t.Run("'all' in get mode shows (default) entry (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get all levels with 'all' + res 
:= node.IPFS("log", "level", "all") + assert.NoError(t, res.Err) + + output := res.Stdout.String() + + // Should contain "(default): error" entry + assert.Contains(t, output, "(default): error", "Should show default level with (default) key") + + // Should also contain various subsystems + assert.Contains(t, output, "core: error") + assert.Contains(t, output, "dht: error") + }) + + t.Run("'*' in get mode shows (default) entry", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get all levels with '*' + res := node.IPFS("log", "level", "*") + assert.NoError(t, res.Err) + + output := res.Stdout.String() + + // Should contain "(default): error" entry + assert.Contains(t, output, "(default): error", "Should show default level with (default) key") + + // Should also contain various subsystems + assert.Contains(t, output, "core: error") + assert.Contains(t, output, "dht: error") + }) + + t.Run("set all subsystems to 'default' using 'all' (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get the original default level (just for reference, it should be "error") + res0 := node.IPFS("log", "level") + assert.NoError(t, res0.Err) + assert.Equal(t, "error", strings.TrimSpace(res0.Stdout.String())) + + // First set all subsystems to debug using 'all' + res1 := node.IPFS("log", "level", "all", "debug") + assert.NoError(t, res1.Err) + assert.Contains(t, res1.Stdout.String(), "Changed log level of '*' to 'debug'") + + // Verify a specific subsystem is at debug + res2 := node.IPFS("log", "level", "core") + assert.NoError(t, res2.Err) + assert.Equal(t, "debug", strings.TrimSpace(res2.Stdout.String())) + + // Verify the default level is now debug + res3 := node.IPFS("log", "level") + assert.NoError(t, res3.Err) + assert.Equal(t, "debug", strings.TrimSpace(res3.Stdout.String())) + + // Now set all subsystems back to 
default (which is now "debug") using 'all' + res4 := node.IPFS("log", "level", "all", "default") + assert.NoError(t, res4.Err) + assert.Contains(t, res4.Stdout.String(), "Changed log level of '*' to") + + // The subsystem should still be at debug (because that's what default is now) + res5 := node.IPFS("log", "level", "core") + assert.NoError(t, res5.Err) + assert.Equal(t, "debug", strings.TrimSpace(res5.Stdout.String())) + + // The behavior is correct: "default" uses the current default level, + // which was changed to "debug" when we set "all" to "debug" + }) + + t.Run("set all subsystems to 'default' keyword", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get the original default level (just for reference, it should be "error") + res0 := node.IPFS("log", "level") + assert.NoError(t, res0.Err) + // originalDefault := strings.TrimSpace(res0.Stdout.String()) + assert.Equal(t, "error", strings.TrimSpace(res0.Stdout.String())) + + // First set all subsystems to debug + res1 := node.IPFS("log", "level", "*", "debug") + assert.NoError(t, res1.Err) + assert.Contains(t, res1.Stdout.String(), "Changed log level of '*' to 'debug'") + + // Verify a specific subsystem is at debug + res2 := node.IPFS("log", "level", "core") + assert.NoError(t, res2.Err) + assert.Equal(t, "debug", strings.TrimSpace(res2.Stdout.String())) + + // Verify the default level is now debug + res3 := node.IPFS("log", "level") + assert.NoError(t, res3.Err) + assert.Equal(t, "debug", strings.TrimSpace(res3.Stdout.String())) + + // Now set all subsystems back to default (which is now "debug") + res4 := node.IPFS("log", "level", "*", "default") + assert.NoError(t, res4.Err) + assert.Contains(t, res4.Stdout.String(), "Changed log level of '*' to") + + // The subsystem should still be at debug (because that's what default is now) + res5 := node.IPFS("log", "level", "core") + assert.NoError(t, res5.Err) + assert.Equal(t, "debug", 
strings.TrimSpace(res5.Stdout.String())) + + // The behavior is correct: "default" uses the current default level, + // which was changed to "debug" when we set "*" to "debug" + }) + + t.Run("shell escaping variants for '*' wildcard", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Test different shell escaping methods work for '*' + // This tests the behavior documented in help text: '*' or "*" or \* + + // Test 1: Single quotes '*' (should work) + cmd1 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level '*' info", + node.Dir, node.IPFSBin, node.APIAddr()) + res1 := h.Sh(cmd1) + assert.NoError(t, res1.Err) + assert.Contains(t, res1.Stdout.String(), "Changed log level of '*' to 'info'") + + // Test 2: Double quotes "*" (should work) + cmd2 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level \"*\" debug", + node.Dir, node.IPFSBin, node.APIAddr()) + res2 := h.Sh(cmd2) + assert.NoError(t, res2.Err) + assert.Contains(t, res2.Stdout.String(), "Changed log level of '*' to 'debug'") + + // Test 3: Backslash escape \* (should work) + cmd3 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level \\* warn", + node.Dir, node.IPFSBin, node.APIAddr()) + res3 := h.Sh(cmd3) + assert.NoError(t, res3.Err) + assert.Contains(t, res3.Stdout.String(), "Changed log level of '*' to 'warn'") + + // Test 4: Verify the final state - should show 'warn' as default + res4 := node.IPFS("log", "level") + assert.NoError(t, res4.Err) + assert.Equal(t, "warn", strings.TrimSpace(res4.Stdout.String())) + + // Test 5: Get all levels using escaped '*' to verify it shows all subsystems + cmd5 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level \\*", + node.Dir, node.IPFSBin, node.APIAddr()) + res5 := h.Sh(cmd5) + assert.NoError(t, res5.Err) + output := res5.Stdout.String() + assert.Contains(t, output, "(default): warn", "Should show updated default level") + assert.Contains(t, output, "core: warn", "Should show 
core subsystem at warn level") + }) + }) + + t.Run("HTTP RPC", func(t *testing.T) { + t.Run("get default level returns JSON", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Make HTTP request to get default log level + resp, err := http.Post(node.APIURL()+"/api/v0/log/level", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Levels field + levels, ok := result["Levels"].(map[string]interface{}) + require.True(t, ok, "Response should have 'Levels' field") + + // Should have exactly one entry for the default level + assert.Equal(t, 1, len(levels)) + + // The default level should be present + defaultLevel, ok := levels[""] + require.True(t, ok, "Should have empty string key for default level") + assert.Equal(t, "error", defaultLevel, "Default level should be 'error'") + }) + + t.Run("get all levels using 'all' returns JSON (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := getExpectedSubsystems(t, node) + + // Make HTTP request to get all log levels using 'all' + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=all", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + levels := parseHTTPResponse(t, resp) + validateAllSubsystemsPresent(t, expectedSubsystems, levels, "JSON response") + + // Should have the (default) entry + defaultLevel, ok := levels["(default)"] + require.True(t, ok, "Should have '(default)' key") + assert.Equal(t, "error", defaultLevel, "Default level should be 'error'") + }) + + t.Run("get all levels returns JSON", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := 
getExpectedSubsystems(t, node) + + // Make HTTP request to get all log levels + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=*", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + levels := parseHTTPResponse(t, resp) + validateAllSubsystemsPresent(t, expectedSubsystems, levels, "JSON response") + + // Should have the (default) entry + defaultLevel, ok := levels["(default)"] + require.True(t, ok, "Should have '(default)' key") + assert.Equal(t, "error", defaultLevel, "Default level should be 'error'") + }) + + t.Run("get specific subsystem level returns JSON", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // First set a specific level for a subsystem + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=debug", "", nil) + require.NoError(t, err) + resp.Body.Close() + + // Now get the level for that subsystem + resp, err = http.Post(node.APIURL()+"/api/v0/log/level?arg=core", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Levels field + levels, ok := result["Levels"].(map[string]interface{}) + require.True(t, ok, "Response should have 'Levels' field") + + // Should have exactly one entry + assert.Equal(t, 1, len(levels)) + + // Check the level for 'core' subsystem + coreLevel, ok := levels["core"] + require.True(t, ok, "Should have 'core' key") + assert.Equal(t, "debug", coreLevel, "Core level should be 'debug'") + }) + + t.Run("set level using 'all' returns JSON message (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Set a log level using 'all' + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=all&arg=info", "", nil) + require.NoError(t, err) + defer 
resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Message field + message, ok := result["Message"].(string) + require.True(t, ok, "Response should have 'Message' field") + + // Check the message content (should show '*' in message even when 'all' was used) + assert.Contains(t, message, "Changed log level of '*' to 'info'") + }) + + t.Run("set level returns JSON message", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Set a log level + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=info", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Message field + message, ok := result["Message"].(string) + require.True(t, ok, "Response should have 'Message' field") + + // Check the message content + assert.Contains(t, message, "Changed log level of 'core' to 'info'") + }) + + t.Run("set level to 'default' keyword", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // First set a subsystem to debug + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=debug", "", nil) + require.NoError(t, err) + resp.Body.Close() + + // Now set it back to default + resp, err = http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=default", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Message field + message, ok := result["Message"].(string) + require.True(t, ok, "Response should have 'Message' field") + + // 
The message should indicate the change + assert.True(t, strings.Contains(message, "Changed log level of 'core' to"), + "Message should indicate level change") + + // Verify the level is back to error (default) + resp, err = http.Post(node.APIURL()+"/api/v0/log/level?arg=core", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + var getResult map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&getResult) + require.NoError(t, err) + + levels, _ := getResult["Levels"].(map[string]interface{}) + coreLevel, _ := levels["core"].(string) + assert.Equal(t, "error", coreLevel, "Core level should be back to 'error' (default)") + }) + }) + + // Constants for slog interop tests + const ( + slogTestLogTailTimeout = 10 * time.Second + slogTestLogWaitTimeout = 5 * time.Second + slogTestLogStartupDelay = 1 * time.Second // Wait for log tail to start + slogTestSubsystemCmdsHTTP = "cmds/http" // Native go-log subsystem + slogTestSubsystemNetIdentify = "net/identify" // go-libp2p slog subsystem + ) + + // logMatch represents a matched log entry for slog interop tests + type logMatch struct { + subsystem string + line string + } + + // startLogMonitoring starts ipfs log tail and returns command and channel for matched logs. + startLogMonitoring := func(t *testing.T, node *harness.Node) (*exec.Cmd, chan logMatch) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), slogTestLogTailTimeout) + t.Cleanup(cancel) + + cmd := exec.CommandContext(ctx, node.IPFSBin, "log", "tail") + cmd.Env = append([]string(nil), os.Environ()...) 
+ for k, v := range node.Runner.Env { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + cmd.Dir = node.Runner.Dir + + stdout, err := cmd.StdoutPipe() + require.NoError(t, err) + require.NoError(t, cmd.Start()) + + matches := make(chan logMatch, 10) + + go func() { + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + line := scanner.Text() + // Check for actual logger field in JSON, not just substring match + if strings.Contains(line, `"logger":"cmds/http"`) { + matches <- logMatch{slogTestSubsystemCmdsHTTP, line} + } + if strings.Contains(line, `"logger":"net/identify"`) { + matches <- logMatch{slogTestSubsystemNetIdentify, line} + } + } + }() + + return cmd, matches + } + + // waitForBothSubsystems waits for both native go-log and slog subsystems to appear in logs. + waitForBothSubsystems := func(t *testing.T, matches chan logMatch, timeout time.Duration) { + t.Helper() + + seen := make(map[string]struct{}) + deadline := time.After(timeout) + + for len(seen) < 2 { + select { + case match := <-matches: + if _, exists := seen[match.subsystem]; !exists { + t.Logf("Found %s log", match.subsystem) + seen[match.subsystem] = struct{}{} + } + case <-deadline: + t.Fatalf("Timeout waiting for logs. Seen: %v", seen) + } + } + + assert.Contains(t, seen, slogTestSubsystemCmdsHTTP, "should see cmds/http (native go-log)") + assert.Contains(t, seen, slogTestSubsystemNetIdentify, "should see net/identify (slog from go-libp2p)") + } + + // triggerIdentifyProtocol connects node1 to node2, triggering net/identify logs. 
+ triggerIdentifyProtocol := func(t *testing.T, node1, node2 *harness.Node) { + t.Helper() + + // Get node2's peer ID and address + node2ID := node2.PeerID().String() + addrsRes := node2.IPFS("id", "-f", "") + require.NoError(t, addrsRes.Err) + + addrs := strings.Split(strings.TrimSpace(addrsRes.Stdout.String()), "\n") + require.NotEmpty(t, addrs, "node2 should have at least one address") + + // Connect node1 to node2 + multiaddr := fmt.Sprintf("%s/p2p/%s", addrs[0], node2ID) + res := node1.IPFS("swarm", "connect", multiaddr) + require.NoError(t, res.Err) + } + + // verifySlogInterop verifies that both native go-log and slog from go-libp2p + // appear in ipfs log tail with correct formatting and level control. + verifySlogInterop := func(t *testing.T, node1, node2 *harness.Node) { + t.Helper() + + cmd, matches := startLogMonitoring(t, node1) + defer func() { + _ = cmd.Process.Kill() + }() + + time.Sleep(slogTestLogStartupDelay) + + // Trigger cmds/http (native go-log) + node1.IPFS("version") + + // Trigger net/identify (slog from go-libp2p) + triggerIdentifyProtocol(t, node1, node2) + + waitForBothSubsystems(t, matches, slogTestLogWaitTimeout) + } + + // This test verifies that go-log's slog bridge works with go-libp2p's gologshim + // when log levels are set via GOLOG_LOG_LEVEL environment variable. + // It tests both native go-log loggers (cmds/http) and slog-based loggers from + // go-libp2p (net/identify), ensuring both types appear in `ipfs log tail`. 
+ t.Run("slog interop via env var", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + + node1 := h.NewNode().Init() + node1.Runner.Env["GOLOG_LOG_LEVEL"] = "error,cmds/http=debug,net/identify=debug" + node1.StartDaemon() + defer node1.StopDaemon() + + node2 := h.NewNode().Init().StartDaemon() + defer node2.StopDaemon() + + verifySlogInterop(t, node1, node2) + }) + + // This test verifies that go-log's slog bridge works with go-libp2p's gologshim + // when log levels are set dynamically via `ipfs log level` CLI commands. + // It tests the key feature that SetLogLevel auto-creates level entries for subsystems + // that don't exist yet, enabling `ipfs log level net/identify debug` to work even + // before the net/identify logger is created. This is critical for slog interop. + t.Run("slog interop via CLI", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + + node1 := h.NewNode().Init().StartDaemon() + defer node1.StopDaemon() + + node2 := h.NewNode().Init().StartDaemon() + defer node2.StopDaemon() + + // Set levels via CLI for both subsystems BEFORE triggering events + res := node1.IPFS("log", "level", slogTestSubsystemCmdsHTTP, "debug") + require.NoError(t, res.Err) + + res = node1.IPFS("log", "level", slogTestSubsystemNetIdentify, "debug") + require.NoError(t, res.Err) // Auto-creates level entry for slog subsystem + + verifySlogInterop(t, node1, node2) + }) + +} + +func getExpectedSubsystems(t *testing.T, node *harness.Node) []string { + t.Helper() + lsRes := node.IPFS("log", "ls") + require.NoError(t, lsRes.Err) + expectedSubsystems := SplitLines(lsRes.Stdout.String()) + assert.Greater(t, len(expectedSubsystems), 10, "Should have many subsystems") + return expectedSubsystems +} + +func parseCLIOutput(t *testing.T, output string) map[string]string { + t.Helper() + lines := SplitLines(output) + actualSubsystems := make(map[string]string) + for _, line := range lines { + if strings.TrimSpace(line) == "" { + continue + } + parts := 
strings.Split(line, ": ") + assert.Equal(t, 2, len(parts), "Line should have format 'subsystem: level', got: %s", line) + assert.NotEmpty(t, parts[0], "Subsystem should not be empty") + assert.NotEmpty(t, parts[1], "Level should not be empty") + actualSubsystems[parts[0]] = parts[1] + } + return actualSubsystems +} + +func parseHTTPResponse(t *testing.T, resp *http.Response) map[string]interface{} { + t.Helper() + var result map[string]interface{} + err := json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + levels, ok := result["Levels"].(map[string]interface{}) + require.True(t, ok, "Response should have 'Levels' field") + assert.Greater(t, len(levels), 10, "Should have many subsystems") + return levels +} + +func validateAllSubsystemsPresent(t *testing.T, expectedSubsystems []string, actualLevels map[string]interface{}, context string) { + t.Helper() + for _, expectedSub := range expectedSubsystems { + expectedSub = strings.TrimSpace(expectedSub) + if expectedSub == "" { + continue + } + _, found := actualLevels[expectedSub] + assert.True(t, found, "Expected subsystem '%s' should be present in %s", expectedSub, context) + } +} + +func validateAllSubsystemsPresentCLI(t *testing.T, expectedSubsystems []string, actualLevels map[string]string, context string) { + t.Helper() + for _, expectedSub := range expectedSubsystems { + expectedSub = strings.TrimSpace(expectedSub) + if expectedSub == "" { + continue + } + _, found := actualLevels[expectedSub] + assert.True(t, found, "Expected subsystem '%s' should be present in %s", expectedSub, context) + } +} diff --git a/test/cli/migrations/migration_16_to_latest_test.go b/test/cli/migrations/migration_16_to_latest_test.go new file mode 100644 index 000000000..97a1ec1ff --- /dev/null +++ b/test/cli/migrations/migration_16_to_latest_test.go @@ -0,0 +1,918 @@ +package migrations + +// NOTE: These migration tests require the local Kubo binary (built with 'make build') to be in PATH. 
+// +// To run these tests successfully: +// export PATH="$(pwd)/cmd/ipfs:$PATH" +// go test ./test/cli/migrations/ + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + ipfs "github.com/ipfs/kubo" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestMigration16ToLatest tests migration from repo version 16 to the latest version. +// +// This test uses a real IPFS repository snapshot from Kubo v0.36.0 (the last version that used repo v16). +// The intention is to confirm that users can upgrade from Kubo v0.36.0 to the latest version by applying +// all intermediate migrations successfully. +// +// NOTE: This test comprehensively tests all migration methods (daemon --migrate, repo migrate, +// and reverse migration) because 16-to-17 was the first embedded migration that did not fetch +// external files. It serves as a reference implementation for migration testing. +// +// Future migrations can have simplified tests (like 17-to-18 in migration_17_to_latest_test.go) +// that focus on specific migration logic rather than testing all migration methods. +// +// If you need to test migration of configuration keys that appeared in later repo versions, +// create a new test file migration_N_to_latest_test.go with a separate IPFS repository test vector +// from the appropriate Kubo version. 
// TestMigration16ToLatest is the top-level driver; each subtest below runs a
// named scenario against its own private copy of the static v16 repo fixture.
// Subtests run sequentially within this parallel top-level test.
func TestMigration16ToLatest(t *testing.T) {
	t.Parallel()

	// Primary tests using 'ipfs daemon --migrate' command (default in Docker)
	t.Run("daemon migrate: forward migration with auto values", testDaemonMigrationWithAuto)
	t.Run("daemon migrate: forward migration without auto values", testDaemonMigrationWithoutAuto)
	t.Run("daemon migrate: corrupted config handling", testDaemonCorruptedConfigHandling)
	t.Run("daemon migrate: missing fields handling", testDaemonMissingFieldsHandling)

	// Comparison tests using 'ipfs repo migrate' command
	t.Run("repo migrate: forward migration with auto values", testRepoMigrationWithAuto)
	t.Run("repo migrate: backward migration", testRepoBackwardMigration)

	// Temp file and backup cleanup tests
	t.Run("daemon migrate: no temp files after successful migration", testNoTempFilesAfterSuccessfulMigration)
	t.Run("daemon migrate: no temp files after failed migration", testNoTempFilesAfterFailedMigration)
	t.Run("daemon migrate: backup files persist after successful migration", testBackupFilesPersistAfterSuccessfulMigration)
	t.Run("repo migrate: backup files can revert migration", testBackupFilesCanRevertMigration)
	t.Run("repo migrate: conversion failure cleans up temp files", testConversionFailureCleanup)
}

// =============================================================================
// PRIMARY TESTS: 'ipfs daemon --migrate' command (default in Docker)
//
// These tests exercise the primary migration path used in production Docker
// containers where --migrate is enabled by default. This covers:
// - Normal forward migration scenarios
// - Error handling with corrupted configs
// - Migration with minimal/missing config fields
// =============================================================================

// testDaemonMigrationWithAuto: happy-path forward migration; all defaults in
// the fixture should collapse to the single "auto" placeholder per field.
func testDaemonMigrationWithAuto(t *testing.T) {
	// TEST: Forward migration using 'ipfs daemon --migrate' command (PRIMARY)
	// Use static v16 repo fixture from real Kubo 0.36 `ipfs init`
	// NOTE: This test may need to be revised/updated once repo version 18 is released,
	// at that point only keep tests that use 'ipfs repo migrate'
	node := setupStaticV16Repo(t)

	configPath := filepath.Join(node.Dir, "config")
	versionPath := filepath.Join(node.Dir, "version")

	// Static fixture already uses port 0 for random port assignment - no config update needed

	// Run migration using daemon --migrate (automatic during daemon startup)
	// This is the primary method used in Docker containers
	// Monitor output until daemon is ready, then shut it down gracefully
	stdoutOutput, migrationSuccess := runDaemonMigrationWithMonitoring(t, node)

	// Debug: Print the actual output
	t.Logf("Daemon output:\n%s", stdoutOutput)

	// Verify migration was successful based on monitoring
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
	require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")

	// Verify version was updated to latest
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	expectedVersion := fmt.Sprint(ipfs.RepoVersion)
	require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)

	// Verify migration results using DRY helper
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireAutoConfDefaults().
		RequireArrayContains("Bootstrap", "auto").
		RequireArrayLength("Bootstrap", 1). // Should only contain "auto" when all peers were defaults
		RequireArrayContains("Routing.DelegatedRouters", "auto").
		RequireArrayContains("Ipns.DelegatedPublishers", "auto")

	// DNS resolver in static fixture should be empty, so "." should be set to "auto"
	helper.RequireFieldEquals("DNS.Resolvers[.]", "auto")
}

// testDaemonMigrationWithoutAuto: mixed scenario — a custom resolver must be
// preserved verbatim while a resolver matching a known default becomes "auto".
func testDaemonMigrationWithoutAuto(t *testing.T) {
	// TEST: Forward migration using 'ipfs daemon --migrate' command (PRIMARY)
	// Test migration of a config that already has some custom values
	// NOTE: This test may need to be revised/updated once repo version 18 is released,
	// at that point only keep tests that use 'ipfs repo migrate'
	// Should preserve existing settings and only add missing ones
	node := setupStaticV16Repo(t)

	// Modify the static fixture to add some custom values for testing mixed scenarios
	configPath := filepath.Join(node.Dir, "config")

	// Read existing config from static fixture
	var v16Config map[string]interface{}
	configData, err := os.ReadFile(configPath)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(configData, &v16Config))

	// Add custom DNS resolver that should be preserved
	if v16Config["DNS"] == nil {
		v16Config["DNS"] = map[string]interface{}{}
	}
	dnsSection := v16Config["DNS"].(map[string]interface{})
	dnsSection["Resolvers"] = map[string]string{
		".":    "https://custom-dns.example.com/dns-query",
		"eth.": "https://dns.eth.limo/dns-query", // This is a default that will be replaced with "auto"
	}

	// Write modified config back
	modifiedConfigData, err := json.MarshalIndent(v16Config, "", " ")
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(configPath, modifiedConfigData, 0644))

	// Static fixture already uses port 0 for random port assignment - no config update needed

	// Run migration using daemon --migrate command (this is a daemon test)
	// Monitor output
until daemon is ready, then shut it down gracefully
	stdoutOutput, migrationSuccess := runDaemonMigrationWithMonitoring(t, node)

	// Verify migration was successful based on monitoring
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
	require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")

	// Verify migration results: custom values preserved alongside "auto"
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireAutoConfDefaults().
		RequireArrayContains("Bootstrap", "auto").
		RequireFieldEquals("DNS.Resolvers[.]", "https://custom-dns.example.com/dns-query")

	// Check that eth. resolver was replaced with "auto" since it uses a default URL
	helper.RequireFieldEquals("DNS.Resolvers[eth.]", "auto").
		RequireFieldEquals("DNS.Resolvers[.]", "https://custom-dns.example.com/dns-query")
}

// =============================================================================
// Tests using 'ipfs daemon --migrate' command
// =============================================================================

// Test helper structs and functions for cleaner, more DRY tests

// ConfigField describes a single expected config entry.
// NOTE(review): not referenced by any helper visible in this file — possibly
// dead; confirm against the rest of the package before removing.
type ConfigField struct {
	Path     string
	Expected interface{}
	Message  string
}

// MigrationTestHelper wraps a decoded JSON config and provides a fluent
// Require* API; every method fails the test via require on mismatch and
// returns the receiver so assertions can be chained.
type MigrationTestHelper struct {
	t      *testing.T
	config map[string]interface{}
}

// NewMigrationTestHelper reads and unmarshals the config file at configPath,
// failing the test immediately on I/O or JSON errors.
func NewMigrationTestHelper(t *testing.T, configPath string) *MigrationTestHelper {
	var config map[string]interface{}
	configData, err := os.ReadFile(configPath)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(configData, &config))

	return &MigrationTestHelper{t: t, config: config}
}

// RequireFieldExists asserts the dotted/bracketed path resolves to a non-nil value.
func (h *MigrationTestHelper) RequireFieldExists(path string) *MigrationTestHelper {
	value := h.getNestedValue(path)
	require.NotNil(h.t, value, "Field %s should exist", path)
	return h
}

// RequireFieldEquals asserts the value at path equals expected (JSON-decoded
// types: numbers are float64, arrays are []interface{}).
func (h *MigrationTestHelper) RequireFieldEquals(path string, expected interface{}) *MigrationTestHelper {
	value := h.getNestedValue(path)
	require.Equal(h.t, expected, value, "Field %s should equal %v", path, expected)
	return h
}

// RequireArrayContains asserts the value at path is a JSON array containing expected.
func (h *MigrationTestHelper) RequireArrayContains(path string, expected interface{}) *MigrationTestHelper {
	value := h.getNestedValue(path)
	require.IsType(h.t, []interface{}{}, value, "Field %s should be an array", path)
	array := value.([]interface{})
	require.Contains(h.t, array, expected, "Array %s should contain %v", path, expected)
	return h
}

// RequireArrayLength asserts the value at path is a JSON array of exactly expectedLen elements.
func (h *MigrationTestHelper) RequireArrayLength(path string, expectedLen int) *MigrationTestHelper {
	value := h.getNestedValue(path)
	require.IsType(h.t, []interface{}{}, value, "Field %s should be an array", path)
	array := value.([]interface{})
	require.Len(h.t, array, expectedLen, "Array %s should have length %d", path, expectedLen)
	return h
}

// RequireArrayDoesNotContain asserts the value at path is a JSON array that omits notExpected.
func (h *MigrationTestHelper) RequireArrayDoesNotContain(path string, notExpected interface{}) *MigrationTestHelper {
	value := h.getNestedValue(path)
	require.IsType(h.t, []interface{}{}, value, "Field %s should be an array", path)
	array := value.([]interface{})
	require.NotContains(h.t, array, notExpected, "Array %s should not contain %v", path, notExpected)
	return h
}

// RequireFieldAbsent asserts the path resolves to nil (missing key or explicit null).
func (h *MigrationTestHelper) RequireFieldAbsent(path string) *MigrationTestHelper {
	value := h.getNestedValue(path)
	require.Nil(h.t, value, "Field %s should not exist", path)
	return h
}

// RequireAutoConfDefaults asserts the AutoConf section exists but carries no
// explicit settings, i.e. the node relies on implicit defaults.
func (h *MigrationTestHelper) RequireAutoConfDefaults() *MigrationTestHelper {
	// AutoConf section should exist but be empty (using implicit defaults)
	return h.RequireFieldExists("AutoConf").
		RequireFieldAbsent("AutoConf.Enabled").              // Should use implicit default (true)
		RequireFieldAbsent("AutoConf.URL").                  // Should use implicit default (mainnet URL)
		RequireFieldAbsent("AutoConf.RefreshInterval").      // Should use implicit default (24h)
		RequireFieldAbsent("AutoConf.TLSInsecureSkipVerify") // Should use implicit default (false)
}

// RequireAutoFieldsSetToAuto asserts every migratable field was rewritten to "auto".
func (h *MigrationTestHelper) RequireAutoFieldsSetToAuto() *MigrationTestHelper {
	return h.RequireArrayContains("Bootstrap", "auto").
		RequireFieldEquals("DNS.Resolvers[.]", "auto").
		RequireArrayContains("Routing.DelegatedRouters", "auto").
		RequireArrayContains("Ipns.DelegatedPublishers", "auto")
}

// RequireNoAutoValues asserts no "auto" placeholders remain anywhere, used
// after a backward migration. Each field is checked only if present.
func (h *MigrationTestHelper) RequireNoAutoValues() *MigrationTestHelper {
	// Check Bootstrap if it exists
	if h.getNestedValue("Bootstrap") != nil {
		h.RequireArrayDoesNotContain("Bootstrap", "auto")
	}

	// Check DNS.Resolvers if it exists
	if h.getNestedValue("DNS.Resolvers") != nil {
		h.RequireMapDoesNotContainValue("DNS.Resolvers", "auto")
	}

	// Check Routing.DelegatedRouters if it exists
	if h.getNestedValue("Routing.DelegatedRouters") != nil {
		h.RequireArrayDoesNotContain("Routing.DelegatedRouters", "auto")
	}

	// Check Ipns.DelegatedPublishers if it exists
	if h.getNestedValue("Ipns.DelegatedPublishers") != nil {
		h.RequireArrayDoesNotContain("Ipns.DelegatedPublishers", "auto")
	}

	return h
}

// RequireMapDoesNotContainValue asserts the JSON object at path has no entry
// whose value equals notExpected.
func (h *MigrationTestHelper) RequireMapDoesNotContainValue(path string, notExpected interface{}) *MigrationTestHelper {
	value := h.getNestedValue(path)
	require.IsType(h.t, map[string]interface{}{}, value, "Field %s should be a map", path)
	mapValue := value.(map[string]interface{})
	for k, v := range mapValue {
		require.NotEqual(h.t, notExpected, v, "Map %s[%s] should not equal %v", path, k, notExpected)
	}
	return h
}

// getNestedValue walks the decoded config along the parsed path segments,
// returning nil as soon as any hop is missing or not an object.
// Note: "field" and "mapKey" segments are handled identically here because
// both index into a JSON object; the distinction exists only in the parser.
func (h *MigrationTestHelper) getNestedValue(path string) interface{} {
	segments := h.parseKuboConfigPath(path)
	current := interface{}(h.config)

	for _, segment := range segments {
		switch segment.Type {
		case "field":
			switch v := current.(type) {
			case map[string]interface{}:
				current = v[segment.Key]
			default:
				return nil
			}
		case "mapKey":
			switch v := current.(type) {
			case map[string]interface{}:
				current = v[segment.Key]
			default:
				return nil
			}
		default:
			return nil
		}

		if current == nil {
			return nil
		}
	}

	return current
}

// PathSegment is one hop of a parsed config path.
type PathSegment struct {
	Type string // "field" or "mapKey"
	Key  string
}

// parseKuboConfigPath turns "A.B[k.ey]" into field/mapKey segments; bracketed
// keys may themselves contain dots (e.g. DNS.Resolvers[eth.]).
func (h *MigrationTestHelper) parseKuboConfigPath(path string) []PathSegment {
	var segments []PathSegment

	// Split path into parts, respecting bracket boundaries
	parts := h.splitKuboConfigPath(path)

	for _, part := range parts {
		if strings.Contains(part, "[") && strings.HasSuffix(part, "]") {
			// Handle field[key] notation
			bracketStart := strings.Index(part, "[")
			fieldName := part[:bracketStart]
			mapKey := part[bracketStart+1 : len(part)-1] // Remove [ and ]

			// Add field segment if present
			if fieldName != "" {
				segments = append(segments, PathSegment{Type: "field", Key: fieldName})
			}
			// Add map key segment
			segments = append(segments, PathSegment{Type: "mapKey", Key: mapKey})
		} else {
			// Regular field access
			if part != "" {
				segments = append(segments, PathSegment{Type: "field", Key: part})
			}
		}
	}

	return segments
}

// splitKuboConfigPath splits a path on dots, but preserves bracket sections intact
func (h *MigrationTestHelper) splitKuboConfigPath(path string) []string {
	var parts []string
	var current strings.Builder
	inBrackets := false

	for _, r := range path {
		switch r {
		case '[':
			inBrackets = true
			current.WriteRune(r)
		case ']':
			inBrackets = false
			current.WriteRune(r)
		case '.':
			if inBrackets {
				// Inside brackets, preserve the dot
				current.WriteRune(r)
			} else {
				// Outside brackets, split here
				if current.Len() > 0 {
					parts = append(parts, current.String())
					current.Reset()
				}
			}
		default:
			current.WriteRune(r)
		}
	}

	// Add final part if any
	if current.Len() > 0 {
		parts = append(parts, current.String())
	}

	return parts
}

// setupStaticV16Repo creates a test node using static
v16 repo fixture from real Kubo 0.36 `ipfs init`
// This ensures tests remain stable regardless of future changes to the IPFS binary
// Each test gets its own copy in a temporary directory to allow modifications
func setupStaticV16Repo(t *testing.T) *harness.Node {
	// Get absolute path to static v16 repo fixture
	v16FixturePath := "testdata/v16-repo"

	// Create a temporary test directory - each test gets its own copy
	// Sanitize test name for Windows - replace invalid characters
	sanitizedName := strings.Map(func(r rune) rune {
		if strings.ContainsRune(`<>:"/\|?*`, r) {
			return '_'
		}
		return r
	}, t.Name())
	tmpDir := filepath.Join(t.TempDir(), "migration-test-"+sanitizedName)
	require.NoError(t, os.MkdirAll(tmpDir, 0755))

	// Convert to absolute path for harness
	absTmpDir, err := filepath.Abs(tmpDir)
	require.NoError(t, err)

	// Use the built binary (should be in PATH)
	node := harness.BuildNode("ipfs", absTmpDir, 0)

	// Replace IPFS_PATH with static fixture files to test directory (creates independent copy per test)
	cloneStaticRepoFixture(t, v16FixturePath, node.Dir)

	return node
}

// cloneStaticRepoFixture recursively copies the v16 repo fixture to the target directory
// It completely removes the target directory contents before copying to ensure no extra files remain
func cloneStaticRepoFixture(t *testing.T, srcPath, dstPath string) {
	srcInfo, err := os.Stat(srcPath)
	require.NoError(t, err)

	if srcInfo.IsDir() {
		// Completely remove destination directory and all contents
		require.NoError(t, os.RemoveAll(dstPath))
		// Create fresh destination directory
		require.NoError(t, os.MkdirAll(dstPath, srcInfo.Mode()))

		// Read source directory
		entries, err := os.ReadDir(srcPath)
		require.NoError(t, err)

		// Copy each entry recursively
		for _, entry := range entries {
			srcEntryPath := filepath.Join(srcPath, entry.Name())
			dstEntryPath := filepath.Join(dstPath, entry.Name())
			cloneStaticRepoFixture(t, srcEntryPath, dstEntryPath)
		}
	} else {
		// Copy file (destination directory should already be clean from parent call)
		// NOTE: deferred Closes fire at function return; fine here because each
		// recursion level handles exactly one file.
		srcFile, err := os.Open(srcPath)
		require.NoError(t, err)
		defer srcFile.Close()

		dstFile, err := os.Create(dstPath)
		require.NoError(t, err)
		defer dstFile.Close()

		_, err = io.Copy(dstFile, srcFile)
		require.NoError(t, err)

		// Copy file permissions
		require.NoError(t, dstFile.Chmod(srcInfo.Mode()))
	}
}

// Placeholder stubs for new test functions - to be implemented
func testDaemonCorruptedConfigHandling(t *testing.T) {
	// TEST: Error handling using 'ipfs daemon --migrate' command with corrupted config (PRIMARY)
	// Test what happens when config file is corrupted during migration
	// NOTE: This test may need to be revised/updated once repo version 18 is released,
	// at that point only keep tests that use 'ipfs repo migrate'
	node := setupStaticV16Repo(t)

	// Create corrupted config
	configPath := filepath.Join(node.Dir, "config")
	corruptedJson := `{"Bootstrap": [invalid json}`
	require.NoError(t, os.WriteFile(configPath, []byte(corruptedJson), 0644))

	// Write version file indicating v16
	versionPath := filepath.Join(node.Dir, "version")
	require.NoError(t, os.WriteFile(versionPath, []byte("16"), 0644))

	// Run daemon with --migrate flag - this should fail gracefully
	result := node.RunIPFS("daemon", "--migrate")

	// Verify graceful failure handling
	// The daemon should fail but migration error should be clear
	errorOutput := result.Stderr.String() + result.Stdout.String()
	require.True(t, strings.Contains(errorOutput, "json") || strings.Contains(errorOutput, "invalid character"), "Error should mention JSON parsing issue")

	// Verify atomic failure: version and config should remain unchanged
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	require.Equal(t, "16", strings.TrimSpace(string(versionData)), "Version should remain unchanged after failed migration")

	originalContent, err := os.ReadFile(configPath)
	require.NoError(t, err)
	require.Equal(t, corruptedJson, string(originalContent), "Original config should be unchanged after failed migration")
}

func testDaemonMissingFieldsHandling(t *testing.T) {
	// TEST: Migration using 'ipfs daemon --migrate' command with minimal config (PRIMARY)
	// Test migration when config is missing expected fields
	// NOTE: This test may need to be revised/updated once repo version 18 is released,
	// at that point only keep tests that use 'ipfs repo migrate'
	node := setupStaticV16Repo(t)

	// The static fixture already has all required fields, use it as-is
	configPath := filepath.Join(node.Dir, "config")
	versionPath := filepath.Join(node.Dir, "version")

	// Static fixture already uses port 0 for random port assignment - no config update needed

	// Run daemon migration
	stdoutOutput, migrationSuccess := runDaemonMigrationWithMonitoring(t, node)

	// Verify migration was successful
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered")
	require.Contains(t, stdoutOutput, "Migration 16-to-17 succeeded", "Migration should have completed successfully")

	// Verify version was updated to latest
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	expectedVersion := fmt.Sprint(ipfs.RepoVersion)
	require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)

	// Verify migration adds all required fields to minimal config
	NewMigrationTestHelper(t, configPath).
		RequireAutoConfDefaults().
		RequireAutoFieldsSetToAuto().
		RequireFieldExists("Identity.PeerID") // Original identity preserved from static fixture
}

// =============================================================================
// COMPARISON TESTS: 'ipfs repo migrate' command
//
// These tests verify that repo migrate produces equivalent results to
// daemon migrate, and test scenarios specific to repo migrate like
// backward migration (which daemon doesn't support).
// =============================================================================

func testRepoMigrationWithAuto(t *testing.T) {
	// TEST: Forward migration using 'ipfs repo migrate' command (COMPARISON)
	// Simple comparison test to verify repo migrate produces same results as daemon migrate
	node := setupStaticV16Repo(t)

	// Use static fixture as-is
	configPath := filepath.Join(node.Dir, "config")

	// Run migration using 'ipfs repo migrate' command
	result := node.RunIPFS("repo", "migrate")
	require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")

	// Verify same results as daemon migrate
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireAutoConfDefaults().
		RequireArrayContains("Bootstrap", "auto").
		RequireArrayContains("Routing.DelegatedRouters", "auto").
		RequireArrayContains("Ipns.DelegatedPublishers", "auto").
		RequireFieldEquals("DNS.Resolvers[.]", "auto")
}

func testRepoBackwardMigration(t *testing.T) {
	// TEST: Backward migration using 'ipfs repo migrate --to=16 --allow-downgrade' command
	// This is kept as repo migrate since daemon doesn't support backward migration
	node := setupStaticV16Repo(t)

	// Use static fixture as-is
	configPath := filepath.Join(node.Dir, "config")
	versionPath := filepath.Join(node.Dir, "version")

	// First run forward migration to get to v17
	result := node.RunIPFS("repo", "migrate")
	t.Logf("Forward migration stdout:\n%s", result.Stdout.String())
	t.Logf("Forward migration stderr:\n%s", result.Stderr.String())
	require.Empty(t, result.Stderr.String(), "Forward migration should succeed")

	// Verify we're at the latest version
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	expectedVersion := fmt.Sprint(ipfs.RepoVersion)
	require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Should be at version %s (latest) after forward migration", expectedVersion)

	// Now run reverse migration back to v16
	result = node.RunIPFS("repo", "migrate", "--to=16", "--allow-downgrade")
	t.Logf("Backward migration stdout:\n%s", result.Stdout.String())
	t.Logf("Backward migration stderr:\n%s", result.Stderr.String())
	require.Empty(t, result.Stderr.String(), "Reverse migration should succeed")

	// Verify version was downgraded to 16
	versionData, err = os.ReadFile(versionPath)
	require.NoError(t, err)
	require.Equal(t, "16", strings.TrimSpace(string(versionData)), "Version should be downgraded to 16")

	// Verify backward migration results: AutoConf removed and no "auto" values remain
	NewMigrationTestHelper(t, configPath).
		RequireFieldAbsent("AutoConf").
+ RequireNoAutoValues() +} + +// runDaemonMigrationWithMonitoring starts daemon --migrate, monitors output until "Daemon is ready", +// then gracefully shuts down the daemon and returns the captured output and success status. +// This monitors for all expected migrations from version 16 to latest. +func runDaemonMigrationWithMonitoring(t *testing.T, node *harness.Node) (string, bool) { + // Monitor migrations from repo v16 to latest + return runDaemonWithExpectedMigrations(t, node, 16, ipfs.RepoVersion) +} + +// runDaemonWithExpectedMigrations monitors daemon startup for a sequence of migrations from startVersion to endVersion +func runDaemonWithExpectedMigrations(t *testing.T, node *harness.Node, startVersion, endVersion int) (string, bool) { + // Build list of expected migrations + var expectedMigrations []struct { + pattern string + success string + } + + for v := startVersion; v < endVersion; v++ { + from := v + to := v + 1 + expectedMigrations = append(expectedMigrations, struct { + pattern string + success string + }{ + pattern: fmt.Sprintf("applying %d-to-%d repo migration", from, to), + success: fmt.Sprintf("Migration %d-to-%d succeeded", from, to), + }) + } + + return runDaemonWithMultipleMigrationMonitoring(t, node, expectedMigrations) +} + +// runDaemonWithMultipleMigrationMonitoring monitors daemon startup for multiple sequential migrations +func runDaemonWithMultipleMigrationMonitoring(t *testing.T, node *harness.Node, expectedMigrations []struct { + pattern string + success string +}) (string, bool) { + // Create context with timeout as safety net + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + // Set up daemon command with output monitoring + cmd := exec.CommandContext(ctx, node.IPFSBin, "daemon", "--migrate") + cmd.Dir = node.Dir + + // Set environment (especially IPFS_PATH) + for k, v := range node.Runner.Env { + cmd.Env = append(cmd.Env, k+"="+v) + } + + // Set up pipes for output monitoring + 
stdout, err := cmd.StdoutPipe() + require.NoError(t, err) + stderr, err := cmd.StderrPipe() + require.NoError(t, err) + + // Start the daemon + err = cmd.Start() + require.NoError(t, err) + + var allOutput strings.Builder + var daemonReady bool + + // Track which migrations have been detected + migrationsDetected := make([]bool, len(expectedMigrations)) + migrationsSucceeded := make([]bool, len(expectedMigrations)) + + // Monitor stdout for completion signals + scanner := bufio.NewScanner(stdout) + go func() { + for scanner.Scan() { + line := scanner.Text() + allOutput.WriteString(line + "\n") + + // Check for migration messages + for i, migration := range expectedMigrations { + if strings.Contains(line, migration.pattern) { + migrationsDetected[i] = true + } + if strings.Contains(line, migration.success) { + migrationsSucceeded[i] = true + } + } + if strings.Contains(line, "Daemon is ready") { + daemonReady = true + break // Exit monitoring loop + } + } + }() + + // Also monitor stderr (but don't use it for completion detection) + go func() { + stderrScanner := bufio.NewScanner(stderr) + for stderrScanner.Scan() { + line := stderrScanner.Text() + allOutput.WriteString("STDERR: " + line + "\n") + } + }() + + // Wait for daemon ready signal or timeout + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + // Timeout - kill the process + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + t.Logf("Daemon migration timed out after 60 seconds") + return allOutput.String(), false + + case <-ticker.C: + if daemonReady { + // Daemon is ready - shut it down gracefully + shutdownCmd := exec.Command(node.IPFSBin, "shutdown") + shutdownCmd.Dir = node.Dir + for k, v := range node.Runner.Env { + shutdownCmd.Env = append(shutdownCmd.Env, k+"="+v) + } + + if err := shutdownCmd.Run(); err != nil { + t.Logf("Warning: ipfs shutdown failed: %v", err) + // Force kill if graceful shutdown fails + if cmd.Process != nil { + 
_ = cmd.Process.Kill() + } + } + + // Wait for process to exit + _ = cmd.Wait() + + // Check all migrations were detected and succeeded + allDetected := true + allSucceeded := true + for i := range expectedMigrations { + if !migrationsDetected[i] { + allDetected = false + t.Logf("Migration %s was not detected", expectedMigrations[i].pattern) + } + if !migrationsSucceeded[i] { + allSucceeded = false + t.Logf("Migration %s did not succeed", expectedMigrations[i].success) + } + } + + return allOutput.String(), allDetected && allSucceeded + } + + // Check if process has exited (e.g., due to startup failure after migration) + if cmd.ProcessState != nil && cmd.ProcessState.Exited() { + // Process exited - migration may have completed but daemon failed to start + // This is expected for corrupted config tests + + // Check all migrations status + allDetected := true + allSucceeded := true + for i := range expectedMigrations { + if !migrationsDetected[i] { + allDetected = false + } + if !migrationsSucceeded[i] { + allSucceeded = false + } + } + + return allOutput.String(), allDetected && allSucceeded + } + } + } +} + +// ============================================================================= +// TEMP FILE AND BACKUP CLEANUP TESTS +// ============================================================================= + +// Helper functions for test cleanup assertions +func assertNoTempFiles(t *testing.T, dir string, msgAndArgs ...interface{}) { + t.Helper() + tmpFiles, err := filepath.Glob(filepath.Join(dir, ".tmp-*")) + require.NoError(t, err) + assert.Empty(t, tmpFiles, msgAndArgs...) +} + +func backupPath(configPath string, fromVer, toVer int) string { + return fmt.Sprintf("%s.%d-to-%d.bak", configPath, fromVer, toVer) +} + +func setupDaemonCmd(ctx context.Context, node *harness.Node, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, node.IPFSBin, args...) 
	cmd.Dir = node.Dir
	// Propagate the harness environment (notably IPFS_PATH) so the child
	// process operates on the test repo rather than the user's repo.
	for k, v := range node.Runner.Env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}
	return cmd
}

// testNoTempFilesAfterSuccessfulMigration verifies that a successful daemon
// migration leaves no .tmp-* scratch files in the repo directory.
func testNoTempFilesAfterSuccessfulMigration(t *testing.T) {
	node := setupStaticV16Repo(t)

	// Run successful migration
	_, migrationSuccess := runDaemonMigrationWithMonitoring(t, node)
	require.True(t, migrationSuccess, "migration should succeed")

	assertNoTempFiles(t, node.Dir, "no temp files should remain after successful migration")
}

// testNoTempFilesAfterFailedMigration verifies that a migration which fails
// part-way still cleans up its .tmp-* scratch files.
func testNoTempFilesAfterFailedMigration(t *testing.T) {
	node := setupStaticV16Repo(t)

	// Corrupt config to force migration failure
	configPath := filepath.Join(node.Dir, "config")
	corruptedJson := `{"Bootstrap": ["auto",` // Invalid JSON
	require.NoError(t, os.WriteFile(configPath, []byte(corruptedJson), 0644))

	// Attempt migration (should fail)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	cmd := setupDaemonCmd(ctx, node, "daemon", "--migrate")
	output, _ := cmd.CombinedOutput()
	t.Logf("Failed migration output: %s", output)

	assertNoTempFiles(t, node.Dir, "no temp files should remain after failed migration")
}

// testBackupFilesPersistAfterSuccessfulMigration verifies that each migration
// step leaves a <config>.<from>-to-<to>.bak file containing valid JSON.
func testBackupFilesPersistAfterSuccessfulMigration(t *testing.T) {
	node := setupStaticV16Repo(t)

	// Run migration from v16 to latest (v18)
	_, migrationSuccess := runDaemonMigrationWithMonitoring(t, node)
	require.True(t, migrationSuccess, "migration should succeed")

	// Check for backup files from each migration step
	configPath := filepath.Join(node.Dir, "config")
	backup16to17 := backupPath(configPath, 16, 17)
	backup17to18 := backupPath(configPath, 17, 18)

	// Both backup files should exist
	assert.FileExists(t, backup16to17, "16-to-17 backup should exist")
	assert.FileExists(t, backup17to18, "17-to-18 backup should exist")

	// Verify backup files contain valid JSON
	data16to17, err := os.ReadFile(backup16to17)
	require.NoError(t, err)
	var config16to17 map[string]interface{}
	require.NoError(t, json.Unmarshal(data16to17, &config16to17), "16-to-17 backup should be valid JSON")

	data17to18, err := os.ReadFile(backup17to18)
	require.NoError(t, err)
	var config17to18 map[string]interface{}
	require.NoError(t, json.Unmarshal(data17to18, &config17to18), "17-to-18 backup should be valid JSON")
}

// testBackupFilesCanRevertMigration verifies a manual rollback: restoring the
// .bak config and rewriting the version file returns the repo to v16 exactly.
func testBackupFilesCanRevertMigration(t *testing.T) {
	node := setupStaticV16Repo(t)

	configPath := filepath.Join(node.Dir, "config")
	versionPath := filepath.Join(node.Dir, "version")

	// Read original v16 config
	originalConfig, err := os.ReadFile(configPath)
	require.NoError(t, err)

	// Migrate to v17 only
	result := node.RunIPFS("repo", "migrate", "--to=17")
	require.Empty(t, result.Stderr.String(), "migration to v17 should succeed")

	// Verify backup exists
	backup16to17 := backupPath(configPath, 16, 17)
	assert.FileExists(t, backup16to17, "16-to-17 backup should exist")

	// Manually revert using backup
	backupData, err := os.ReadFile(backup16to17)
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(configPath, backupData, 0600))
	require.NoError(t, os.WriteFile(versionPath, []byte("16"), 0644))

	// Verify config matches original
	revertedConfig, err := os.ReadFile(configPath)
	require.NoError(t, err)
	assert.JSONEq(t, string(originalConfig), string(revertedConfig), "reverted config should match original")

	// Verify version is back to 16
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	assert.Equal(t, "16", strings.TrimSpace(string(versionData)), "version should be reverted to 16")
}

func testConversionFailureCleanup(t *testing.T) {
	// This test verifies that when a migration's conversion function fails,
	// all temporary files are cleaned up properly
	node := setupStaticV16Repo(t)

	configPath := filepath.Join(node.Dir, "config")

	// Create a corrupted config that will cause conversion to fail during JSON parsing
	// The migration will read this, attempt to parse as JSON, and fail
	corruptedJson := `{"Bootstrap": ["auto",` // Invalid JSON - missing closing bracket
	require.NoError(t, os.WriteFile(configPath, []byte(corruptedJson), 0644))

	// Attempt migration (should fail during conversion)
	result := node.RunIPFS("repo", "migrate")
	require.NotEmpty(t, result.Stderr.String(), "migration should fail with error")

	assertNoTempFiles(t, node.Dir, "no temp files should remain after conversion failure")

	// Verify no backup files were created (failure happened before backup)
	backupFiles, err := filepath.Glob(filepath.Join(node.Dir, "config.*.bak"))
	require.NoError(t, err)
	assert.Empty(t, backupFiles, "no backup files should be created on conversion failure")

	// Verify corrupted config is unchanged (atomic operations prevented overwrite)
	currentConfig, err := os.ReadFile(configPath)
	require.NoError(t, err)
	assert.Equal(t, corruptedJson, string(currentConfig), "corrupted config should remain unchanged")
}
diff --git a/test/cli/migrations/migration_17_to_latest_test.go b/test/cli/migrations/migration_17_to_latest_test.go
new file mode 100644
index 000000000..635573461
--- /dev/null
+++ b/test/cli/migrations/migration_17_to_latest_test.go
@@ -0,0 +1,360 @@
package migrations

// NOTE: These migration tests require the local Kubo binary (built with 'make build') to be in PATH.
//
// To run these tests successfully:
// export PATH="$(pwd)/cmd/ipfs:$PATH"
// go test ./test/cli/migrations/

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
	"time"

	ipfs "github.com/ipfs/kubo"
	"github.com/ipfs/kubo/test/cli/harness"
	"github.com/stretchr/testify/require"
)

// TestMigration17ToLatest tests migration from repo version 17 to the latest version.
//
// Since we don't have a v17 repo fixture, we start with v16 and migrate it to v17 first,
// then test the 17-to-18 migration specifically.
//
// This test focuses on the Provider/Reprovider to Provide consolidation that happens in 17-to-18.
func TestMigration17ToLatest(t *testing.T) {
	t.Parallel()

	// Tests for Provider/Reprovider to Provide migration (17-to-18)
	t.Run("daemon migrate: Provider/Reprovider to Provide consolidation", testProviderReproviderMigration)
	t.Run("daemon migrate: flat strategy conversion", testFlatStrategyConversion)
	t.Run("daemon migrate: empty Provider/Reprovider sections", testEmptyProviderReproviderMigration)
	t.Run("daemon migrate: partial configuration (Provider only)", testProviderOnlyMigration)
	t.Run("daemon migrate: partial configuration (Reprovider only)", testReproviderOnlyMigration)
	t.Run("repo migrate: invalid strategy values preserved", testInvalidStrategyMigration)
	t.Run("repo migrate: Provider/Reprovider to Provide consolidation", testRepoProviderReproviderMigration)
}

// =============================================================================
// MIGRATION 17-to-18 SPECIFIC TESTS: Provider/Reprovider to Provide consolidation
// =============================================================================

func testProviderReproviderMigration(t *testing.T) {
	// TEST: 17-to-18 migration with explicit Provider/Reprovider configuration
	node := setupV17RepoWithProviderConfig(t)

	configPath := filepath.Join(node.Dir, "config")
	versionPath := filepath.Join(node.Dir, "version")

	// Run migration using daemon --migrate command
	stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)

	// Debug: Print the actual output
	t.Logf("Daemon output:\n%s", stdoutOutput)

	// Verify migration was successful
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "applying 17-to-18 repo migration", "Migration 17-to-18 should have been triggered")
	require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded", "Migration 17-to-18 should have completed successfully")

	// Verify version was updated to latest
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	expectedVersion := fmt.Sprint(ipfs.RepoVersion)
	require.Equal(t, expectedVersion, strings.TrimSpace(string(versionData)), "Version should be updated to %s (latest)", expectedVersion)

	// =============================================================================
	// MIGRATION 17-to-18 ASSERTIONS: Provider/Reprovider to Provide consolidation
	// =============================================================================
	helper := NewMigrationTestHelper(t, configPath)

	// Verify Provider/Reprovider migration to Provide
	helper.RequireProviderMigration().
		RequireFieldEquals("Provide.Enabled", true).              // Migrated from Provider.Enabled
		RequireFieldEquals("Provide.DHT.MaxWorkers", float64(8)). // Migrated from Provider.WorkerCount
		RequireFieldEquals("Provide.Strategy", "roots").          // Migrated from Reprovider.Strategy
		RequireFieldEquals("Provide.DHT.Interval", "24h")         // Migrated from Reprovider.Interval

	// Verify old sections are removed
	helper.RequireFieldAbsent("Provider").
		RequireFieldAbsent("Reprovider")
}

func testFlatStrategyConversion(t *testing.T) {
	// TEST: 17-to-18 migration with "flat" strategy that should convert to "all"
	node := setupV17RepoWithFlatStrategy(t)

	configPath := filepath.Join(node.Dir, "config")

	// Run migration using daemon --migrate command
	stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)

	// Verify migration was successful
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "applying 17-to-18 repo migration", "Migration 17-to-18 should have been triggered")
	require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded", "Migration 17-to-18 should have completed successfully")

	// =============================================================================
	// MIGRATION 17-to-18 ASSERTIONS: "flat" to "all" strategy conversion
	// =============================================================================
	helper := NewMigrationTestHelper(t, configPath)

	// Verify "flat" was converted to "all"
	helper.RequireProviderMigration().
		RequireFieldEquals("Provide.Strategy", "all"). // "flat" converted to "all"
		RequireFieldEquals("Provide.DHT.Interval", "12h")
}

func testEmptyProviderReproviderMigration(t *testing.T) {
	// TEST: 17-to-18 migration with empty Provider and Reprovider sections
	node := setupV17RepoWithEmptySections(t)

	configPath := filepath.Join(node.Dir, "config")

	// Run migration
	stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)

	// Verify migration was successful
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")

	// Verify empty sections are removed and no Provide section is created
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireFieldAbsent("Provider").
		RequireFieldAbsent("Reprovider").
		RequireFieldAbsent("Provide") // No Provide section should be created for empty configs
}

func testProviderOnlyMigration(t *testing.T) {
	// TEST: 17-to-18 migration with only Provider configuration
	node := setupV17RepoWithProviderOnly(t)

	configPath := filepath.Join(node.Dir, "config")

	// Run migration
	stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)

	// Verify migration was successful
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")

	// Verify only Provider fields are migrated
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireProviderMigration().
		RequireFieldEquals("Provide.Enabled", false).
		RequireFieldEquals("Provide.DHT.MaxWorkers", float64(32)).
		RequireFieldAbsent("Provide.Strategy").    // No Reprovider.Strategy to migrate
		RequireFieldAbsent("Provide.DHT.Interval") // No Reprovider.Interval to migrate
}

func testReproviderOnlyMigration(t *testing.T) {
	// TEST: 17-to-18 migration with only Reprovider configuration
	node := setupV17RepoWithReproviderOnly(t)

	configPath := filepath.Join(node.Dir, "config")

	// Run migration
	stdoutOutput, migrationSuccess := runDaemonMigrationFromV17(t, node)

	// Verify migration was successful
	require.True(t, migrationSuccess, "Migration should have been successful")
	require.Contains(t, stdoutOutput, "Migration 17-to-18 succeeded")

	// Verify only Reprovider fields are migrated
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireProviderMigration().
		RequireFieldEquals("Provide.Strategy", "pinned").
		RequireFieldEquals("Provide.DHT.Interval", "48h").
		RequireFieldAbsent("Provide.Enabled").       // No Provider.Enabled to migrate
		RequireFieldAbsent("Provide.DHT.MaxWorkers") // No Provider.WorkerCount to migrate
}

func testInvalidStrategyMigration(t *testing.T) {
	// TEST: 17-to-18 migration with invalid strategy values (should be preserved as-is)
	// The migration itself should succeed, but daemon start will fail due to invalid strategy
	node := setupV17RepoWithInvalidStrategy(t)

	configPath := filepath.Join(node.Dir, "config")

	// Run the migration using 'ipfs repo migrate' (not daemon --migrate)
	// because daemon would fail to start with invalid strategy after migration
	result := node.RunIPFS("repo", "migrate")
	require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")

	// Verify invalid strategy is preserved as-is (not validated during migration)
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireProviderMigration().
		RequireFieldEquals("Provide.Strategy", "invalid-strategy") // Should be preserved

	// Now verify that daemon fails to start with invalid strategy
	// Note: We cannot use --offline as it skips provider validation
	// Use a context with timeout to avoid hanging
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, node.IPFSBin, "daemon")
	cmd.Dir = node.Dir
	for k, v := range node.Runner.Env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}

	output, err := cmd.CombinedOutput()

	// The daemon should fail (either with error or timeout if it's hanging)
	require.Error(t, err, "Daemon should fail to start with invalid strategy")

	// Check if we got the expected error message
	outputStr := string(output)
	t.Logf("Daemon output with invalid strategy: %s", outputStr)

	// The error should mention unknown strategy
	require.Contains(t, outputStr, "unknown strategy", "Should report unknown strategy error")
}

func testRepoProviderReproviderMigration(t *testing.T) {
	// TEST: 17-to-18 migration using
	// 'ipfs repo migrate' command (continuation of the TEST note above)
	node := setupV17RepoWithProviderConfig(t)

	configPath := filepath.Join(node.Dir, "config")

	// Run migration using 'ipfs repo migrate' command
	result := node.RunIPFS("repo", "migrate")
	require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")

	// Verify same results as daemon migrate
	helper := NewMigrationTestHelper(t, configPath)
	helper.RequireProviderMigration().
		RequireFieldEquals("Provide.Enabled", true).
		RequireFieldEquals("Provide.DHT.MaxWorkers", float64(8)).
		RequireFieldEquals("Provide.Strategy", "roots").
		RequireFieldEquals("Provide.DHT.Interval", "24h")
}

// =============================================================================
// HELPER FUNCTIONS
// =============================================================================

// setupV17RepoWithProviderConfig creates a v17 repo with Provider/Reprovider configuration
func setupV17RepoWithProviderConfig(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{
			"Enabled":     true,
			"WorkerCount": 8,
		},
		map[string]interface{}{
			"Strategy": "roots",
			"Interval": "24h",
		})
}

// setupV17RepoWithFlatStrategy creates a v17 repo with "flat" strategy for testing conversion
func setupV17RepoWithFlatStrategy(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{
			"Enabled": false,
		},
		map[string]interface{}{
			"Strategy": "flat", // This should be converted to "all"
			"Interval": "12h",
		})
}

// setupV17RepoWithConfig is a helper that creates a v17 repo with specified Provider/Reprovider config
func setupV17RepoWithConfig(t *testing.T, providerConfig, reproviderConfig map[string]interface{}) *harness.Node {
	node := setupStaticV16Repo(t)

	// First migrate to v17
	result := node.RunIPFS("repo", "migrate", "--to=17")
	require.Empty(t, result.Stderr.String(), "Migration to v17 should succeed")

	// Update config with specified Provider and Reprovider settings
	configPath := filepath.Join(node.Dir, "config")
	var config map[string]interface{}
	configData, err := os.ReadFile(configPath)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(configData, &config))

	// nil means "write an empty section" — callers currently always pass a
	// (possibly empty) map, so the else branches are defensive.
	if providerConfig != nil {
		config["Provider"] = providerConfig
	} else {
		config["Provider"] = map[string]interface{}{}
	}

	if reproviderConfig != nil {
		config["Reprovider"] = reproviderConfig
	} else {
		config["Reprovider"] = map[string]interface{}{}
	}

	modifiedConfigData, err := json.MarshalIndent(config, "", " ")
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(configPath, modifiedConfigData, 0644))

	return node
}

// setupV17RepoWithEmptySections creates a v17 repo with empty Provider/Reprovider sections
func setupV17RepoWithEmptySections(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{},
		map[string]interface{}{})
}

// setupV17RepoWithProviderOnly creates a v17 repo with only Provider configuration
func setupV17RepoWithProviderOnly(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{
			"Enabled":     false,
			"WorkerCount": 32,
		},
		map[string]interface{}{})
}

// setupV17RepoWithReproviderOnly creates a v17 repo with only Reprovider configuration
func setupV17RepoWithReproviderOnly(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{},
		map[string]interface{}{
			"Strategy": "pinned",
			"Interval": "48h",
		})
}

// setupV17RepoWithInvalidStrategy creates a v17 repo with an invalid strategy value
func setupV17RepoWithInvalidStrategy(t *testing.T) *harness.Node {
	return setupV17RepoWithConfig(t,
		map[string]interface{}{},
		map[string]interface{}{
			"Strategy": "invalid-strategy", // This is not a valid strategy
			"Interval": "24h",
		})
}

// runDaemonMigrationFromV17 monitors daemon startup for 17-to-18 migration only
func runDaemonMigrationFromV17(t *testing.T, node *harness.Node) (string, bool) {
	// Monitor only the 17-to-18 migration
	expectedMigrations := []struct {
		pattern string
		success string
	}{
		{
			pattern: "applying 17-to-18 repo migration",
			success: "Migration 17-to-18 succeeded",
		},
	}

	return runDaemonWithMultipleMigrationMonitoring(t, node, expectedMigrations)
}

// RequireProviderMigration verifies that Provider/Reprovider have been migrated to Provide section
func (h *MigrationTestHelper) RequireProviderMigration() *MigrationTestHelper {
	return h.RequireFieldExists("Provide").
		RequireFieldAbsent("Provider").
		RequireFieldAbsent("Reprovider")
}
diff --git a/test/cli/migrations/migration_concurrent_test.go b/test/cli/migrations/migration_concurrent_test.go
new file mode 100644
index 000000000..8c716f51c
--- /dev/null
+++ b/test/cli/migrations/migration_concurrent_test.go
@@ -0,0 +1,55 @@
package migrations

// NOTE: These concurrent migration tests require the local Kubo binary (built with 'make build') to be in PATH.
//
// To run these tests successfully:
// export PATH="$(pwd)/cmd/ipfs:$PATH"
// go test ./test/cli/migrations/

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// daemonStartupWait is the fixed grace period given to the first daemon to
// initialize and take the repo lock before the competing daemon is started.
const daemonStartupWait = 2 * time.Second

// TestConcurrentMigrations tests concurrent daemon --migrate attempts
func TestConcurrentMigrations(t *testing.T) {
	t.Parallel()

	t.Run("concurrent daemon migrations prevented by lock", testConcurrentDaemonMigrations)
}

// testConcurrentDaemonMigrations asserts that a second `daemon --migrate`
// fails with a lock-related error while the first daemon holds repo.lock.
func testConcurrentDaemonMigrations(t *testing.T) {
	node := setupStaticV16Repo(t)

	// Start first daemon --migrate in background (holds repo.lock)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	firstDaemon := setupDaemonCmd(ctx, node, "daemon", "--migrate")
	require.NoError(t, firstDaemon.Start())
	defer func() {
		// Shutdown first daemon
		shutdownCmd := setupDaemonCmd(context.Background(), node, "shutdown")
		_ = shutdownCmd.Run()
		_ = firstDaemon.Wait()
	}()

	// Wait for first daemon to start and acquire lock
	// NOTE(review): a fixed sleep is timing-based; on a heavily loaded machine
	// the first daemon may not hold the lock yet — consider polling for
	// repo.lock instead. TODO confirm this is stable in CI.
	time.Sleep(daemonStartupWait)

	// Attempt second daemon --migrate (should fail due to lock)
	secondDaemon := setupDaemonCmd(context.Background(), node, "daemon", "--migrate")
	output, err := secondDaemon.CombinedOutput()
	t.Logf("Second daemon output: %s", output)

	// Should fail with lock error
	require.Error(t, err, "second daemon should fail when first daemon holds lock")
	require.Contains(t, string(output), "lock", "error should mention lock")

	assertNoTempFiles(t, node.Dir, "no temp files should be created when lock fails")
}
diff --git a/test/cli/migrations/migration_mixed_15_to_latest_test.go b/test/cli/migrations/migration_mixed_15_to_latest_test.go
new file mode 100644
index 000000000..6ee96b939
--- /dev/null
+++ b/test/cli/migrations/migration_mixed_15_to_latest_test.go
@@ -0,0 +1,506 @@
package migrations

// NOTE: These mixed migration tests validate the transition from old Kubo versions
// that used external
// migration binaries to the latest version with embedded migrations. This ensures users can upgrade
// from very old installations (v15) to the latest version seamlessly.
//
// The tests verify hybrid migration paths:
// - Forward: external binary (15→16) + embedded migrations (16→latest)
// - Backward: embedded migrations (latest→16) + external binary (16→15)
//
// This confirms compatibility between the old external migration system and the new embedded system.
//
// To run these tests successfully:
// export PATH="$(pwd)/cmd/ipfs:$PATH"
// go test ./test/cli/migrations/

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"slices"
	"strings"
	"testing"
	"time"

	ipfs "github.com/ipfs/kubo"
	"github.com/ipfs/kubo/test/cli/harness"
	"github.com/stretchr/testify/require"
)

// TestMixedMigration15ToLatest tests migration from old Kubo (v15 with external migrations)
// to the latest version using a hybrid approach: external binary for 15→16, then embedded
// migrations for 16→latest. This ensures backward compatibility for users upgrading from
// very old Kubo installations.
func TestMixedMigration15ToLatest(t *testing.T) {
	t.Parallel()

	// Test mixed migration from v15 to latest (combines external 15→16 + embedded 16→latest)
	t.Run("daemon migrate: mixed 15 to latest", testDaemonMigration15ToLatest)
	t.Run("repo migrate: mixed 15 to latest", testRepoMigration15ToLatest)
}

// TestMixedMigrationLatestTo15Downgrade tests downgrading from the latest version back to v15
// using a hybrid approach: embedded migrations for latest→16, then external binary for 16→15.
// This ensures the migration system works bidirectionally for recovery scenarios.
func TestMixedMigrationLatestTo15Downgrade(t *testing.T) {
	t.Parallel()

	// Test reverse hybrid migration from latest to v15 (embedded latest→16 + external 16→15)
	t.Run("repo migrate: reverse hybrid latest to 15", testRepoReverseHybridMigrationLatestTo15)
}

func testDaemonMigration15ToLatest(t *testing.T) {
	// TEST: Migration from v15 to latest using 'ipfs daemon --migrate'
	// This tests the mixed migration path: external binary (15→16) + embedded (16→latest)
	node := setupStaticV15Repo(t)

	// Create mock migration binary for 15→16 (16→17 will use embedded migration)
	mockBinDir := createMockMigrationBinary(t, "15", "16")
	customPath := buildCustomPath(mockBinDir)

	configPath := filepath.Join(node.Dir, "config")
	versionPath := filepath.Join(node.Dir, "version")

	// Verify starting conditions
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	require.Equal(t, "15", strings.TrimSpace(string(versionData)), "Should start at version 15")

	// Read original config to verify preservation of key fields
	var originalConfig map[string]interface{}
	configData, err := os.ReadFile(configPath)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(configData, &originalConfig))

	originalPeerID := getNestedValue(originalConfig, "Identity.PeerID")

	// Run dual migration using daemon --migrate
	stdoutOutput, migrationSuccess := runDaemonWithLegacyMigrationMonitoring(t, node, customPath)

	// Debug output
	t.Logf("Daemon output:\n%s", stdoutOutput)

	// Verify hybrid migration was successful
	require.True(t, migrationSuccess, "Hybrid migration should have been successful")
	require.Contains(t, stdoutOutput, "Phase 1: External migration from v15 to v16", "Should detect external migration phase")
	// Verify each embedded migration step from 16 to latest
	verifyMigrationSteps(t, stdoutOutput, 16, ipfs.RepoVersion, true)
	require.Contains(t, stdoutOutput, fmt.Sprintf("Phase 2: Embedded migration from v16 to v%d", ipfs.RepoVersion), "Should detect embedded migration phase")
	require.Contains(t, stdoutOutput, "Hybrid migration completed successfully", "Should confirm hybrid migration completion")

	// Verify final version is latest
	versionData, err = os.ReadFile(versionPath)
	require.NoError(t, err)
	latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
	require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Version should be updated to latest")

	// Verify config is still valid JSON and key fields preserved
	var finalConfig map[string]interface{}
	configData, err = os.ReadFile(configPath)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(configData, &finalConfig), "Config should remain valid JSON")

	// Verify essential fields preserved
	finalPeerID := getNestedValue(finalConfig, "Identity.PeerID")
	require.Equal(t, originalPeerID, finalPeerID, "Identity.PeerID should be preserved")

	// Verify bootstrap exists (may be modified by 16→17 migration)
	finalBootstrap := getNestedValue(finalConfig, "Bootstrap")
	require.NotNil(t, finalBootstrap, "Bootstrap should exist after migration")

	// Verify AutoConf was added by 16→17 migration
	autoConf := getNestedValue(finalConfig, "AutoConf")
	require.NotNil(t, autoConf, "AutoConf should be added by 16→17 migration")
}

func testRepoMigration15ToLatest(t *testing.T) {
	// TEST: Migration from v15 to latest using 'ipfs repo migrate'
	// Comparison test to verify repo migrate produces same results as daemon migrate
	node := setupStaticV15Repo(t)

	// Create mock migration binary for 15→16 (16→17 will use embedded migration)
	mockBinDir := createMockMigrationBinary(t, "15", "16")
	customPath := buildCustomPath(mockBinDir)

	configPath := filepath.Join(node.Dir, "config")
	versionPath := filepath.Join(node.Dir, "version")

	// Verify starting version
	versionData, err := os.ReadFile(versionPath)
	require.NoError(t, err)
	require.Equal(t, "15", strings.TrimSpace(string(versionData)), "Should start at version 15")

	// Run migration using 'ipfs repo migrate' with custom PATH
	result := runMigrationWithCustomPath(node, customPath, "repo", "migrate")
	require.Empty(t, result.Stderr.String(), "Migration should succeed without errors")

	// Verify final version is latest
	versionData, err = os.ReadFile(versionPath)
	require.NoError(t, err)
	latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion)
	require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Version should be updated to latest")

	// Verify config is valid JSON
	var finalConfig map[string]interface{}
	configData, err := os.ReadFile(configPath)
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(configData, &finalConfig), "Config should remain valid JSON")

	// Verify essential fields exist
	require.NotNil(t, getNestedValue(finalConfig, "Identity.PeerID"), "Identity.PeerID should exist")
	require.NotNil(t, getNestedValue(finalConfig, "Bootstrap"), "Bootstrap should exist")
	require.NotNil(t, getNestedValue(finalConfig, "AutoConf"), "AutoConf should be added")
}

// setupStaticV15Repo creates a test node using static v15 repo fixture
// This ensures tests remain stable and validates migration from very old repos
func setupStaticV15Repo(t *testing.T) *harness.Node {
	// Get path to static v15 repo fixture
	// (relative to the package directory, i.e. test/cli/migrations)
	v15FixturePath := "testdata/v15-repo"

	// Create temporary test directory using Go's testing temp dir
	tmpDir := t.TempDir()

	// Use the built binary (should be in PATH)
	node := harness.BuildNode("ipfs", tmpDir, 0)

	// Copy static fixture to test directory
	cloneStaticRepoFixture(t, v15FixturePath, node.Dir)

	return node
}

// runDaemonWithLegacyMigrationMonitoring monitors for hybrid migration patterns
func runDaemonWithLegacyMigrationMonitoring(t *testing.T, node *harness.Node, customPath string) (string, bool) {
	// Monitor for hybrid migration completion - use "Hybrid migration
completed successfully" as success pattern + stdoutOutput, daemonStarted := runDaemonWithMigrationMonitoringCustomEnv(t, node, "Using hybrid migration strategy", "Hybrid migration completed successfully", map[string]string{ + "PATH": customPath, // Pass custom PATH with our mock binaries + }) + + // Check for hybrid migration patterns in output + hasHybridStart := strings.Contains(stdoutOutput, "Using hybrid migration strategy") + hasPhase1 := strings.Contains(stdoutOutput, "Phase 1: External migration from v15 to v16") + hasPhase2 := strings.Contains(stdoutOutput, fmt.Sprintf("Phase 2: Embedded migration from v16 to v%d", ipfs.RepoVersion)) + hasHybridSuccess := strings.Contains(stdoutOutput, "Hybrid migration completed successfully") + + // Success requires daemon to start and hybrid migration patterns to be detected + hybridMigrationSuccess := daemonStarted && hasHybridStart && hasPhase1 && hasPhase2 && hasHybridSuccess + + return stdoutOutput, hybridMigrationSuccess +} + +// runDaemonWithMigrationMonitoringCustomEnv is like runDaemonWithMigrationMonitoring but allows custom environment +func runDaemonWithMigrationMonitoringCustomEnv(t *testing.T, node *harness.Node, migrationPattern, successPattern string, extraEnv map[string]string) (string, bool) { + // Create context with timeout as safety net + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + // Set up daemon command with output monitoring + cmd := exec.CommandContext(ctx, node.IPFSBin, "daemon", "--migrate") + cmd.Dir = node.Dir + + // Set environment (especially IPFS_PATH) + for k, v := range node.Runner.Env { + cmd.Env = append(cmd.Env, k+"="+v) + } + + // Add extra environment variables (like PATH with mock binaries) + for k, v := range extraEnv { + cmd.Env = append(cmd.Env, k+"="+v) + } + + // Set up pipes for output monitoring + stdout, err := cmd.StdoutPipe() + require.NoError(t, err) + stderr, err := cmd.StderrPipe() + require.NoError(t, err) + + // Start 
the daemon + require.NoError(t, cmd.Start()) + + // Monitor output from both streams + var outputBuffer strings.Builder + done := make(chan bool) + migrationStarted := false + migrationCompleted := false + + go func() { + scanner := bufio.NewScanner(io.MultiReader(stdout, stderr)) + for scanner.Scan() { + line := scanner.Text() + outputBuffer.WriteString(line + "\n") + + // Check for migration start + if strings.Contains(line, migrationPattern) { + migrationStarted = true + } + + // Check for migration completion + if strings.Contains(line, successPattern) { + migrationCompleted = true + } + + // Check for daemon ready + if strings.Contains(line, "Daemon is ready") { + done <- true + return + } + } + done <- false + }() + + // Wait for daemon to be ready or timeout + daemonReady := false + select { + case ready := <-done: + daemonReady = ready + case <-ctx.Done(): + t.Log("Daemon startup timed out") + } + + // Stop the daemon using ipfs shutdown command for graceful shutdown + if cmd.Process != nil { + shutdownCmd := exec.Command(node.IPFSBin, "shutdown") + shutdownCmd.Dir = node.Dir + for k, v := range node.Runner.Env { + shutdownCmd.Env = append(shutdownCmd.Env, k+"="+v) + } + + if err := shutdownCmd.Run(); err != nil { + // If graceful shutdown fails, force kill + _ = cmd.Process.Kill() + } + + // Wait for process to exit + _ = cmd.Wait() + } + + return outputBuffer.String(), daemonReady && migrationStarted && migrationCompleted +} + +// buildCustomPath creates a custom PATH with mock migration binaries prepended. +// This is necessary for test isolation when running tests in parallel with t.Parallel(). +// Without isolated PATH handling, parallel tests can interfere with each other through +// global PATH modifications, causing tests to download real migration binaries instead +// of using the test mocks. 
+func buildCustomPath(mockBinDirs ...string) string { + // Prepend mock directories to ensure they're found first + pathElements := append(mockBinDirs, os.Getenv("PATH")) + return strings.Join(pathElements, string(filepath.ListSeparator)) +} + +// runMigrationWithCustomPath runs a migration command with a custom PATH environment. +// This ensures the migration uses our mock binaries instead of downloading real ones. +func runMigrationWithCustomPath(node *harness.Node, customPath string, args ...string) *harness.RunResult { + return node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: args, + CmdOpts: []harness.CmdOpt{ + func(cmd *exec.Cmd) { + // Remove existing PATH entries using slices.DeleteFunc + cmd.Env = slices.DeleteFunc(cmd.Env, func(s string) bool { + return strings.HasPrefix(s, "PATH=") + }) + // Add custom PATH + cmd.Env = append(cmd.Env, "PATH="+customPath) + }, + }, + }) +} + +// createMockMigrationBinary creates a platform-agnostic Go binary for migration testing. +// Returns the directory containing the binary to be added to PATH. 
+func createMockMigrationBinary(t *testing.T, fromVer, toVer string) string { + // Create bin directory for migration binaries + binDir := t.TempDir() + + // Create Go source for mock migration binary + scriptName := fmt.Sprintf("fs-repo-%s-to-%s", fromVer, toVer) + sourceFile := filepath.Join(binDir, scriptName+".go") + binaryPath := filepath.Join(binDir, scriptName) + if runtime.GOOS == "windows" { + binaryPath += ".exe" + } + + // Generate minimal mock migration binary code + goSource := fmt.Sprintf(`package main +import ("fmt"; "os"; "path/filepath"; "strings"; "time") +func main() { + var path string + var revert bool + for _, a := range os.Args[1:] { + if strings.HasPrefix(a, "-path=") { path = a[6:] } + if a == "-revert" { revert = true } + } + if path == "" { fmt.Fprintln(os.Stderr, "missing -path="); os.Exit(1) } + + from, to := "%s", "%s" + if revert { from, to = to, from } + fmt.Printf("fake applying %%s-to-%%s repo migration\n", from, to) + + // Create and immediately remove lock file to simulate proper locking behavior + lockPath := filepath.Join(path, "repo.lock") + lockFile, err := os.Create(lockPath) + if err != nil && !os.IsExist(err) { + fmt.Fprintf(os.Stderr, "Error creating lock: %%v\n", err) + os.Exit(1) + } + if lockFile != nil { + lockFile.Close() + defer os.Remove(lockPath) + } + + // Small delay to simulate migration work + time.Sleep(10 * time.Millisecond) + + if err := os.WriteFile(filepath.Join(path, "version"), []byte(to), 0644); err != nil { + fmt.Fprintf(os.Stderr, "Error: %%v\n", err) + os.Exit(1) + } +}`, fromVer, toVer) + + require.NoError(t, os.WriteFile(sourceFile, []byte(goSource), 0644)) + + // Compile the Go binary + cmd := exec.Command("go", "build", "-o", binaryPath, sourceFile) + cmd.Env = append(os.Environ(), "CGO_ENABLED=0") // Ensure static binary + require.NoError(t, cmd.Run()) + + // Verify the binary exists and is executable + _, err := os.Stat(binaryPath) + require.NoError(t, err, "Mock binary should exist") + + // 
Return the bin directory to be added to PATH + return binDir +} + +// expectedMigrationSteps generates the expected migration step strings for a version range. +// For forward migrations (from < to), it returns strings like "Running embedded migration fs-repo-16-to-17" +// For reverse migrations (from > to), it returns strings for the reverse path. +func expectedMigrationSteps(from, to int, forward bool) []string { + var steps []string + + if forward { + // Forward migration: increment by 1 each step + for v := from; v < to; v++ { + migrationName := fmt.Sprintf("fs-repo-%d-to-%d", v, v+1) + steps = append(steps, fmt.Sprintf("Running embedded migration %s", migrationName)) + } + } else { + // Reverse migration: decrement by 1 each step + for v := from; v > to; v-- { + migrationName := fmt.Sprintf("fs-repo-%d-to-%d", v, v-1) + steps = append(steps, fmt.Sprintf("Running reverse migration %s", migrationName)) + } + } + + return steps +} + +// verifyMigrationSteps checks that all expected migration steps appear in the output +func verifyMigrationSteps(t *testing.T, output string, from, to int, forward bool) { + steps := expectedMigrationSteps(from, to, forward) + for _, step := range steps { + require.Contains(t, output, step, "Migration output should contain: %s", step) + } +} + +// getNestedValue retrieves a nested value from a config map using dot notation +func getNestedValue(config map[string]interface{}, path string) interface{} { + parts := strings.Split(path, ".") + current := interface{}(config) + + for _, part := range parts { + switch v := current.(type) { + case map[string]interface{}: + current = v[part] + default: + return nil + } + if current == nil { + return nil + } + } + + return current +} + +func testRepoReverseHybridMigrationLatestTo15(t *testing.T) { + // TEST: Reverse hybrid migration from latest to v15 using 'ipfs repo migrate --to=15 --allow-downgrade' + // This tests reverse hybrid migration: embedded (17→16) + external (16→15) + + // Start 
with v15 fixture and migrate forward to latest to create proper backup files + node := setupStaticV15Repo(t) + + // Create mock migration binaries for both forward and reverse migrations + mockBinDirs := []string{ + createMockMigrationBinary(t, "15", "16"), // for forward migration + createMockMigrationBinary(t, "16", "15"), // for downgrade + } + customPath := buildCustomPath(mockBinDirs...) + + configPath := filepath.Join(node.Dir, "config") + versionPath := filepath.Join(node.Dir, "version") + + // Step 1: Forward migration from v15 to latest to create backup files + t.Logf("Step 1: Forward migration v15 → v%d", ipfs.RepoVersion) + result := runMigrationWithCustomPath(node, customPath, "repo", "migrate") + + // Debug: print the output to see what happened + t.Logf("Forward migration stdout:\n%s", result.Stdout.String()) + t.Logf("Forward migration stderr:\n%s", result.Stderr.String()) + + require.Empty(t, result.Stderr.String(), "Forward migration should succeed without errors") + + // Verify we're at latest version after forward migration + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + latestVersion := fmt.Sprintf("%d", ipfs.RepoVersion) + require.Equal(t, latestVersion, strings.TrimSpace(string(versionData)), "Should be at latest version after forward migration") + + // Read config after forward migration to use as baseline for downgrade + var latestConfig map[string]interface{} + configData, err := os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &latestConfig)) + + originalPeerID := getNestedValue(latestConfig, "Identity.PeerID") + + // Step 2: Reverse hybrid migration from latest to v15 + t.Logf("Step 2: Reverse hybrid migration v%d → v15", ipfs.RepoVersion) + result = runMigrationWithCustomPath(node, customPath, "repo", "migrate", "--to=15", "--allow-downgrade") + require.Empty(t, result.Stderr.String(), "Reverse hybrid migration should succeed without errors") + + // Debug output 
+ t.Logf("Downgrade migration output:\n%s", result.Stdout.String()) + + // Verify final version is 15 + versionData, err = os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "15", strings.TrimSpace(string(versionData)), "Version should be updated to 15") + + // Verify config is still valid JSON and key fields preserved + var finalConfig map[string]interface{} + configData, err = os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &finalConfig), "Config should remain valid JSON") + + // Verify essential fields preserved + finalPeerID := getNestedValue(finalConfig, "Identity.PeerID") + require.Equal(t, originalPeerID, finalPeerID, "Identity.PeerID should be preserved") + + // Verify bootstrap exists (may be modified by migrations) + finalBootstrap := getNestedValue(finalConfig, "Bootstrap") + require.NotNil(t, finalBootstrap, "Bootstrap should exist after migration") + + // AutoConf should be removed by the downgrade (was added in 16→17) + autoConf := getNestedValue(finalConfig, "AutoConf") + require.Nil(t, autoConf, "AutoConf should be removed by downgrade to v15") +} diff --git a/test/cli/migrations/testdata/v15-repo/blocks/SHARDING b/test/cli/migrations/testdata/v15-repo/blocks/SHARDING new file mode 100644 index 000000000..a153331da Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/SHARDING differ diff --git a/test/cli/migrations/testdata/v15-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data b/test/cli/migrations/testdata/v15-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data new file mode 100644 index 000000000..9553a942d Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data differ diff --git a/test/cli/migrations/testdata/v15-repo/blocks/_README b/test/cli/migrations/testdata/v15-repo/blocks/_README new file mode 100644 index 
000000000..572e7e4d0 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/_README differ diff --git a/test/cli/migrations/testdata/v15-repo/blocks/diskUsage.cache b/test/cli/migrations/testdata/v15-repo/blocks/diskUsage.cache new file mode 100644 index 000000000..15876dc11 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/diskUsage.cache differ diff --git a/test/cli/migrations/testdata/v15-repo/config b/test/cli/migrations/testdata/v15-repo/config new file mode 100644 index 000000000..c789c2cea Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/config differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/000001.log b/test/cli/migrations/testdata/v15-repo/datastore/000001.log new file mode 100644 index 000000000..9591b22ef Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/000001.log differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/CURRENT b/test/cli/migrations/testdata/v15-repo/datastore/CURRENT new file mode 100644 index 000000000..feda7d6b2 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/CURRENT differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/LOCK b/test/cli/migrations/testdata/v15-repo/datastore/LOCK new file mode 100644 index 000000000..e69de29bb diff --git a/test/cli/migrations/testdata/v15-repo/datastore/LOG b/test/cli/migrations/testdata/v15-repo/datastore/LOG new file mode 100644 index 000000000..74e0f5f6b Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/LOG differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/MANIFEST-000000 b/test/cli/migrations/testdata/v15-repo/datastore/MANIFEST-000000 new file mode 100644 index 000000000..9d54f6733 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/MANIFEST-000000 differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore_spec 
b/test/cli/migrations/testdata/v15-repo/datastore_spec new file mode 100644 index 000000000..7bf9626c2 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore_spec differ diff --git a/test/cli/migrations/testdata/v15-repo/version b/test/cli/migrations/testdata/v15-repo/version new file mode 100644 index 000000000..60d3b2f4a Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/version differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/SHARDING b/test/cli/migrations/testdata/v16-repo/blocks/SHARDING new file mode 100644 index 000000000..a153331da Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/SHARDING differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data b/test/cli/migrations/testdata/v16-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data new file mode 100644 index 000000000..9553a942d Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/_README b/test/cli/migrations/testdata/v16-repo/blocks/_README new file mode 100644 index 000000000..572e7e4d0 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/_README differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/diskUsage.cache b/test/cli/migrations/testdata/v16-repo/blocks/diskUsage.cache new file mode 100644 index 000000000..15876dc11 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/diskUsage.cache differ diff --git a/test/cli/migrations/testdata/v16-repo/config b/test/cli/migrations/testdata/v16-repo/config new file mode 100644 index 000000000..dcbceb49c Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/config differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/000001.log 
b/test/cli/migrations/testdata/v16-repo/datastore/000001.log new file mode 100644 index 000000000..51686e36c Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/000001.log differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/CURRENT b/test/cli/migrations/testdata/v16-repo/datastore/CURRENT new file mode 100644 index 000000000..feda7d6b2 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/CURRENT differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/LOCK b/test/cli/migrations/testdata/v16-repo/datastore/LOCK new file mode 100644 index 000000000..e69de29bb diff --git a/test/cli/migrations/testdata/v16-repo/datastore/LOG b/test/cli/migrations/testdata/v16-repo/datastore/LOG new file mode 100644 index 000000000..c19fc88e4 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/LOG differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/MANIFEST-000000 b/test/cli/migrations/testdata/v16-repo/datastore/MANIFEST-000000 new file mode 100644 index 000000000..9d54f6733 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/MANIFEST-000000 differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore_spec b/test/cli/migrations/testdata/v16-repo/datastore_spec new file mode 100644 index 000000000..7bf9626c2 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore_spec differ diff --git a/test/cli/migrations/testdata/v16-repo/version b/test/cli/migrations/testdata/v16-repo/version new file mode 100644 index 000000000..b6a7d89c6 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/version differ diff --git a/test/cli/name_test.go b/test/cli/name_test.go index 42c649c09..cf5df2bb0 100644 --- a/test/cli/name_test.go +++ b/test/cli/name_test.go @@ -103,6 +103,7 @@ func TestName(t *testing.T) { }) node.StartDaemon() + defer node.StopDaemon() t.Run("Resolving self offline succeeds (daemon on)", func(t *testing.T) 
{ res = node.IPFS("name", "resolve", "--offline", "/ipns/"+name.String()) @@ -147,16 +148,18 @@ func TestName(t *testing.T) { t.Run("Fails to publish in offline mode", func(t *testing.T) { t.Parallel() node := makeDaemon(t, nil).StartDaemon("--offline") + defer node.StopDaemon() res := node.RunIPFS("name", "publish", "/ipfs/"+fixtureCid) require.Error(t, res.Err) require.Equal(t, 1, res.ExitCode()) - require.Contains(t, res.Stderr.String(), `can't publish while offline`) + require.Contains(t, res.Stderr.String(), "can't publish while offline: pass `--allow-offline` to override or `--allow-delegated` if Ipns.DelegatedPublishers are set up") }) t.Run("Publish V2-only record", func(t *testing.T) { t.Parallel() node := makeDaemon(t, nil).StartDaemon() + defer node.StopDaemon() ipnsName := ipns.NameFromPeer(node.PeerID()).String() ipnsPath := ipns.NamespacePrefix + ipnsName publishPath := "/ipfs/" + fixtureCid @@ -187,6 +190,7 @@ func TestName(t *testing.T) { t.Parallel() node := makeDaemon(t, nil).StartDaemon() + t.Cleanup(func() { node.StopDaemon() }) ipnsPath := ipns.NamespacePrefix + ipns.NameFromPeer(node.PeerID()).String() publishPath := "/ipfs/" + fixtureCid @@ -227,6 +231,7 @@ func TestName(t *testing.T) { t.Run("Inspect with verification using wrong RSA key errors", func(t *testing.T) { t.Parallel() node := makeDaemon(t, nil).StartDaemon() + defer node.StopDaemon() // Prepare RSA Key 1 res := node.IPFS("key", "gen", "--type=rsa", "--size=4096", "key1") @@ -263,4 +268,72 @@ func TestName(t *testing.T) { require.NoError(t, err) require.False(t, val.Validation.Valid) }) + + t.Run("Publishing with custom sequence number", func(t *testing.T) { + t.Parallel() + + node := makeDaemon(t, nil) + publishPath := "/ipfs/" + fixtureCid + name := ipns.NameFromPeer(node.PeerID()) + + t.Run("Publish with sequence=0 is not allowed", func(t *testing.T) { + // Sequence=0 is never valid, even on a fresh node + res := node.RunIPFS("name", "publish", "--allow-offline", "--ttl=0", 
"--sequence=0", publishPath) + require.NotEqual(t, 0, res.ExitCode(), "Expected publish with sequence=0 to fail") + require.Contains(t, res.Stderr.String(), "sequence number must be greater than the current record sequence") + }) + + t.Run("Publish with sequence=1 on fresh node", func(t *testing.T) { + // Sequence=1 is the minimum valid sequence number for first publish + res := node.IPFS("name", "publish", "--allow-offline", "--ttl=0", "--sequence=1", publishPath) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath), res.Stdout.String()) + }) + + t.Run("Publish with sequence=42", func(t *testing.T) { + res := node.IPFS("name", "publish", "--allow-offline", "--ttl=0", "--sequence=42", publishPath) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath), res.Stdout.String()) + }) + + t.Run("Publish with large sequence number", func(t *testing.T) { + res := node.IPFS("name", "publish", "--allow-offline", "--ttl=0", "--sequence=18446744073709551615", publishPath) // Max uint64 + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath), res.Stdout.String()) + }) + }) + + t.Run("Sequence number monotonic check", func(t *testing.T) { + t.Parallel() + + node := makeDaemon(t, nil).StartDaemon() + defer node.StopDaemon() + publishPath1 := "/ipfs/" + fixtureCid + publishPath2 := "/ipfs/" + dagCid // Different content + name := ipns.NameFromPeer(node.PeerID()) + + // First, publish with a high sequence number (1000) + res := node.IPFS("name", "publish", "--ttl=0", "--sequence=1000", publishPath1) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath1), res.Stdout.String()) + + // Verify the record was published successfully + res = node.IPFS("name", "resolve", name.String()) + require.Contains(t, res.Stdout.String(), publishPath1) + + // Now try to publish different content with a LOWER sequence number (500) + // This should fail due to monotonic sequence check + 
res = node.RunIPFS("name", "publish", "--ttl=0", "--sequence=500", publishPath2) + require.NotEqual(t, 0, res.ExitCode(), "Expected publish with lower sequence to fail") + require.Contains(t, res.Stderr.String(), "sequence number", "Expected error about sequence number") + + // Verify the original content is still published (not overwritten) + res = node.IPFS("name", "resolve", name.String()) + require.Contains(t, res.Stdout.String(), publishPath1, "Original content should still be published") + require.NotContains(t, res.Stdout.String(), publishPath2, "New content should not have been published") + + // Publishing with a HIGHER sequence number should succeed + res = node.IPFS("name", "publish", "--ttl=0", "--sequence=2000", publishPath2) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath2), res.Stdout.String()) + + // Verify the new content is now published + res = node.IPFS("name", "resolve", name.String()) + require.Contains(t, res.Stdout.String(), publishPath2, "New content should now be published") + }) } diff --git a/test/cli/p2p_test.go b/test/cli/p2p_test.go new file mode 100644 index 000000000..2400d7d8b --- /dev/null +++ b/test/cli/p2p_test.go @@ -0,0 +1,430 @@ +package cli + +import ( + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os/exec" + "slices" + "syscall" + "testing" + "time" + + "github.com/ipfs/kubo/core/commands" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +// waitForListenerCount waits until the node has exactly the expected number of listeners. 
+func waitForListenerCount(t *testing.T, node *harness.Node, expectedCount int) { + t.Helper() + require.Eventually(t, func() bool { + lsOut := node.IPFS("p2p", "ls", "--enc=json") + var lsResult commands.P2PLsOutput + if err := json.Unmarshal(lsOut.Stdout.Bytes(), &lsResult); err != nil { + return false + } + return len(lsResult.Listeners) == expectedCount + }, 5*time.Second, 100*time.Millisecond, "expected %d listeners", expectedCount) +} + +// waitForListenerProtocol waits until the node has a listener with the given protocol. +func waitForListenerProtocol(t *testing.T, node *harness.Node, protocol string) { + t.Helper() + require.Eventually(t, func() bool { + lsOut := node.IPFS("p2p", "ls", "--enc=json") + var lsResult commands.P2PLsOutput + if err := json.Unmarshal(lsOut.Stdout.Bytes(), &lsResult); err != nil { + return false + } + return slices.ContainsFunc(lsResult.Listeners, func(l commands.P2PListenerInfoOutput) bool { + return l.Protocol == protocol + }) + }, 5*time.Second, 100*time.Millisecond, "expected listener with protocol %s", protocol) +} + +func TestP2PForeground(t *testing.T) { + t.Parallel() + + t.Run("listen foreground creates listener and removes on interrupt", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + node.StartDaemon() + + listenPort := harness.NewRandPort() + + // Start foreground listener asynchronously + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"p2p", "listen", "--foreground", "/x/fgtest", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, res.Err) + + // Wait for listener to be created + waitForListenerProtocol(t, node, "/x/fgtest") + + // Send SIGTERM + _ = res.Cmd.Process.Signal(syscall.SIGTERM) + _ = res.Cmd.Wait() + + // Wait for listener to be removed + waitForListenerCount(t, node, 0) + }) + + t.Run("listen foreground text 
output on SIGTERM", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + node.StartDaemon() + + listenPort := harness.NewRandPort() + + // Run without --enc=json to test actual text output users see + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"p2p", "listen", "--foreground", "/x/sigterm", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, res.Err) + + waitForListenerProtocol(t, node, "/x/sigterm") + + _ = res.Cmd.Process.Signal(syscall.SIGTERM) + _ = res.Cmd.Wait() + + // Verify stdout shows "waiting for interrupt" message + stdout := res.Stdout.String() + require.Contains(t, stdout, "waiting for interrupt") + + // Note: "Received interrupt, removing listener" message is NOT visible to CLI on SIGTERM + // because the command runs in the daemon via RPC and the response stream closes before + // the message can be emitted. The important behavior is verified in the first test: + // the listener IS removed when SIGTERM is sent. 
+ }) + + t.Run("forward foreground creates forwarder and removes on interrupt", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + }) + nodes.StartDaemons().Connect() + + forwardPort := harness.NewRandPort() + + // Start foreground forwarder asynchronously on node 0 + res := nodes[0].Runner.Run(harness.RunRequest{ + Path: nodes[0].IPFSBin, + Args: []string{"p2p", "forward", "--foreground", "/x/fgfwd", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", forwardPort), "/p2p/" + nodes[1].PeerID().String()}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, res.Err) + + // Wait for forwarder to be created + waitForListenerCount(t, nodes[0], 1) + + // Send SIGTERM + _ = res.Cmd.Process.Signal(syscall.SIGTERM) + _ = res.Cmd.Wait() + + // Wait for forwarder to be removed + waitForListenerCount(t, nodes[0], 0) + }) + + t.Run("forward foreground text output on SIGTERM", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + }) + nodes.StartDaemons().Connect() + + forwardPort := harness.NewRandPort() + + // Run without --enc=json to test actual text output users see + res := nodes[0].Runner.Run(harness.RunRequest{ + Path: nodes[0].IPFSBin, + Args: []string{"p2p", "forward", "--foreground", "/x/fwdsigterm", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", forwardPort), "/p2p/" + nodes[1].PeerID().String()}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, res.Err) + + waitForListenerCount(t, nodes[0], 1) + + _ = res.Cmd.Process.Signal(syscall.SIGTERM) + _ = res.Cmd.Wait() + + // Verify stdout shows "waiting for interrupt" message + stdout := res.Stdout.String() + require.Contains(t, stdout, "waiting for interrupt") + + // Note: "Received interrupt, removing forwarder" message is NOT visible to CLI 
on SIGTERM + // because the response stream closes before the message can be emitted. + }) + + t.Run("listen without foreground returns immediately and persists", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + node.StartDaemon() + + listenPort := harness.NewRandPort() + + // This should return immediately (not block) + node.IPFS("p2p", "listen", "/x/nofg", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)) + + // Listener should still exist + waitForListenerProtocol(t, node, "/x/nofg") + + // Clean up + node.IPFS("p2p", "close", "-p", "/x/nofg") + }) + + t.Run("listen foreground text output on p2p close", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + node.StartDaemon() + + listenPort := harness.NewRandPort() + + // Run without --enc=json to test actual text output users see + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"p2p", "listen", "--foreground", "/x/closetest", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, res.Err) + + // Wait for listener to be created + waitForListenerProtocol(t, node, "/x/closetest") + + // Close the listener via ipfs p2p close command + node.IPFS("p2p", "close", "-p", "/x/closetest") + + // Wait for foreground command to exit (it should exit quickly after close) + done := make(chan error, 1) + go func() { + done <- res.Cmd.Wait() + }() + + select { + case <-done: + // Good - command exited + case <-time.After(5 * time.Second): + _ = res.Cmd.Process.Kill() + t.Fatal("foreground command did not exit after listener was closed via ipfs p2p close") + } + + // Wait for listener to be removed + waitForListenerCount(t, node, 0) + + // Verify text output shows BOTH messages when closed via p2p close + // (unlike SIGTERM, the stream 
is still open so "Received interrupt" is emitted) + out := res.Stdout.String() + require.Contains(t, out, "waiting for interrupt") + require.Contains(t, out, "Received interrupt, removing listener") + }) + + t.Run("forward foreground text output on p2p close", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + }) + nodes.StartDaemons().Connect() + + forwardPort := harness.NewRandPort() + + // Run without --enc=json to test actual text output users see + res := nodes[0].Runner.Run(harness.RunRequest{ + Path: nodes[0].IPFSBin, + Args: []string{"p2p", "forward", "--foreground", "/x/fwdclose", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", forwardPort), "/p2p/" + nodes[1].PeerID().String()}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, res.Err) + + // Wait for forwarder to be created + waitForListenerCount(t, nodes[0], 1) + + // Close the forwarder via ipfs p2p close command + nodes[0].IPFS("p2p", "close", "-a") + + // Wait for foreground command to exit + done := make(chan error, 1) + go func() { + done <- res.Cmd.Wait() + }() + + select { + case <-done: + // Good - command exited + case <-time.After(5 * time.Second): + _ = res.Cmd.Process.Kill() + t.Fatal("foreground command did not exit after forwarder was closed via ipfs p2p close") + } + + // Wait for forwarder to be removed + waitForListenerCount(t, nodes[0], 0) + + // Verify text output shows BOTH messages when closed via p2p close + out := res.Stdout.String() + require.Contains(t, out, "waiting for interrupt") + require.Contains(t, out, "Received interrupt, removing forwarder") + }) + + t.Run("listen foreground tunnel transfers data and cleans up on SIGTERM", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + }) + 
nodes.StartDaemons().Connect() + + httpServerPort := harness.NewRandPort() + forwardPort := harness.NewRandPort() + + // Start HTTP server + expectedBody := "Hello from p2p tunnel!" + httpServer := &http.Server{ + Addr: fmt.Sprintf("127.0.0.1:%d", httpServerPort), + Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte(expectedBody)) + }), + } + listener, err := net.Listen("tcp", httpServer.Addr) + require.NoError(t, err) + go func() { _ = httpServer.Serve(listener) }() + defer httpServer.Close() + + // Node 0: listen --foreground + listenRes := nodes[0].Runner.Run(harness.RunRequest{ + Path: nodes[0].IPFSBin, + Args: []string{"p2p", "listen", "--foreground", "/x/httptest", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", httpServerPort)}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, listenRes.Err) + + // Wait for listener to be created + waitForListenerProtocol(t, nodes[0], "/x/httptest") + + // Node 1: forward (non-foreground) + nodes[1].IPFS("p2p", "forward", "/x/httptest", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", forwardPort), "/p2p/"+nodes[0].PeerID().String()) + + // Verify data flows through tunnel + resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", forwardPort)) + require.NoError(t, err) + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + require.NoError(t, err) + require.Equal(t, expectedBody, string(body)) + + // Clean up forwarder on node 1 + nodes[1].IPFS("p2p", "close", "-a") + + // SIGTERM the listen --foreground command + _ = listenRes.Cmd.Process.Signal(syscall.SIGTERM) + _ = listenRes.Cmd.Wait() + + // Wait for listener to be removed on node 0 + waitForListenerCount(t, nodes[0], 0) + }) + + t.Run("forward foreground tunnel transfers data and cleans up on SIGTERM", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + }) + nodes.StartDaemons().Connect() 
+ + httpServerPort := harness.NewRandPort() + forwardPort := harness.NewRandPort() + + // Start HTTP server + expectedBody := "Hello from forward foreground tunnel!" + httpServer := &http.Server{ + Addr: fmt.Sprintf("127.0.0.1:%d", httpServerPort), + Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte(expectedBody)) + }), + } + listener, err := net.Listen("tcp", httpServer.Addr) + require.NoError(t, err) + go func() { _ = httpServer.Serve(listener) }() + defer httpServer.Close() + + // Node 0: listen (non-foreground) + nodes[0].IPFS("p2p", "listen", "/x/httptest", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", httpServerPort)) + + // Node 1: forward --foreground + forwardRes := nodes[1].Runner.Run(harness.RunRequest{ + Path: nodes[1].IPFSBin, + Args: []string{"p2p", "forward", "--foreground", "/x/httptest", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", forwardPort), "/p2p/" + nodes[0].PeerID().String()}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, forwardRes.Err) + + // Wait for forwarder to be created + waitForListenerCount(t, nodes[1], 1) + + // Verify data flows through tunnel + resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", forwardPort)) + require.NoError(t, err) + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + require.NoError(t, err) + require.Equal(t, expectedBody, string(body)) + + // SIGTERM the forward --foreground command + _ = forwardRes.Cmd.Process.Signal(syscall.SIGTERM) + _ = forwardRes.Cmd.Wait() + + // Wait for forwarder to be removed on node 1 + waitForListenerCount(t, nodes[1], 0) + + // Clean up listener on node 0 + nodes[0].IPFS("p2p", "close", "-a") + }) + + t.Run("foreground command exits when daemon shuts down", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFS("config", "--json", "Experimental.Libp2pStreamMounting", "true") + node.StartDaemon() + + listenPort := harness.NewRandPort() + + // Start foreground listener + res := 
node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"p2p", "listen", "--foreground", "/x/daemontest", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)}, + RunFunc: (*exec.Cmd).Start, + }) + require.NoError(t, res.Err) + + // Wait for listener to be created + waitForListenerProtocol(t, node, "/x/daemontest") + + // Stop the daemon + node.StopDaemon() + + // Wait for foreground command to exit + done := make(chan error, 1) + go func() { + done <- res.Cmd.Wait() + }() + + select { + case <-done: + // Good - foreground command exited when daemon stopped + case <-time.After(5 * time.Second): + _ = res.Cmd.Process.Kill() + t.Fatal("foreground command did not exit when daemon was stopped") + } + }) +} diff --git a/test/cli/peering_test.go b/test/cli/peering_test.go index 9c6ab975d..227e83f18 100644 --- a/test/cli/peering_test.go +++ b/test/cli/peering_test.go @@ -62,6 +62,7 @@ func TestPeering(t *testing.T) { h, nodes := harness.CreatePeerNodes(t, 3, peerings) nodes.StartDaemons() + defer nodes.StopDaemons() assertPeerings(h, nodes, peerings) nodes[0].Disconnect(nodes[1]) @@ -74,6 +75,7 @@ func TestPeering(t *testing.T) { h, nodes := harness.CreatePeerNodes(t, 3, peerings) nodes.StartDaemons() + defer nodes.StopDaemons() assertPeerings(h, nodes, peerings) nodes[2].Disconnect(nodes[1]) @@ -85,6 +87,7 @@ func TestPeering(t *testing.T) { peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}} h, nodes := harness.CreatePeerNodes(t, 3, peerings) + defer nodes.StopDaemons() nodes[0].StartDaemon() nodes[1].StartDaemon() assertPeerings(h, nodes, []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}}) @@ -99,6 +102,7 @@ func TestPeering(t *testing.T) { h, nodes := harness.CreatePeerNodes(t, 3, peerings) nodes.StartDaemons() + defer nodes.StopDaemons() assertPeerings(h, nodes, peerings) nodes[2].StopDaemon() diff --git a/test/cli/pin_ls_names_test.go b/test/cli/pin_ls_names_test.go new file mode 100644 index 000000000..f8ae76885 --- 
/dev/null +++ b/test/cli/pin_ls_names_test.go @@ -0,0 +1,534 @@ +package cli + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +// pinInfo represents the JSON structure for pin ls output +type pinInfo struct { + Type string `json:"Type"` + Name string `json:"Name"` +} + +// pinLsJSON represents the JSON output structure for pin ls command +type pinLsJSON struct { + Keys map[string]pinInfo `json:"Keys"` +} + +// Helper function to initialize a test node with daemon +func setupTestNode(t *testing.T) *harness.Node { + t.Helper() + node := harness.NewT(t).NewNode().Init() + node.StartDaemon("--offline") + t.Cleanup(func() { + node.StopDaemon() + }) + return node +} + +// Helper function to assert pin name and CID are present in output +func assertPinOutput(t *testing.T, output, cid, pinName string) { + t.Helper() + require.Contains(t, output, pinName, "pin name '%s' not found in output: %s", pinName, output) + require.Contains(t, output, cid, "CID %s not found in output: %s", cid, output) +} + +// Helper function to assert CID is present but name is not +func assertCIDOnly(t *testing.T, output, cid string) { + t.Helper() + require.Contains(t, output, cid, "CID %s not found in output: %s", cid, output) +} + +// Helper function to assert neither CID nor name are present +func assertNotPresent(t *testing.T, output, cid, pinName string) { + t.Helper() + require.NotContains(t, output, cid, "CID %s should not be present in output: %s", cid, output) + require.NotContains(t, output, pinName, "pin name '%s' should not be present in output: %s", pinName, output) +} + +// Test that pin ls returns names when querying specific CIDs with --names flag +func TestPinLsWithNamesForSpecificCIDs(t *testing.T) { + t.Parallel() + + t.Run("pin ls with specific CID returns name", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Add content without 
pinning + cidA := node.IPFSAddStr("content A", "--pin=false") + cidB := node.IPFSAddStr("content B", "--pin=false") + cidC := node.IPFSAddStr("content C", "--pin=false") + + // Pin with names + node.IPFS("pin", "add", "--name=pin-a", cidA) + node.IPFS("pin", "add", "--name=pin-b", cidB) + node.IPFS("pin", "add", cidC) // No name + + // Test: pin ls --names should return the name + res := node.IPFS("pin", "ls", cidA, "--names") + assertPinOutput(t, res.Stdout.String(), cidA, "pin-a") + + res = node.IPFS("pin", "ls", cidB, "--names") + assertPinOutput(t, res.Stdout.String(), cidB, "pin-b") + + // Test: pin without name should work + res = node.IPFS("pin", "ls", cidC, "--names") + output := res.Stdout.String() + assertCIDOnly(t, output, cidC) + require.Contains(t, output, "recursive", "pin type 'recursive' not found for CID %s in output: %s", cidC, output) + + // Test: without --names flag, no names returned + res = node.IPFS("pin", "ls", cidA) + output = res.Stdout.String() + require.NotContains(t, output, "pin-a", "pin name 'pin-a' should not be present without --names flag, but found in: %s", output) + assertCIDOnly(t, output, cidA) + }) + + t.Run("pin ls with multiple CIDs returns names", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Create test content + cidA := node.IPFSAddStr("multi A", "--pin=false") + cidB := node.IPFSAddStr("multi B", "--pin=false") + + // Pin with names + node.IPFS("pin", "add", "--name=multi-pin-a", cidA) + node.IPFS("pin", "add", "--name=multi-pin-b", cidB) + + // Test multiple CIDs at once + res := node.IPFS("pin", "ls", cidA, cidB, "--names") + output := res.Stdout.String() + assertPinOutput(t, output, cidA, "multi-pin-a") + assertPinOutput(t, output, cidB, "multi-pin-b") + }) + + t.Run("pin ls without CID lists all pins with names", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Create and pin content with names + cidA := node.IPFSAddStr("list all A", "--pin=false") + cidB := 
node.IPFSAddStr("list all B", "--pin=false") + cidC := node.IPFSAddStr("list all C", "--pin=false") + + node.IPFS("pin", "add", "--name=all-pin-a", cidA) + node.IPFS("pin", "add", "--name=all-pin-b", "--recursive=false", cidB) + node.IPFS("pin", "add", cidC) // No name + + // Test: pin ls --names (without CID) should list all pins with their names + res := node.IPFS("pin", "ls", "--names") + output := res.Stdout.String() + + // Should contain all pins with their names + assertPinOutput(t, output, cidA, "all-pin-a") + assertPinOutput(t, output, cidB, "all-pin-b") + assertCIDOnly(t, output, cidC) + + // Pin C should appear but without a name (just type) + lines := strings.Split(output, "\n") + for _, line := range lines { + if strings.Contains(line, cidC) { + // Should have CID and type but no name + require.Contains(t, line, "recursive", "pin type 'recursive' not found for unnamed pin %s in line: %s", cidC, line) + require.NotContains(t, line, "all-pin", "pin name should not be present for unnamed pin %s, but found in line: %s", cidC, line) + } + } + }) + + t.Run("pin ls --type with --names", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Create test content + cidDirect := node.IPFSAddStr("direct content", "--pin=false") + cidRecursive := node.IPFSAddStr("recursive content", "--pin=false") + + // Create a DAG for indirect testing + childCid := node.IPFSAddStr("child for indirect", "--pin=false") + parentContent := fmt.Sprintf(`{"link": "/ipfs/%s"}`, childCid) + parentCid := node.PipeStrToIPFS(parentContent, "dag", "put", "--input-codec=json", "--store-codec=dag-cbor").Stdout.Trimmed() + + // Pin with different types and names + node.IPFS("pin", "add", "--name=direct-pin", "--recursive=false", cidDirect) + node.IPFS("pin", "add", "--name=recursive-pin", cidRecursive) + node.IPFS("pin", "add", "--name=parent-pin", parentCid) + + // Test: --type=direct --names + res := node.IPFS("pin", "ls", "--type=direct", "--names") + output := 
res.Stdout.String() + assertPinOutput(t, output, cidDirect, "direct-pin") + assertNotPresent(t, output, cidRecursive, "recursive-pin") + + // Test: --type=recursive --names + res = node.IPFS("pin", "ls", "--type=recursive", "--names") + output = res.Stdout.String() + assertPinOutput(t, output, cidRecursive, "recursive-pin") + assertPinOutput(t, output, parentCid, "parent-pin") + assertNotPresent(t, output, cidDirect, "direct-pin") + + // Test: --type=indirect with proper directory structure + // Create a directory with a file for indirect pin testing + dirPath := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(dirPath, "file.txt"), []byte("test content"), 0644)) + + // Add directory recursively + dirAddRes := node.IPFS("add", "-r", "-q", dirPath) + dirCidStr := strings.TrimSpace(dirAddRes.Stdout.Lines()[len(dirAddRes.Stdout.Lines())-1]) + + // Add file separately without pinning to get its CID + fileAddRes := node.IPFS("add", "-q", "--pin=false", filepath.Join(dirPath, "file.txt")) + fileCidStr := strings.TrimSpace(fileAddRes.Stdout.String()) + + // Check if file shows as indirect + res = node.IPFS("pin", "ls", "--type=indirect", fileCidStr) + output = res.Stdout.String() + require.Contains(t, output, fileCidStr, "indirect pin CID %s not found in output: %s", fileCidStr, output) + require.Contains(t, output, "indirect through "+dirCidStr, "indirect relationship not found for CID %s through %s in output: %s", fileCidStr, dirCidStr, output) + + // Test: --type=all --names + res = node.IPFS("pin", "ls", "--type=all", "--names") + output = res.Stdout.String() + assertPinOutput(t, output, cidDirect, "direct-pin") + assertPinOutput(t, output, cidRecursive, "recursive-pin") + assertPinOutput(t, output, parentCid, "parent-pin") + // Indirect pins are included in --type=all output + }) + + t.Run("pin ls JSON output with names", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Add and pin content with name + cidA := node.IPFSAddStr("json 
content", "--pin=false") + node.IPFS("pin", "add", "--name=json-pin", cidA) + + // Test JSON output with specific CID + res := node.IPFS("pin", "ls", cidA, "--names", "--enc=json") + var pinOutput pinLsJSON + err := json.Unmarshal([]byte(res.Stdout.String()), &pinOutput) + require.NoError(t, err, "failed to unmarshal JSON output: %s", res.Stdout.String()) + + pinData, ok := pinOutput.Keys[cidA] + require.True(t, ok, "CID %s should be in Keys map, got: %+v", cidA, pinOutput.Keys) + require.Equal(t, "recursive", pinData.Type, "expected pin type 'recursive', got '%s'", pinData.Type) + require.Equal(t, "json-pin", pinData.Name, "expected pin name 'json-pin', got '%s'", pinData.Name) + + // Without names flag + res = node.IPFS("pin", "ls", cidA, "--enc=json") + err = json.Unmarshal([]byte(res.Stdout.String()), &pinOutput) + require.NoError(t, err, "failed to unmarshal JSON output: %s", res.Stdout.String()) + + pinData, ok = pinOutput.Keys[cidA] + require.True(t, ok, "CID %s should be in Keys map, got: %+v", cidA, pinOutput.Keys) + // Name should be empty without --names flag + require.Equal(t, "", pinData.Name, "pin name should be empty without --names flag, got '%s'", pinData.Name) + + // Test JSON output without CID (list all) + res = node.IPFS("pin", "ls", "--names", "--enc=json") + var listOutput pinLsJSON + err = json.Unmarshal([]byte(res.Stdout.String()), &listOutput) + require.NoError(t, err, "failed to unmarshal JSON list output: %s", res.Stdout.String()) + // Should have at least one pin (the one we just added) + require.NotEmpty(t, listOutput.Keys, "pin list should not be empty") + // Check that our pin is in the list + pinData, ok = listOutput.Keys[cidA] + require.True(t, ok, "our pin with CID %s should be in the list, got: %+v", cidA, listOutput.Keys) + require.Equal(t, "json-pin", pinData.Name, "expected pin name 'json-pin' in list, got '%s'", pinData.Name) + }) + + t.Run("direct and indirect pins with names", func(t *testing.T) { + t.Parallel() + node := 
setupTestNode(t) + + // Create a small DAG: parent -> child + childCid := node.IPFSAddStr("child content", "--pin=false") + + // Create parent that references child + parentContent := fmt.Sprintf(`{"link": "/ipfs/%s"}`, childCid) + parentCid := node.PipeStrToIPFS(parentContent, "dag", "put", "--input-codec=json", "--store-codec=dag-cbor").Stdout.Trimmed() + + // Pin child directly with a name + node.IPFS("pin", "add", "--name=direct-child", "--recursive=false", childCid) + + // Pin parent recursively with a name + node.IPFS("pin", "add", "--name=recursive-parent", parentCid) + + // Check direct pin with specific CID + res := node.IPFS("pin", "ls", "--type=direct", childCid, "--names") + output := res.Stdout.String() + require.Contains(t, output, "direct-child", "pin name 'direct-child' not found in output: %s", output) + require.Contains(t, output, "direct", "pin type 'direct' not found in output: %s", output) + + // Check recursive pin with specific CID + res = node.IPFS("pin", "ls", "--type=recursive", parentCid, "--names") + output = res.Stdout.String() + require.Contains(t, output, "recursive-parent", "pin name 'recursive-parent' not found in output: %s", output) + require.Contains(t, output, "recursive", "pin type 'recursive' not found in output: %s", output) + + // Child is both directly pinned and indirectly pinned through parent + // Both relationships are valid and can be checked + }) + + t.Run("pin update preserves name", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Create two pieces of content + cidOld := node.IPFSAddStr("old content", "--pin=false") + cidNew := node.IPFSAddStr("new content", "--pin=false") + + // Pin with name + node.IPFS("pin", "add", "--name=my-pin", cidOld) + + // Update pin + node.IPFS("pin", "update", cidOld, cidNew) + + // Check that new pin has the same name + res := node.IPFS("pin", "ls", cidNew, "--names") + require.Contains(t, res.Stdout.String(), "my-pin", "pin name 'my-pin' not preserved after 
update, output: %s", res.Stdout.String()) + + // Old pin should not exist + res = node.RunIPFS("pin", "ls", cidOld) + require.Equal(t, 1, res.ExitCode(), "expected exit code 1 for unpinned CID, got %d", res.ExitCode()) + require.Contains(t, res.Stderr.String(), "is not pinned", "expected 'is not pinned' error for old CID %s, got: %s", cidOld, res.Stderr.String()) + }) + + t.Run("pin ls with invalid CID returns error", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + res := node.RunIPFS("pin", "ls", "invalid-cid") + require.Equal(t, 1, res.ExitCode(), "expected exit code 1 for invalid CID, got %d", res.ExitCode()) + require.Contains(t, res.Stderr.String(), "invalid", "expected 'invalid' in error message, got: %s", res.Stderr.String()) + }) + + t.Run("pin ls with unpinned CID returns error", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Add content without pinning + cid := node.IPFSAddStr("unpinned content", "--pin=false") + + res := node.RunIPFS("pin", "ls", cid) + require.Equal(t, 1, res.ExitCode(), "expected exit code 1 for unpinned CID, got %d", res.ExitCode()) + require.Contains(t, res.Stderr.String(), "is not pinned", "expected 'is not pinned' error for CID %s, got: %s", cid, res.Stderr.String()) + }) + + t.Run("pin with special characters in name", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + testCases := []struct { + name string + pinName string + }{ + {"unicode", "test-📌-pin"}, + {"spaces", "test pin name"}, + {"special chars", "test!@#$%"}, + {"path-like", "test/pin/name"}, + {"dots", "test.pin.name"}, + {"long name", strings.Repeat("a", 255)}, + {"empty name", ""}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cid := node.IPFSAddStr("content for "+tc.name, "--pin=false") + node.IPFS("pin", "add", "--name="+tc.pinName, cid) + + res := node.IPFS("pin", "ls", cid, "--names") + if tc.pinName != "" { + require.Contains(t, res.Stdout.String(), 
tc.pinName, + "pin name '%s' not found in output for test case '%s'", tc.pinName, tc.name) + } + }) + } + }) + + t.Run("concurrent pin operations with names", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Create multiple goroutines adding pins with names + numPins := 10 + done := make(chan struct{}, numPins) + + for i := 0; i < numPins; i++ { + go func(idx int) { + defer func() { done <- struct{}{} }() + + content := fmt.Sprintf("concurrent content %d", idx) + cid := node.IPFSAddStr(content, "--pin=false") + pinName := fmt.Sprintf("concurrent-pin-%d", idx) + node.IPFS("pin", "add", "--name="+pinName, cid) + }(i) + } + + // Wait for all goroutines + for i := 0; i < numPins; i++ { + <-done + } + + // Verify all pins have correct names + res := node.IPFS("pin", "ls", "--names") + output := res.Stdout.String() + for i := 0; i < numPins; i++ { + pinName := fmt.Sprintf("concurrent-pin-%d", i) + require.Contains(t, output, pinName, + "concurrent pin name '%s' not found in output", pinName) + } + }) + + t.Run("pin rm removes name association", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Add and pin with name + cid := node.IPFSAddStr("content to remove", "--pin=false") + node.IPFS("pin", "add", "--name=to-be-removed", cid) + + // Verify pin exists with name + res := node.IPFS("pin", "ls", cid, "--names") + require.Contains(t, res.Stdout.String(), "to-be-removed") + + // Remove pin + node.IPFS("pin", "rm", cid) + + // Verify pin and name are gone + res = node.RunIPFS("pin", "ls", cid) + require.Equal(t, 1, res.ExitCode()) + require.Contains(t, res.Stderr.String(), "is not pinned") + }) + + t.Run("garbage collection preserves named pins", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Add content with and without pin names + cidNamed := node.IPFSAddStr("named content", "--pin=false") + cidUnnamed := node.IPFSAddStr("unnamed content", "--pin=false") + cidUnpinned := node.IPFSAddStr("unpinned content", 
"--pin=false") + + node.IPFS("pin", "add", "--name=important-data", cidNamed) + node.IPFS("pin", "add", cidUnnamed) + + // Run garbage collection + node.IPFS("repo", "gc") + + // Named and unnamed pins should still exist + res := node.IPFS("pin", "ls", cidNamed, "--names") + require.Contains(t, res.Stdout.String(), "important-data") + + res = node.IPFS("pin", "ls", cidUnnamed) + require.Contains(t, res.Stdout.String(), cidUnnamed) + + // Unpinned content should be gone (cat should fail) + res = node.RunIPFS("cat", cidUnpinned) + require.NotEqual(t, 0, res.ExitCode(), "unpinned content should be garbage collected") + }) + + t.Run("pin add with same name can be used for multiple pins", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Add two different pieces of content + cid1 := node.IPFSAddStr("first content", "--pin=false") + cid2 := node.IPFSAddStr("second content", "--pin=false") + + // Pin both with the same name - this is allowed + node.IPFS("pin", "add", "--name=shared-name", cid1) + node.IPFS("pin", "add", "--name=shared-name", cid2) + + // List all pins with names + res := node.IPFS("pin", "ls", "--names") + output := res.Stdout.String() + + // Both CIDs should be pinned + require.Contains(t, output, cid1) + require.Contains(t, output, cid2) + + // Both pins can have the same name + lines := strings.Split(output, "\n") + foundCid1WithName := false + foundCid2WithName := false + for _, line := range lines { + if strings.Contains(line, cid1) && strings.Contains(line, "shared-name") { + foundCid1WithName = true + } + if strings.Contains(line, cid2) && strings.Contains(line, "shared-name") { + foundCid2WithName = true + } + } + require.True(t, foundCid1WithName, "first pin should have the name") + require.True(t, foundCid2WithName, "second pin should have the name") + }) + + t.Run("pin names persist across daemon restarts", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.StartDaemon("--offline") + + // 
Add content with pin name + cid := node.IPFSAddStr("persistent content") + node.IPFS("pin", "add", "--name=persistent-pin", cid) + + // Restart daemon + node.StopDaemon() + node.StartDaemon("--offline") + + // Check pin name persisted + res := node.IPFS("pin", "ls", cid, "--names") + require.Contains(t, res.Stdout.String(), "persistent-pin", + "pin name should persist across daemon restarts") + + node.StopDaemon() + }) +} + +// TestPinLsEdgeCases tests edge cases for pin ls command +func TestPinLsEdgeCases(t *testing.T) { + t.Parallel() + + t.Run("invalid pin type returns error", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Try to list pins with invalid type + res := node.RunIPFS("pin", "ls", "--type=invalid") + require.NotEqual(t, 0, res.ExitCode()) + require.Contains(t, res.Stderr.String(), "invalid type 'invalid'") + require.Contains(t, res.Stderr.String(), "must be one of {direct, indirect, recursive, all}") + }) + + t.Run("non-existent path returns proper error", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Try to list a non-existent CID + fakeCID := "QmNonExistent123456789" + res := node.RunIPFS("pin", "ls", fakeCID) + require.NotEqual(t, 0, res.ExitCode()) + }) + + t.Run("unpinned CID returns not pinned error", func(t *testing.T) { + t.Parallel() + node := setupTestNode(t) + + // Add content but don't pin it explicitly (it's just in blockstore) + unpinnedCID := node.IPFSAddStr("unpinned content", "--pin=false") + + // Try to list specific unpinned CID + res := node.RunIPFS("pin", "ls", unpinnedCID) + require.NotEqual(t, 0, res.ExitCode()) + require.Contains(t, res.Stderr.String(), "is not pinned") + }) +} diff --git a/test/cli/pin_name_validation_test.go b/test/cli/pin_name_validation_test.go new file mode 100644 index 000000000..049118642 --- /dev/null +++ b/test/cli/pin_name_validation_test.go @@ -0,0 +1,184 @@ +package cli + +import ( + "fmt" + "strings" + "testing" + + 
"github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +func TestPinNameValidation(t *testing.T) { + t.Parallel() + + // Create a test node and add a test file + node := harness.NewT(t).NewNode().Init().StartDaemon("--offline") + defer node.StopDaemon() + + // Add a test file to get a CID + testContent := "test content for pin name validation" + testCID := node.IPFSAddStr(testContent, "--pin=false") + + t.Run("pin add accepts valid names", func(t *testing.T) { + testCases := []struct { + name string + pinName string + description string + }{ + { + name: "empty_name", + pinName: "", + description: "Empty name should be allowed", + }, + { + name: "short_name", + pinName: "test", + description: "Short ASCII name should be allowed", + }, + { + name: "max_255_bytes", + pinName: strings.Repeat("a", 255), + description: "Exactly 255 bytes should be allowed", + }, + { + name: "unicode_within_limit", + pinName: "测试名称🔥", // Chinese characters and emoji + description: "Unicode characters within 255 bytes should be allowed", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var args []string + if tc.pinName != "" { + args = []string{"pin", "add", "--name", tc.pinName, testCID} + } else { + args = []string{"pin", "add", testCID} + } + + res := node.RunIPFS(args...) 
+ require.Equal(t, 0, res.ExitCode(), tc.description) + + // Clean up - unpin + node.RunIPFS("pin", "rm", testCID) + }) + } + }) + + t.Run("pin add rejects names exceeding 255 bytes", func(t *testing.T) { + testCases := []struct { + name string + pinName string + description string + }{ + { + name: "256_bytes", + pinName: strings.Repeat("a", 256), + description: "256 bytes should be rejected", + }, + { + name: "300_bytes", + pinName: strings.Repeat("b", 300), + description: "300 bytes should be rejected", + }, + { + name: "unicode_exceeding_limit", + pinName: strings.Repeat("测", 100), // Each Chinese character is 3 bytes, total 300 bytes + description: "Unicode string exceeding 255 bytes should be rejected", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res := node.RunIPFS("pin", "add", "--name", tc.pinName, testCID) + require.NotEqual(t, 0, res.ExitCode(), tc.description) + require.Contains(t, res.Stderr.String(), "max 255 bytes", "Error should mention the 255 byte limit") + }) + } + }) + + t.Run("pin ls with name filter validates length", func(t *testing.T) { + // Test valid filter + res := node.RunIPFS("pin", "ls", "--name", strings.Repeat("a", 255)) + require.Equal(t, 0, res.ExitCode(), "255-byte name filter should be accepted") + + // Test invalid filter + res = node.RunIPFS("pin", "ls", "--name", strings.Repeat("a", 256)) + require.NotEqual(t, 0, res.ExitCode(), "256-byte name filter should be rejected") + require.Contains(t, res.Stderr.String(), "max 255 bytes", "Error should mention the 255 byte limit") + }) +} + +func TestAddPinNameValidation(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon("--offline") + defer node.StopDaemon() + + // Create a test file + testFile := "test.txt" + node.WriteBytes(testFile, []byte("test content for add command")) + + t.Run("ipfs add with --pin-name accepts valid names", func(t *testing.T) { + testCases := []struct { + name string + pinName string + 
description string + }{ + { + name: "short_name", + pinName: "test-add", + description: "Short ASCII name should be allowed", + }, + { + name: "max_255_bytes", + pinName: strings.Repeat("x", 255), + description: "Exactly 255 bytes should be allowed", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res := node.RunIPFS("add", fmt.Sprintf("--pin-name=%s", tc.pinName), "-q", testFile) + require.Equal(t, 0, res.ExitCode(), tc.description) + cid := strings.TrimSpace(res.Stdout.String()) + + // Verify pin exists with name + lsRes := node.RunIPFS("pin", "ls", "--names", "--type=recursive", cid) + require.Equal(t, 0, lsRes.ExitCode()) + require.Contains(t, lsRes.Stdout.String(), tc.pinName, "Pin should have the specified name") + + // Clean up + node.RunIPFS("pin", "rm", cid) + }) + } + }) + + t.Run("ipfs add with --pin-name rejects names exceeding 255 bytes", func(t *testing.T) { + testCases := []struct { + name string + pinName string + description string + }{ + { + name: "256_bytes", + pinName: strings.Repeat("y", 256), + description: "256 bytes should be rejected", + }, + { + name: "500_bytes", + pinName: strings.Repeat("z", 500), + description: "500 bytes should be rejected", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res := node.RunIPFS("add", fmt.Sprintf("--pin-name=%s", tc.pinName), testFile) + require.NotEqual(t, 0, res.ExitCode(), tc.description) + require.Contains(t, res.Stderr.String(), "max 255 bytes", "Error should mention the 255 byte limit") + }) + } + }) +} diff --git a/test/cli/ping_test.go b/test/cli/ping_test.go index 9470e67d8..85de29cf9 100644 --- a/test/cli/ping_test.go +++ b/test/cli/ping_test.go @@ -15,6 +15,7 @@ func TestPing(t *testing.T) { t.Run("other", func(t *testing.T) { t.Parallel() nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + defer nodes.StopDaemons() node1 := nodes[0] node2 := nodes[1] @@ -25,6 +26,7 @@ func TestPing(t *testing.T) { 
t.Run("ping unreachable peer", func(t *testing.T) { t.Parallel() nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + defer nodes.StopDaemons() node1 := nodes[0] badPeer := "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJx" @@ -37,6 +39,7 @@ func TestPing(t *testing.T) { t.Run("self", func(t *testing.T) { t.Parallel() nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons() + defer nodes.StopDaemons() node1 := nodes[0] node2 := nodes[1] @@ -52,6 +55,7 @@ func TestPing(t *testing.T) { t.Run("0", func(t *testing.T) { t.Parallel() nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + defer nodes.StopDaemons() node1 := nodes[0] node2 := nodes[1] @@ -63,6 +67,7 @@ func TestPing(t *testing.T) { t.Run("offline", func(t *testing.T) { t.Parallel() nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + defer nodes.StopDaemons() node1 := nodes[0] node2 := nodes[1] diff --git a/test/cli/pinning_remote_test.go b/test/cli/pinning_remote_test.go index fede942ba..6c802aaa0 100644 --- a/test/cli/pinning_remote_test.go +++ b/test/cli/pinning_remote_test.go @@ -9,8 +9,8 @@ import ( "time" "github.com/google/uuid" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/ipfs/kubo/test/cli/testutils/pinningservice" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -51,6 +51,7 @@ func TestRemotePinning(t *testing.T) { node.IPFS("config", "--json", "Pinning.RemoteServices.svc.Policies.MFS.Enable", "true") node.StartDaemon() + t.Cleanup(func() { node.StopDaemon() }) node.IPFS("files", "cp", "/ipfs/bafkqaaa", "/mfs-pinning-test-"+uuid.NewString()) node.IPFS("files", "flush") @@ -133,6 +134,8 @@ func TestRemotePinning(t *testing.T) { t.Run("pin remote service ls --stat", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + _, svcURL := runPinningService(t, authToken) 
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -155,6 +158,7 @@ func TestRemotePinning(t *testing.T) { t.Run("adding service with invalid URL fails", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("pin", "remote", "service", "add", "svc", "invalid-service.example.com", "key") assert.Equal(t, 1, res.ExitCode()) @@ -168,6 +172,7 @@ func TestRemotePinning(t *testing.T) { t.Run("unauthorized pinning service calls fail", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() _, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, "othertoken") @@ -180,6 +185,7 @@ func TestRemotePinning(t *testing.T) { t.Run("pinning service calls fail when there is a wrong path", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() _, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL+"/invalid-path", authToken) @@ -191,6 +197,7 @@ func TestRemotePinning(t *testing.T) { t.Run("pinning service calls fail when DNS resolution fails", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() node.IPFS("pin", "remote", "service", "add", "svc", "https://invalid-service.example.com", authToken) res := node.RunIPFS("pin", "remote", "ls", "--service=svc") @@ -201,6 +208,7 @@ func TestRemotePinning(t *testing.T) { t.Run("pin remote service rm", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() node.IPFS("pin", "remote", "service", "add", "svc", "https://example.com", authToken) node.IPFS("pin", "remote", "service", "rm", "svc") res := node.IPFS("pin", "remote", "service", "ls") @@ -225,6 +233,7 @@ func TestRemotePinning(t *testing.T) { 
t.Run("'ipfs pin remote add --background=true'", func(t *testing.T) { node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() svc, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -266,6 +275,7 @@ func TestRemotePinning(t *testing.T) { t.Run("'ipfs pin remote add --background=false'", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() svc, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -287,6 +297,7 @@ func TestRemotePinning(t *testing.T) { t.Run("'ipfs pin remote ls' with multiple statuses", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() svc, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -340,6 +351,7 @@ func TestRemotePinning(t *testing.T) { t.Run("'ipfs pin remote ls' by CID", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() svc, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -350,7 +362,7 @@ func TestRemotePinning(t *testing.T) { pin.Status = "pinned" transitionedCh <- struct{}{} } - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) node.IPFS("pin", "remote", "add", "--background=false", "--service=svc", hash) <-transitionedCh res := node.IPFS("pin", "remote", "ls", "--service=svc", "--cid="+hash, "--enc=json").Stdout.String() @@ -360,6 +372,7 @@ func TestRemotePinning(t *testing.T) { t.Run("'ipfs pin remote rm --name' without --force when multiple pins match", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() svc, svcURL := runPinningService(t, 
authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -368,7 +381,7 @@ func TestRemotePinning(t *testing.T) { defer pin.M.Unlock() pin.Status = "pinned" } - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) @@ -388,6 +401,7 @@ func TestRemotePinning(t *testing.T) { t.Run("'ipfs pin remote rm --name --force' remove multiple pins", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() svc, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -396,7 +410,7 @@ func TestRemotePinning(t *testing.T) { defer pin.M.Unlock() pin.Status = "pinned" } - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) @@ -408,6 +422,7 @@ func TestRemotePinning(t *testing.T) { t.Run("'ipfs pin remote rm --force' removes all pins", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() svc, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) @@ -417,7 +432,7 @@ func TestRemotePinning(t *testing.T) { pin.Status = "pinned" } for i := 0; i < 4; i++ { - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) name := fmt.Sprintf("--name=%d", i) node.IPFS("pin", "remote", "add", "--service=svc", "--name="+name, hash) } @@ -438,7 +453,7 @@ func TestRemotePinning(t *testing.T) { _, svcURL := runPinningService(t, 
authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) res := node.IPFS("pin", "remote", "add", "--service=svc", "--background", hash) warningMsg := "WARNING: the local node is offline and remote pinning may fail if there is no other provider for this CID" assert.Contains(t, res.Stdout.String(), warningMsg) diff --git a/test/cli/pins_test.go b/test/cli/pins_test.go index 3e3325a01..8e98aa7fe 100644 --- a/test/cli/pins_test.go +++ b/test/cli/pins_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" . "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" @@ -25,6 +26,7 @@ func testPins(t *testing.T, args testPinsArgs) { node := harness.NewT(t).NewNode().Init() if args.runDaemon { node.StartDaemon("--offline") + defer node.StopDaemon() } strs := []string{"a", "b", "c", "d", "e", "f", "g"} @@ -126,6 +128,7 @@ func testPinsErrorReporting(t *testing.T, args testPinsArgs) { node := harness.NewT(t).NewNode().Init() if args.runDaemon { node.StartDaemon("--offline") + defer node.StopDaemon() } randomCID := "Qme8uX5n9hn15pw9p6WcVKoziyyC9LXv4LEgvsmKMULjnV" res := node.RunIPFS(StrCat("pin", "add", args.pinArg, randomCID)...) 
@@ -141,8 +144,9 @@ func testPinDAG(t *testing.T, args testPinsArgs) { node := h.NewNode().Init() if args.runDaemon { node.StartDaemon("--offline") + defer node.StopDaemon() } - bytes := RandomBytes(1 << 20) // 1 MiB + bytes := random.Bytes(1 << 20) // 1 MiB tmpFile := h.WriteToTemp(string(bytes)) cid := node.IPFS(StrCat("add", args.pinArg, "--pin=false", "-q", tmpFile)...).Stdout.Trimmed() @@ -167,16 +171,17 @@ func testPinProgress(t *testing.T, args testPinsArgs) { if args.runDaemon { node.StartDaemon("--offline") + defer node.StopDaemon() } - bytes := RandomBytes(1 << 20) // 1 MiB + bytes := random.Bytes(1 << 20) // 1 MiB tmpFile := h.WriteToTemp(string(bytes)) cid := node.IPFS(StrCat("add", args.pinArg, "--pin=false", "-q", tmpFile)...).Stdout.Trimmed() res := node.RunIPFS("pin", "add", "--progress", cid) node.Runner.AssertNoError(res) - assert.Contains(t, res.Stderr.String(), " 5 nodes") + assert.Contains(t, res.Stderr.String(), " 5 nodes (1.0 MB)") }) } @@ -218,8 +223,8 @@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidAStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidBStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidAStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidBStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") _ = node.IPFS("pin", "add", "--name", "testPin", cidAStr) @@ -246,9 +251,9 @@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidAStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidBStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidCStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidAStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidBStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidCStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") outA := cidAStr + " recursive testPin" outB := cidBStr + " recursive testPin" @@ -284,7 +289,7 
@@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") outBefore := cidStr + " recursive A" outAfter := cidStr + " recursive B" @@ -305,8 +310,8 @@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidAStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidBStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidAStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidBStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") _ = node.IPFS("pin", "add", "--name", "testPinJson", cidAStr) diff --git a/test/cli/provide_stats_test.go b/test/cli/provide_stats_test.go new file mode 100644 index 000000000..fede31c0f --- /dev/null +++ b/test/cli/provide_stats_test.go @@ -0,0 +1,524 @@ +package cli + +import ( + "bufio" + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + provideStatEventuallyTimeout = 15 * time.Second + provideStatEventuallyTick = 100 * time.Millisecond +) + +// sweepStats mirrors the subset of JSON fields actually used by tests. +// This type is intentionally independent from upstream types to detect breaking changes. +// Only includes fields that tests actually access to keep it simple and maintainable. +type sweepStats struct { + Sweep struct { + Closed bool `json:"closed"` + Connectivity struct { + Status string `json:"status"` + } `json:"connectivity"` + Queues struct { + PendingKeyProvides int `json:"pending_key_provides"` + } `json:"queues"` + Schedule struct { + Keys int `json:"keys"` + } `json:"schedule"` + } `json:"Sweep"` +} + +// parseSweepStats parses JSON output from ipfs provide stat command. 
+// Tests will naturally fail if upstream removes/renames fields we depend on. +func parseSweepStats(t *testing.T, jsonOutput string) sweepStats { + t.Helper() + var stats sweepStats + err := json.Unmarshal([]byte(jsonOutput), &stats) + require.NoError(t, err, "failed to parse provide stat JSON output") + return stats +} + +// TestProvideStatAllMetricsDocumented verifies that all metrics output by +// `ipfs provide stat --all` are documented in docs/provide-stats.md. +// +// The test works as follows: +// 1. Starts an IPFS node with Provide.DHT.SweepEnabled=true +// 2. Runs `ipfs provide stat --all` to get all metrics +// 3. Parses the output and extracts all lines with exactly 2 spaces indent +// (these are the actual metric lines) +// 4. Reads docs/provide-stats.md and extracts all ### section headers +// 5. Ensures every metric in the output has a corresponding ### section in the docs +func TestProvideStatAllMetricsDocumented(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + + // Enable sweep provider + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + + node.StartDaemon() + defer node.StopDaemon() + + // Run `ipfs provide stat --all` to get all metrics + res := node.IPFS("provide", "stat", "--all") + require.NoError(t, res.Err) + + // Parse metrics from the command output + // Only consider lines with exactly two spaces of padding (" ") + // These are the actual metric lines as shown in provide.go + outputMetrics := make(map[string]bool) + scanner := bufio.NewScanner(strings.NewReader(res.Stdout.String())) + // Only consider lines that start with exactly two spaces + indent := " " + for scanner.Scan() { + line := scanner.Text() + if !strings.HasPrefix(line, indent) || strings.HasPrefix(line, indent+" ") { + continue + } + + // Remove the indent + line = strings.TrimPrefix(line, indent) + + // Extract metric name - everything before the first ':' + parts := strings.SplitN(line, 
":", 2) + if len(parts) >= 1 { + metricName := strings.TrimSpace(parts[0]) + if metricName != "" { + outputMetrics[metricName] = true + } + } + } + require.NoError(t, scanner.Err()) + + // Read docs/provide-stats.md + // Find the repo root by looking for go.mod + repoRoot := ".." + for range 6 { + if _, err := os.Stat(filepath.Join(repoRoot, "go.mod")); err == nil { + break + } + repoRoot = filepath.Join("..", repoRoot) + } + docsPath := filepath.Join(repoRoot, "docs", "provide-stats.md") + docsFile, err := os.Open(docsPath) + require.NoError(t, err, "Failed to open provide-stats.md") + defer docsFile.Close() + + // Parse all ### metric headers from the docs + documentedMetrics := make(map[string]bool) + docsScanner := bufio.NewScanner(docsFile) + for docsScanner.Scan() { + line := docsScanner.Text() + if metricName, found := strings.CutPrefix(line, "### "); found { + metricName = strings.TrimSpace(metricName) + documentedMetrics[metricName] = true + } + } + require.NoError(t, docsScanner.Err()) + + // Check that all output metrics are documented + var undocumentedMetrics []string + for metric := range outputMetrics { + if !documentedMetrics[metric] { + undocumentedMetrics = append(undocumentedMetrics, metric) + } + } + + require.Empty(t, undocumentedMetrics, + "The following metrics from 'ipfs provide stat --all' are not documented in docs/provide-stats.md: %v\n"+ + "All output metrics: %v\n"+ + "Documented metrics: %v", + undocumentedMetrics, outputMetrics, documentedMetrics) +} + +// TestProvideStatBasic tests basic functionality of ipfs provide stat +func TestProvideStatBasic(t *testing.T) { + t.Parallel() + + t.Run("works with Sweep provider and shows brief output", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + res := node.IPFS("provide", "stat") + require.NoError(t, 
res.Err) + assert.Empty(t, res.Stderr.String()) + + output := res.Stdout.String() + // Brief output should contain specific full labels + assert.Contains(t, output, "Provide queue:") + assert.Contains(t, output, "Reprovide queue:") + assert.Contains(t, output, "CIDs scheduled:") + assert.Contains(t, output, "Regions scheduled:") + assert.Contains(t, output, "Avg record holders:") + assert.Contains(t, output, "Ongoing provides:") + assert.Contains(t, output, "Ongoing reprovides:") + assert.Contains(t, output, "Total CIDs provided:") + }) + + t.Run("requires daemon to be online", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + + res := node.RunIPFS("provide", "stat") + assert.Error(t, res.Err) + assert.Contains(t, res.Stderr.String(), "this command must be run in online mode") + }) +} + +// TestProvideStatFlags tests various command flags +func TestProvideStatFlags(t *testing.T) { + t.Parallel() + + t.Run("--all flag shows all sections with headings", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + res := node.IPFS("provide", "stat", "--all") + require.NoError(t, res.Err) + + output := res.Stdout.String() + // Should contain section headings with colons + assert.Contains(t, output, "Connectivity:") + assert.Contains(t, output, "Queues:") + assert.Contains(t, output, "Schedule:") + assert.Contains(t, output, "Timings:") + assert.Contains(t, output, "Network:") + assert.Contains(t, output, "Operations:") + assert.Contains(t, output, "Workers:") + + // Should contain detailed metrics not in brief mode + assert.Contains(t, output, "Uptime:") + assert.Contains(t, output, "Cycle started:") + assert.Contains(t, output, "Reprovide interval:") + assert.Contains(t, output, "Peers swept:") + assert.Contains(t, output, "Full keyspace coverage:") 
+ }) + + t.Run("--compact requires --all", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + res := node.RunIPFS("provide", "stat", "--compact") + assert.Error(t, res.Err) + assert.Contains(t, res.Stderr.String(), "--compact requires --all flag") + }) + + t.Run("--compact with --all shows 2-column layout", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + res := node.IPFS("provide", "stat", "--all", "--compact") + require.NoError(t, res.Err) + + output := res.Stdout.String() + lines := strings.Split(strings.TrimSpace(output), "\n") + require.NotEmpty(t, lines) + + // In compact mode, find a line that has both Schedule and Connectivity metrics + // This confirms 2-column layout is working + foundTwoColumns := false + for _, line := range lines { + if strings.Contains(line, "CIDs scheduled:") && strings.Contains(line, "Status:") { + foundTwoColumns = true + break + } + } + assert.True(t, foundTwoColumns, "Should have at least one line with both 'CIDs scheduled:' and 'Status:' confirming 2-column layout") + }) + + t.Run("individual section flags work with full labels", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + testCases := []struct { + flag string + contains []string + }{ + { + flag: "--connectivity", + contains: []string{"Status:"}, + }, + { + flag: "--queues", + contains: []string{"Provide queue:", "Reprovide queue:"}, + }, + { + flag: "--schedule", + contains: []string{"CIDs scheduled:", 
"Regions scheduled:", "Avg prefix length:", "Next region prefix:", "Next region reprovide:"}, + }, + { + flag: "--timings", + contains: []string{"Uptime:", "Current time offset:", "Cycle started:", "Reprovide interval:"}, + }, + { + flag: "--network", + contains: []string{"Avg record holders:", "Peers swept:", "Full keyspace coverage:", "Reachable peers:", "Avg region size:", "Replication factor:"}, + }, + { + flag: "--operations", + contains: []string{"Ongoing provides:", "Ongoing reprovides:", "Total CIDs provided:", "Total records provided:", "Total provide errors:"}, + }, + { + flag: "--workers", + contains: []string{"Active workers:", "Free workers:", "Workers stats:", "Periodic", "Burst"}, + }, + } + + for _, tc := range testCases { + res := node.IPFS("provide", "stat", tc.flag) + require.NoError(t, res.Err, "flag %s should work", tc.flag) + output := res.Stdout.String() + for _, expected := range tc.contains { + assert.Contains(t, output, expected, "flag %s should contain '%s'", tc.flag, expected) + } + } + }) + + t.Run("multiple section flags can be combined", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + res := node.IPFS("provide", "stat", "--network", "--operations") + require.NoError(t, res.Err) + + output := res.Stdout.String() + // Should have section headings when multiple flags combined + assert.Contains(t, output, "Network:") + assert.Contains(t, output, "Operations:") + assert.Contains(t, output, "Avg record holders:") + assert.Contains(t, output, "Ongoing provides:") + }) +} + +// TestProvideStatLegacyProvider tests Legacy provider specific behavior +func TestProvideStatLegacyProvider(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", false) + 
node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + t.Run("shows legacy stats from old provider system", func(t *testing.T) { + res := node.IPFS("provide", "stat") + require.NoError(t, res.Err) + + // Legacy provider shows stats from the old reprovider system + output := res.Stdout.String() + assert.Contains(t, output, "TotalReprovides:") + assert.Contains(t, output, "AvgReprovideDuration:") + assert.Contains(t, output, "LastReprovideDuration:") + }) + + t.Run("rejects flags with legacy provider", func(t *testing.T) { + flags := []string{"--all", "--connectivity", "--queues", "--network", "--workers"} + for _, flag := range flags { + res := node.RunIPFS("provide", "stat", flag) + assert.Error(t, res.Err, "flag %s should be rejected for legacy provider", flag) + assert.Contains(t, res.Stderr.String(), "cannot use flags with legacy provide stats") + } + }) + + t.Run("rejects --lan flag with legacy provider", func(t *testing.T) { + res := node.RunIPFS("provide", "stat", "--lan") + assert.Error(t, res.Err) + assert.Contains(t, res.Stderr.String(), "LAN stats only available for Sweep provider with Dual DHT") + }) +} + +// TestProvideStatOutputFormats tests different output formats +func TestProvideStatOutputFormats(t *testing.T) { + t.Parallel() + + t.Run("JSON output with Sweep provider", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + res := node.IPFS("provide", "stat", "--enc=json") + require.NoError(t, res.Err) + + // Parse JSON to verify structure + var result struct { + Sweep map[string]interface{} `json:"Sweep"` + Legacy map[string]interface{} `json:"Legacy"` + } + err := json.Unmarshal([]byte(res.Stdout.String()), &result) + require.NoError(t, err, "Output should be valid JSON") + assert.NotNil(t, result.Sweep, "Sweep stats 
should be present") + assert.Nil(t, result.Legacy, "Legacy stats should not be present") + }) + + t.Run("JSON output with Legacy provider", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", false) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + res := node.IPFS("provide", "stat", "--enc=json") + require.NoError(t, res.Err) + + // Parse JSON to verify structure + var result struct { + Sweep map[string]interface{} `json:"Sweep"` + Legacy map[string]interface{} `json:"Legacy"` + } + err := json.Unmarshal([]byte(res.Stdout.String()), &result) + require.NoError(t, err, "Output should be valid JSON") + assert.Nil(t, result.Sweep, "Sweep stats should not be present") + assert.NotNil(t, result.Legacy, "Legacy stats should be present") + }) +} + +// TestProvideStatIntegration tests integration with provide operations +func TestProvideStatIntegration(t *testing.T) { + t.Parallel() + + t.Run("stats reflect content being added to schedule", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.SetIPFSConfig("Provide.DHT.Interval", "1h") + node.StartDaemon() + defer node.StopDaemon() + + // Get initial scheduled CID count + res1 := node.IPFS("provide", "stat", "--enc=json") + require.NoError(t, res1.Err) + initialKeys := parseSweepStats(t, res1.Stdout.String()).Sweep.Schedule.Keys + + // Add content - this should increase CIDs scheduled + node.IPFSAddStr("test content for stats") + + // Wait for content to appear in schedule (with timeout) + // The buffered provider may take a moment to schedule items + require.Eventually(t, func() bool { + res := node.IPFS("provide", "stat", "--enc=json") + require.NoError(t, res.Err) + stats := parseSweepStats(t, res.Stdout.String()) + return 
stats.Sweep.Schedule.Keys > initialKeys + }, provideStatEventuallyTimeout, provideStatEventuallyTick, "Content should appear in schedule after adding") + }) + + t.Run("stats work with all documented strategies", func(t *testing.T) { + t.Parallel() + + // Test all strategies documented in docs/config.md#providestrategy + strategies := []string{"all", "pinned", "roots", "mfs", "pinned+mfs"} + for _, strategy := range strategies { + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.SetIPFSConfig("Provide.Strategy", strategy) + node.StartDaemon() + + res := node.IPFS("provide", "stat") + require.NoError(t, res.Err, "stats should work with strategy %s", strategy) + output := res.Stdout.String() + assert.NotEmpty(t, output) + assert.Contains(t, output, "CIDs scheduled:") + + node.StopDaemon() + } + }) +} + +// TestProvideStatDisabledConfig tests behavior when provide system is disabled +func TestProvideStatDisabledConfig(t *testing.T) { + t.Parallel() + + t.Run("Provide.Enabled=false returns error stats not available", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", false) + node.StartDaemon() + defer node.StopDaemon() + + res := node.RunIPFS("provide", "stat") + assert.Error(t, res.Err) + assert.Contains(t, res.Stderr.String(), "stats not available") + }) + + t.Run("Provide.Enabled=true with Provide.DHT.Interval=0 returns error stats not available", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + node.SetIPFSConfig("Provide.DHT.Interval", "0") + node.StartDaemon() + defer node.StopDaemon() + + res := node.RunIPFS("provide", "stat") + assert.Error(t, res.Err) + assert.Contains(t, 
res.Stderr.String(), "stats not available") + }) +} diff --git a/test/cli/provider_test.go b/test/cli/provider_test.go index 5ecf8f3ca..9d5e0d175 100644 --- a/test/cli/provider_test.go +++ b/test/cli/provider_test.go @@ -2,21 +2,45 @@ package cli import ( "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" "testing" "time" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestProvider(t *testing.T) { - t.Parallel() +const ( + timeStep = 20 * time.Millisecond + timeout = time.Second +) + +type cfgApplier func(*harness.Node) + +func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) { + t.Helper() initNodes := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes { nodes := harness.NewT(t).NewNodes(n).Init() + nodes.ForEachPar(apply) nodes.ForEachPar(fn) - return nodes.StartDaemons().Connect() + nodes = nodes.StartDaemons().Connect() + time.Sleep(500 * time.Millisecond) // wait for DHT clients to be bootstrapped + return nodes + } + + initNodesWithoutStart := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes { + nodes := harness.NewT(t).NewNodes(n).Init() + nodes.ForEachPar(apply) + nodes.ForEachPar(fn) + return nodes } expectNoProviders := func(t *testing.T, cid string, nodes ...*harness.Node) { @@ -27,17 +51,23 @@ func TestProvider(t *testing.T) { } expectProviders := func(t *testing.T, cid, expectedProvider string, nodes ...*harness.Node) { + outerLoop: for _, node := range nodes { - res := node.IPFS("routing", "findprovs", "-n=1", cid) - require.Equal(t, expectedProvider, res.Stdout.Trimmed()) + for i := time.Duration(0); i*timeStep < timeout; i++ { + res := node.IPFS("routing", "findprovs", "-n=1", cid) + if res.Stdout.Trimmed() == expectedProvider { + continue outerLoop + } + } + require.FailNowf(t, "found no providers", 
"expected a provider for %s", cid) } } - t.Run("Basic Providing", func(t *testing.T) { + t.Run("Provide.Enabled=true announces new CIDs created by ipfs add", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Experimental.StrategicProviding", false) + n.SetIPFSConfig("Provide.Enabled", true) }) defer nodes.StopDaemons() @@ -45,11 +75,52 @@ func TestProvider(t *testing.T) { expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) }) - t.Run("Basic Strategic Providing", func(t *testing.T) { + t.Run("Provide.Enabled=true announces new CIDs created by ipfs add --pin=false with default strategy", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Experimental.StrategicProviding", true) + n.SetIPFSConfig("Provide.Enabled", true) + // Default strategy is "all" which should provide even unpinned content + }) + defer nodes.StopDaemons() + + cid := nodes[0].IPFSAddStr(time.Now().String(), "--pin=false") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Provide.Enabled=true announces new CIDs created by ipfs block put --pin=false with default strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", true) + // Default strategy is "all" which should provide unpinned content from block put + }) + defer nodes.StopDaemons() + + data := random.Bytes(256) + cid := nodes[0].IPFSBlockPut(bytes.NewReader(data), "--pin=false") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) 
+ }) + + t.Run("Provide.Enabled=true announces new CIDs created by ipfs dag put --pin=false with default strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", true) + // Default strategy is "all" which should provide unpinned content from dag put + }) + defer nodes.StopDaemons() + + dagData := `{"hello": "world", "timestamp": "` + time.Now().String() + `"}` + cid := nodes[0].IPFSDAGPut(bytes.NewReader([]byte(dagData)), "--pin=false") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Provide.Enabled=false disables announcement of new CID from ipfs add", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", false) }) defer nodes.StopDaemons() @@ -57,109 +128,718 @@ func TestProvider(t *testing.T) { expectNoProviders(t, cid, nodes[1:]...) }) - t.Run("Reprovides with 'all' strategy", func(t *testing.T) { + t.Run("Provide.Enabled=false disables manual announcement via RPC command", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Reprovider.Strategy", "all") + n.SetIPFSConfig("Provide.Enabled", false) }) defer nodes.StopDaemons() - cid := nodes[0].IPFSAddStr(time.Now().String(), "--local") + cid := nodes[0].IPFSAddStr(time.Now().String()) + res := nodes[0].RunIPFS("routing", "provide", cid) + assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'") + assert.Equal(t, 1, res.ExitCode()) + + expectNoProviders(t, cid, nodes[1:]...) 
+ }) + + t.Run("manual provide fails when no libp2p peers and no custom HTTP router", func(t *testing.T) { + t.Parallel() + + h := harness.NewT(t) + node := h.NewNode().Init() + apply(node) + node.SetIPFSConfig("Provide.Enabled", true) + node.StartDaemon() + defer node.StopDaemon() + + cid := node.IPFSAddStr(time.Now().String()) + res := node.RunIPFS("routing", "provide", cid) + assert.Contains(t, res.Stderr.Trimmed(), "cannot provide, no connected peers") + assert.Equal(t, 1, res.ExitCode()) + }) + + t.Run("manual provide succeeds via custom HTTP router when no libp2p peers", func(t *testing.T) { + t.Parallel() + + // Create a mock HTTP server that accepts provide requests. + // This simulates the undocumented API behavior described in + // https://discuss.ipfs.tech/t/only-peers-found-from-dht-seem-to-be-getting-used-as-relays-so-cant-use-http-routers/19545/9 + // Note: This is NOT IPIP-378, which was not implemented. + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Accept both PUT and POST requests to /routing/v1/providers and /routing/v1/ipns + if (r.Method == http.MethodPut || r.Method == http.MethodPost) && + (strings.HasPrefix(r.URL.Path, "/routing/v1/providers") || strings.HasPrefix(r.URL.Path, "/routing/v1/ipns")) { + // Return HTTP 200 to indicate successful publishing + w.WriteHeader(http.StatusOK) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer mockServer.Close() + + h := harness.NewT(t) + node := h.NewNode().Init() + apply(node) + node.SetIPFSConfig("Provide.Enabled", true) + // Configure a custom HTTP router for providing. + // Using our mock server that will accept the provide requests. 
+ routingConf := map[string]any{ + "Type": "custom", // https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md#configuration-file-example + "Methods": map[string]any{ + "provide": map[string]any{"RouterName": "MyCustomRouter"}, + "get-ipns": map[string]any{"RouterName": "MyCustomRouter"}, + "put-ipns": map[string]any{"RouterName": "MyCustomRouter"}, + "find-peers": map[string]any{"RouterName": "MyCustomRouter"}, + "find-providers": map[string]any{"RouterName": "MyCustomRouter"}, + }, + "Routers": map[string]any{ + "MyCustomRouter": map[string]any{ + "Type": "http", + "Parameters": map[string]any{ + // Use the mock server URL + "Endpoint": mockServer.URL, + }, + }, + }, + } + node.SetIPFSConfig("Routing", routingConf) + node.StartDaemon() + defer node.StopDaemon() + + cid := node.IPFSAddStr(time.Now().String()) + // The command should successfully provide via HTTP even without libp2p peers + res := node.RunIPFS("routing", "provide", cid) + assert.Empty(t, res.Stderr.String(), "Should have no errors when providing via HTTP router") + assert.Equal(t, 0, res.ExitCode(), "Should succeed with exit code 0") + }) + + // Right now Provide and Reprovide are tied together + t.Run("Reprovide.Interval=0 disables announcement of new CID too", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.DHT.Interval", "0") + }) + defer nodes.StopDaemons() + + cid := nodes[0].IPFSAddStr(time.Now().String()) + expectNoProviders(t, cid, nodes[1:]...) + }) + + // It is a lesser evil - forces users to fix their config and have some sort of interval + t.Run("Manual Reprovide trigger does not work when periodic reprovide is disabled", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.DHT.Interval", "0") + }) + defer nodes.StopDaemons() + + cid := nodes[0].IPFSAddStr(time.Now().String()) expectNoProviders(t, cid, nodes[1:]...) 
- nodes[0].IPFS("bitswap", "reprovide") + res := nodes[0].RunIPFS("routing", "reprovide") + assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.DHT.Interval is set to '0'") + assert.Equal(t, 1, res.ExitCode()) - expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + expectNoProviders(t, cid, nodes[1:]...) }) - t.Run("Reprovides with 'flat' strategy", func(t *testing.T) { + // It is a lesser evil - forces users to fix their config and have some sort of interval + t.Run("Manual Reprovide trigger does not work when Provide system is disabled", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Reprovider.Strategy", "flat") + n.SetIPFSConfig("Provide.Enabled", false) }) defer nodes.StopDaemons() - cid := nodes[0].IPFSAddStr(time.Now().String(), "--local") + cid := nodes[0].IPFSAddStr(time.Now().String()) expectNoProviders(t, cid, nodes[1:]...) - nodes[0].IPFS("bitswap", "reprovide") - - expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) - }) - - t.Run("Reprovides with 'pinned' strategy", func(t *testing.T) { - t.Parallel() - - foo := testutils.RandomBytes(1000) - bar := testutils.RandomBytes(1000) - - nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Reprovider.Strategy", "pinned") - }) - defer nodes.StopDaemons() - - cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--offline", "--pin=false") - cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--offline", "--pin=false") - cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "--offline", "-w") - - expectNoProviders(t, cidFoo, nodes[1:]...) - expectNoProviders(t, cidBar, nodes[1:]...) - expectNoProviders(t, cidBarDir, nodes[1:]...) - - nodes[0].IPFS("bitswap", "reprovide") - - expectNoProviders(t, cidFoo, nodes[1:]...) - expectProviders(t, cidBar, nodes[0].PeerID().String(), nodes[1:]...) - expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...) 
- }) - - t.Run("Reprovides with 'roots' strategy", func(t *testing.T) { - t.Parallel() - - foo := testutils.RandomBytes(1000) - bar := testutils.RandomBytes(1000) - baz := testutils.RandomBytes(1000) - - nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Reprovider.Strategy", "roots") - }) - defer nodes.StopDaemons() - - cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--offline", "--pin=false") - cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--offline", "--pin=false") - cidBaz := nodes[0].IPFSAdd(bytes.NewReader(baz), "--offline") - cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "--offline", "-w") - - expectNoProviders(t, cidFoo, nodes[1:]...) - expectNoProviders(t, cidBar, nodes[1:]...) - expectNoProviders(t, cidBarDir, nodes[1:]...) - - nodes[0].IPFS("bitswap", "reprovide") - - expectNoProviders(t, cidFoo, nodes[1:]...) - expectNoProviders(t, cidBar, nodes[1:]...) - expectProviders(t, cidBaz, nodes[0].PeerID().String(), nodes[1:]...) - expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...) - }) - - t.Run("Providing works without ticking", func(t *testing.T) { - t.Parallel() - - nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Reprovider.Interval", "0") - }) - defer nodes.StopDaemons() - - cid := nodes[0].IPFSAddStr(time.Now().String(), "--offline") + res := nodes[0].RunIPFS("routing", "reprovide") + assert.Contains(t, res.Stderr.Trimmed(), "invalid configuration: Provide.Enabled is set to 'false'") + assert.Equal(t, 1, res.ExitCode()) expectNoProviders(t, cid, nodes[1:]...) + }) - nodes[0].IPFS("bitswap", "reprovide") + t.Run("Provide with 'all' strategy", func(t *testing.T) { + t.Parallel() + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "all") + }) + defer nodes.StopDaemons() + + cid := nodes[0].IPFSAddStr("all strategy") expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) 
}) + + t.Run("Provide with 'pinned' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned") + }) + defer nodes.StopDaemons() + + // Add a non-pinned CID (should not be provided) + cid := nodes[0].IPFSAddStr("pinned strategy", "--pin=false") + expectNoProviders(t, cid, nodes[1:]...) + + // Pin the CID (should now be provided) + nodes[0].IPFS("pin", "add", cid) + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Provide with 'pinned+mfs' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned+mfs") + }) + defer nodes.StopDaemons() + + // Add a pinned CID (should be provided) + cidPinned := nodes[0].IPFSAddStr("pinned content") + cidUnpinned := nodes[0].IPFSAddStr("unpinned content", "--pin=false") + cidMFS := nodes[0].IPFSAddStr("mfs content", "--pin=false") + nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + + n0pid := nodes[0].PeerID().String() + expectProviders(t, cidPinned, n0pid, nodes[1:]...) + expectNoProviders(t, cidUnpinned, nodes[1:]...) + expectProviders(t, cidMFS, n0pid, nodes[1:]...) + }) + + t.Run("Provide with 'roots' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "roots") + }) + defer nodes.StopDaemons() + + // Add a root CID (should be provided) + cidRoot := nodes[0].IPFSAddStr("roots strategy", "-w", "-Q") + // the same without wrapping should give us a child node. + cidChild := nodes[0].IPFSAddStr("roots strategy", "--pin=false") + + expectProviders(t, cidRoot, nodes[0].PeerID().String(), nodes[1:]...) + expectNoProviders(t, cidChild, nodes[1:]...)
+ }) + + t.Run("Provide with 'mfs' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "mfs") + }) + defer nodes.StopDaemons() + + // Add a file to MFS (should be provided) + data := random.Bytes(1000) + cid := nodes[0].IPFSAdd(bytes.NewReader(data), "-Q") + + // not yet in MFS + expectNoProviders(t, cid, nodes[1:]...) + + nodes[0].IPFS("files", "cp", "/ipfs/"+cid, "/myfile") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + if reprovide { + + t.Run("Reprovides with 'all' strategy when strategy is '' (empty)", func(t *testing.T) { + t.Parallel() + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "") + }) + + cid := nodes[0].IPFSAddStr(time.Now().String()) + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + expectNoProviders(t, cid, nodes[1:]...) + + nodes[0].IPFS("routing", "reprovide") + + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Reprovides with 'all' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "all") + }) + + cid := nodes[0].IPFSAddStr(time.Now().String()) + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + expectNoProviders(t, cid, nodes[1:]...) + + nodes[0].IPFS("routing", "reprovide") + + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) 
+ }) + + t.Run("Reprovides with 'pinned' strategy", func(t *testing.T) { + t.Parallel() + + foo := random.Bytes(1000) + bar := random.Bytes(1000) + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned") + }) + + // Add a pin while offline so it cannot be provided + cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w") + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // Add content without pinning while daemon is online + cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--pin=false") + cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false") + + // Nothing should have been provided. The pin was offline, and + // the others should not be provided per the strategy. + expectNoProviders(t, cidFoo, nodes[1:]...) + expectNoProviders(t, cidBar, nodes[1:]...) + expectNoProviders(t, cidBarDir, nodes[1:]...) + + nodes[0].IPFS("routing", "reprovide") + + // cidFoo is not pinned so should not be provided. + expectNoProviders(t, cidFoo, nodes[1:]...) + // cidBar gets provided by being a child of cidBarDir even though we added with pin=false. + expectProviders(t, cidBar, nodes[0].PeerID().String(), nodes[1:]...) + expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Reprovides with 'roots' strategy", func(t *testing.T) { + t.Parallel() + + foo := random.Bytes(1000) + bar := random.Bytes(1000) + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "roots") + }) + n0pid := nodes[0].PeerID().String() + + // Add a pin.
Only root should get pinned but not provided + // because the node is not started + cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w") + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo)) + cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false") + + // cidFoo will get provided per the strategy but cidBar will not. + expectProviders(t, cidFoo, n0pid, nodes[1:]...) + expectNoProviders(t, cidBar, nodes[1:]...) + + nodes[0].IPFS("routing", "reprovide") + + expectProviders(t, cidFoo, n0pid, nodes[1:]...) + expectNoProviders(t, cidBar, nodes[1:]...) + expectProviders(t, cidBarDir, n0pid, nodes[1:]...) + }) + + t.Run("Reprovides with 'mfs' strategy", func(t *testing.T) { + t.Parallel() + + bar := random.Bytes(1000) + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "mfs") + }) + n0pid := nodes[0].PeerID().String() + + // add something and let's put it in MFS + cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false", "-Q") + nodes[0].IPFS("files", "cp", "/ipfs/"+cidBar, "/myfile") + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // cidBar is in MFS but not provided + expectNoProviders(t, cidBar, nodes[1:]...) + + nodes[0].IPFS("routing", "reprovide") + + // And now it is provided + expectProviders(t, cidBar, n0pid, nodes[1:]...)
+ }) + + t.Run("Reprovides with 'pinned+mfs' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provide.Strategy", "pinned+mfs") + }) + n0pid := nodes[0].PeerID().String() + + // Add a pinned CID (should be provided) + cidPinned := nodes[0].IPFSAddStr("pinned content", "--pin=true") + // Add a CID to MFS (should be provided) + cidMFS := nodes[0].IPFSAddStr("mfs content") + nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + // Add a CID that is neither pinned nor in MFS (should not be provided) + cidNeither := nodes[0].IPFSAddStr("neither content", "--pin=false") + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // Trigger reprovide + nodes[0].IPFS("routing", "reprovide") + + // Check that pinned CID is provided + expectProviders(t, cidPinned, n0pid, nodes[1:]...) + // Check that MFS CID is provided + expectProviders(t, cidMFS, n0pid, nodes[1:]...) + // Check that neither CID is not provided + expectNoProviders(t, cidNeither, nodes[1:]...) 
+ }) + } + + t.Run("provide clear command removes items from provide queue", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", true) + n.SetIPFSConfig("Provide.DHT.Interval", "22h") + n.SetIPFSConfig("Provide.Strategy", "all") + }) + nodes.StartDaemons() + defer nodes.StopDaemons() + + // Clear the provide queue first time - works regardless of queue state + res1 := nodes[0].IPFS("provide", "clear") + require.NoError(t, res1.Err) + + // Should report cleared items and proper message format + assert.Contains(t, res1.Stdout.String(), "removed") + assert.Contains(t, res1.Stdout.String(), "items from provide queue") + + // Clear the provide queue second time - should definitely report 0 items + res2 := nodes[0].IPFS("provide", "clear") + require.NoError(t, res2.Err) + + // Should report 0 items cleared since queue was already cleared + assert.Contains(t, res2.Stdout.String(), "removed 0 items from provide queue") + }) + + t.Run("provide clear command with quiet option", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", true) + n.SetIPFSConfig("Provide.DHT.Interval", "22h") + n.SetIPFSConfig("Provide.Strategy", "all") + }) + nodes.StartDaemons() + defer nodes.StopDaemons() + + // Clear the provide queue with quiet option + res := nodes[0].IPFS("provide", "clear", "-q") + require.NoError(t, res.Err) + + // Should have no output when quiet + assert.Empty(t, res.Stdout.String()) + }) + + t.Run("provide clear command works when provider is disabled", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", false) + n.SetIPFSConfig("Provide.DHT.Interval", "22h") + n.SetIPFSConfig("Provide.Strategy", "all") + }) + nodes.StartDaemons() + defer 
nodes.StopDaemons() + + // Clear should succeed even when provider is disabled + res := nodes[0].IPFS("provide", "clear") + require.NoError(t, res.Err) + }) + + t.Run("provide clear command returns JSON with removed item count", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provide.Enabled", true) + n.SetIPFSConfig("Provide.DHT.Interval", "22h") + n.SetIPFSConfig("Provide.Strategy", "all") + }) + nodes.StartDaemons() + defer nodes.StopDaemons() + + // Clear the provide queue with JSON encoding + res := nodes[0].IPFS("provide", "clear", "--enc=json") + require.NoError(t, res.Err) + + // Should return valid JSON with the number of removed items + output := res.Stdout.String() + assert.NotEmpty(t, output) + + // Parse JSON to verify structure + var result int + err := json.Unmarshal([]byte(output), &result) + require.NoError(t, err, "Output should be valid JSON") + + // Should be a non-negative integer (0 or positive) + assert.GreaterOrEqual(t, result, 0) + }) +} + +// runResumeTests validates Provide.DHT.ResumeEnabled behavior for SweepingProvider. +// +// Background: The provider tracks current_time_offset = (now - cycleStart) % interval +// where cycleStart is the timestamp marking the beginning of the reprovide cycle. +// With ResumeEnabled=true, cycleStart persists in the datastore across restarts. +// With ResumeEnabled=false, cycleStart resets to 'now' on each startup. 
+func runResumeTests(t *testing.T, apply cfgApplier) { + t.Helper() + + const ( + reprovideInterval = 30 * time.Second + initialRuntime = 10 * time.Second // Let cycle progress + downtime = 5 * time.Second // Simulated offline period + restartTime = 2 * time.Second // Daemon restart stabilization + + // Thresholds account for timing jitter (~2-3s margin) + minOffsetBeforeRestart = 8 * time.Second // Expect ~10s + minOffsetAfterResume = 12 * time.Second // Expect ~17s (10s + 5s + 2s) + maxOffsetAfterReset = 5 * time.Second // Expect ~2s (fresh start) + ) + + setupNode := func(t *testing.T, resumeEnabled bool) *harness.Node { + node := harness.NewT(t).NewNode().Init() + apply(node) // Sets Provide.DHT.SweepEnabled=true + node.SetIPFSConfig("Provide.DHT.ResumeEnabled", resumeEnabled) + node.SetIPFSConfig("Provide.DHT.Interval", reprovideInterval.String()) + node.SetIPFSConfig("Bootstrap", []string{}) + node.StartDaemon() + return node + } + + t.Run("preserves cycle state across restart", func(t *testing.T) { + t.Parallel() + + node := setupNode(t, true) + defer node.StopDaemon() + + for i := 0; i < 10; i++ { + node.IPFSAddStr(fmt.Sprintf("resume-test-%d-%d", i, time.Now().UnixNano())) + } + + time.Sleep(initialRuntime) + + beforeRestart := node.IPFS("provide", "stat", "--enc=json") + offsetBeforeRestart, _, err := parseProvideStatJSON(beforeRestart.Stdout.String()) + require.NoError(t, err) + require.Greater(t, offsetBeforeRestart, minOffsetBeforeRestart, + "cycle should have progressed") + + node.StopDaemon() + time.Sleep(downtime) + node.StartDaemon() + time.Sleep(restartTime) + + afterRestart := node.IPFS("provide", "stat", "--enc=json") + offsetAfterRestart, _, err := parseProvideStatJSON(afterRestart.Stdout.String()) + require.NoError(t, err) + + assert.GreaterOrEqual(t, offsetAfterRestart, minOffsetAfterResume, + "offset should account for downtime") + }) + + t.Run("resets cycle when disabled", func(t *testing.T) { + t.Parallel() + + node := setupNode(t, false) 
+ defer node.StopDaemon() + + for i := 0; i < 10; i++ { + node.IPFSAddStr(fmt.Sprintf("no-resume-%d-%d", i, time.Now().UnixNano())) + } + + time.Sleep(initialRuntime) + + beforeRestart := node.IPFS("provide", "stat", "--enc=json") + offsetBeforeRestart, _, err := parseProvideStatJSON(beforeRestart.Stdout.String()) + require.NoError(t, err) + require.Greater(t, offsetBeforeRestart, minOffsetBeforeRestart, + "cycle should have progressed") + + node.StopDaemon() + time.Sleep(downtime) + node.StartDaemon() + time.Sleep(restartTime) + + afterRestart := node.IPFS("provide", "stat", "--enc=json") + offsetAfterRestart, _, err := parseProvideStatJSON(afterRestart.Stdout.String()) + require.NoError(t, err) + + assert.Less(t, offsetAfterRestart, maxOffsetAfterReset, + "offset should reset to near zero") + }) +} + +type provideStatJSON struct { + Sweep struct { + Timing struct { + CurrentTimeOffset int64 `json:"current_time_offset"` // nanoseconds + } `json:"timing"` + Schedule struct { + NextReprovidePrefix string `json:"next_reprovide_prefix"` + } `json:"schedule"` + } `json:"Sweep"` +} + +// parseProvideStatJSON extracts timing and schedule information from +// the JSON output of 'ipfs provide stat --enc=json'. +// Note: prefix is unused in current tests but kept for potential future use. 
+func parseProvideStatJSON(output string) (offset time.Duration, prefix string, err error) { + var stat provideStatJSON + if err := json.Unmarshal([]byte(output), &stat); err != nil { + return 0, "", err + } + offset = time.Duration(stat.Sweep.Timing.CurrentTimeOffset) + prefix = stat.Sweep.Schedule.NextReprovidePrefix + return offset, prefix, nil +} + +func TestProvider(t *testing.T) { + t.Parallel() + + variants := []struct { + name string + reprovide bool + apply cfgApplier + }{ + { + name: "LegacyProvider", + reprovide: true, + apply: func(n *harness.Node) { + n.SetIPFSConfig("Provide.DHT.SweepEnabled", false) + }, + }, + { + name: "SweepingProvider", + reprovide: false, + apply: func(n *harness.Node) { + n.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + }, + }, + } + + for _, v := range variants { + v := v // capture + t.Run(v.name, func(t *testing.T) { + // t.Parallel() + runProviderSuite(t, v.reprovide, v.apply) + + // Resume tests only apply to SweepingProvider + if v.name == "SweepingProvider" { + runResumeTests(t, v.apply) + } + }) + } +} + +// TestHTTPOnlyProviderWithSweepEnabled tests that provider records are correctly +// sent to HTTP routers when Routing.Type="custom" with only HTTP routers configured, +// even when Provide.DHT.SweepEnabled=true (the default since v0.39). 
+// +// This is a regression test for https://github.com/ipfs/kubo/issues/11089 +func TestHTTPOnlyProviderWithSweepEnabled(t *testing.T) { + t.Parallel() + + // Track provide requests received by the mock HTTP router + var provideRequests atomic.Int32 + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if (r.Method == http.MethodPut || r.Method == http.MethodPost) && + strings.HasPrefix(r.URL.Path, "/routing/v1/providers") { + provideRequests.Add(1) + w.WriteHeader(http.StatusOK) + } else if strings.HasPrefix(r.URL.Path, "/routing/v1/providers") && r.Method == http.MethodGet { + // Return empty providers for findprovs + w.Header().Set("Content-Type", "application/x-ndjson") + w.WriteHeader(http.StatusOK) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer mockServer.Close() + + h := harness.NewT(t) + node := h.NewNode().Init() + + // Explicitly set SweepEnabled=true (the default since v0.39, but be explicit for test clarity) + node.SetIPFSConfig("Provide.DHT.SweepEnabled", true) + node.SetIPFSConfig("Provide.Enabled", true) + + // Configure HTTP-only custom routing (no DHT) with explicit Routing.Type=custom + routingConf := map[string]any{ + "Type": "custom", // Explicitly set Routing.Type=custom + "Methods": map[string]any{ + "provide": map[string]any{"RouterName": "HTTPRouter"}, + "get-ipns": map[string]any{"RouterName": "HTTPRouter"}, + "put-ipns": map[string]any{"RouterName": "HTTPRouter"}, + "find-peers": map[string]any{"RouterName": "HTTPRouter"}, + "find-providers": map[string]any{"RouterName": "HTTPRouter"}, + }, + "Routers": map[string]any{ + "HTTPRouter": map[string]any{ + "Type": "http", + "Parameters": map[string]any{ + "Endpoint": mockServer.URL, + }, + }, + }, + } + node.SetIPFSConfig("Routing", routingConf) + node.StartDaemon() + defer node.StopDaemon() + + // Add content and manually provide it + cid := node.IPFSAddStr(time.Now().String()) + + // Manual provide should succeed even 
without libp2p peers + res := node.RunIPFS("routing", "provide", cid) + // Check that the command succeeded (exit code 0) and no provide-related errors + assert.Equal(t, 0, res.ExitCode(), "routing provide should succeed with HTTP-only routing and SweepEnabled=true") + assert.NotContains(t, res.Stderr.String(), "cannot provide", "should not have provide errors") + + // Verify HTTP router received at least one provide request + assert.Greater(t, provideRequests.Load(), int32(0), + "HTTP router should have received provide requests") + + // Verify 'provide stat' works with HTTP-only routing (regression test for stats) + statRes := node.RunIPFS("provide", "stat") + assert.Equal(t, 0, statRes.ExitCode(), "provide stat should succeed with HTTP-only routing") + assert.NotContains(t, statRes.Stderr.String(), "stats not available", + "should not report stats unavailable") + // LegacyProvider outputs "TotalReprovides:" in its stats + assert.Contains(t, statRes.Stdout.String(), "TotalReprovides:", + "should show legacy provider stats") } diff --git a/test/cli/rcmgr_test.go b/test/cli/rcmgr_test.go index 50ea26979..66e6eb6ac 100644 --- a/test/cli/rcmgr_test.go +++ b/test/cli/rcmgr_test.go @@ -26,6 +26,7 @@ func TestRcmgr(t *testing.T) { }) node.StartDaemon() + defer node.StopDaemon() t.Run("swarm resources should fail", func(t *testing.T) { res := node.RunIPFS("swarm", "resources") @@ -41,6 +42,7 @@ func TestRcmgr(t *testing.T) { cfg.Swarm.ResourceMgr.Enabled = config.False }) node.StartDaemon() + defer node.StopDaemon() t.Run("swarm resources should fail", func(t *testing.T) { res := node.RunIPFS("swarm", "resources") @@ -56,6 +58,7 @@ func TestRcmgr(t *testing.T) { cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000) }) node.StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("swarm", "resources", "--enc=json") require.Equal(t, 0, res.ExitCode()) @@ -73,7 +76,9 @@ func TestRcmgr(t *testing.T) { node.UpdateConfig(func(cfg *config.Config) { 
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000) }) + node.StartDaemon() + t.Cleanup(func() { node.StopDaemon() }) t.Run("conns and streams are above 800 for default connmgr settings", func(t *testing.T) { t.Parallel() @@ -135,6 +140,7 @@ func TestRcmgr(t *testing.T) { overrides.System.ConnsInbound = rcmgr.Unlimited }) node.StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("swarm", "resources", "--enc=json") limits := unmarshalLimits(t, res.Stdout.Bytes()) @@ -150,6 +156,7 @@ func TestRcmgr(t *testing.T) { overrides.Transient.Memory = 88888 }) node.StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("swarm", "resources", "--enc=json") limits := unmarshalLimits(t, res.Stdout.Bytes()) @@ -163,6 +170,7 @@ func TestRcmgr(t *testing.T) { overrides.Service = map[string]rcmgr.ResourceLimits{"foo": {Memory: 77777}} }) node.StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("swarm", "resources", "--enc=json") limits := unmarshalLimits(t, res.Stdout.Bytes()) @@ -176,6 +184,7 @@ func TestRcmgr(t *testing.T) { overrides.Protocol = map[protocol.ID]rcmgr.ResourceLimits{"foo": {Memory: 66666}} }) node.StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("swarm", "resources", "--enc=json") limits := unmarshalLimits(t, res.Stdout.Bytes()) @@ -191,6 +200,7 @@ func TestRcmgr(t *testing.T) { overrides.Peer = map[peer.ID]rcmgr.ResourceLimits{validPeerID: {Memory: 55555}} }) node.StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("swarm", "resources", "--enc=json") limits := unmarshalLimits(t, res.Stdout.Bytes()) @@ -218,6 +228,7 @@ func TestRcmgr(t *testing.T) { }) nodes.StartDaemons() + t.Cleanup(func() { nodes.StopDaemons() }) t.Run("node 0 should fail to connect to and ping node 1", func(t *testing.T) { t.Parallel() diff --git a/test/cli/repo_verify_test.go b/test/cli/repo_verify_test.go new file mode 100644 index 000000000..e75eec963 --- /dev/null +++ b/test/cli/repo_verify_test.go @@ -0,0 +1,384 @@ +package cli + +import ( + 
"fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Well-known block file names in flatfs blockstore that should not be corrupted during testing. +// Flatfs stores each block as a separate .data file on disk. +const ( + // emptyFileFlatfsFilename is the flatfs filename for an empty UnixFS file block + emptyFileFlatfsFilename = "CIQL7TG2PB52XIZLLHDYIUFMHUQLMMZWBNBZSLDXFCPZ5VDNQQ2WDZQ" + // emptyDirFlatfsFilename is the flatfs filename for an empty UnixFS directory block. + // This block has special handling and may be served from memory even when corrupted on disk. + emptyDirFlatfsFilename = "CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y" +) + +// getEligibleFlatfsBlockFiles returns flatfs block files (*.data) that are safe to corrupt in tests. +// Filters out well-known blocks (empty file/dir) that cause test flakiness. +// +// Note: This helper is specific to the flatfs blockstore implementation where each block +// is stored as a separate file on disk under blocks/*/*.data. +func getEligibleFlatfsBlockFiles(t *testing.T, node *harness.Node) []string { + blockFiles, err := filepath.Glob(filepath.Join(node.Dir, "blocks", "*", "*.data")) + require.NoError(t, err) + require.NotEmpty(t, blockFiles, "no flatfs block files found") + + var eligible []string + for _, f := range blockFiles { + name := filepath.Base(f) + if !strings.Contains(name, emptyFileFlatfsFilename) && + !strings.Contains(name, emptyDirFlatfsFilename) { + eligible = append(eligible, f) + } + } + return eligible +} + +// corruptRandomBlock corrupts a random block file in the flatfs blockstore. +// Returns the path to the corrupted file. 
+func corruptRandomBlock(t *testing.T, node *harness.Node) string { + eligible := getEligibleFlatfsBlockFiles(t, node) + require.NotEmpty(t, eligible, "no eligible blocks to corrupt") + + toCorrupt := eligible[0] + err := os.WriteFile(toCorrupt, []byte("corrupted data"), 0644) + require.NoError(t, err) + + return toCorrupt +} + +// corruptMultipleBlocks corrupts multiple block files in the flatfs blockstore. +// Returns the paths to the corrupted files. +func corruptMultipleBlocks(t *testing.T, node *harness.Node, count int) []string { + eligible := getEligibleFlatfsBlockFiles(t, node) + require.GreaterOrEqual(t, len(eligible), count, "not enough eligible blocks to corrupt") + + var corrupted []string + for i := 0; i < count && i < len(eligible); i++ { + err := os.WriteFile(eligible[i], []byte(fmt.Sprintf("corrupted data %d", i)), 0644) + require.NoError(t, err) + corrupted = append(corrupted, eligible[i]) + } + + return corrupted +} + +func TestRepoVerify(t *testing.T) { + t.Run("healthy repo passes", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFS("add", "-q", "--raw-leaves=false", "-r", node.IPFSBin) + + res := node.IPFS("repo", "verify") + assert.Contains(t, res.Stdout.String(), "all blocks validated") + }) + + t.Run("detects corruption", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFSAddStr("test content") + + corruptRandomBlock(t, node) + + res := node.RunIPFS("repo", "verify") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stdout.String(), "was corrupt") + assert.Contains(t, res.Stderr.String(), "1 blocks corrupt") + }) + + t.Run("drop removes corrupt blocks", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + cid := node.IPFSAddStr("test content") + + corruptRandomBlock(t, node) + + res := node.RunIPFS("repo", "verify", "--drop") + assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks removed successfully") + 
output := res.Stdout.String() + assert.Contains(t, output, "1 blocks corrupt") + assert.Contains(t, output, "1 removed") + + // Verify block is gone + res = node.RunIPFS("block", "stat", cid) + assert.NotEqual(t, 0, res.ExitCode()) + }) + + t.Run("heal requires online mode", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + node.IPFSAddStr("test content") + + corruptRandomBlock(t, node) + + res := node.RunIPFS("repo", "verify", "--heal") + assert.NotEqual(t, 0, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "online mode") + }) + + t.Run("heal repairs from network", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // Add content to node 0 + cid := nodes[0].IPFSAddStr("test content for healing") + + // Wait for it to appear on node 1 + nodes[1].IPFS("block", "get", cid) + + // Corrupt on node 1 + corruptRandomBlock(t, nodes[1]) + + // Heal should restore from node 0 + res := nodes[1].RunIPFS("repo", "verify", "--heal") + assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks healed successfully") + output := res.Stdout.String() + + // Should report corruption and healing with specific counts + assert.Contains(t, output, "1 blocks corrupt") + assert.Contains(t, output, "1 removed") + assert.Contains(t, output, "1 healed") + + // Verify block is restored + nodes[1].IPFS("block", "stat", cid) + }) + + t.Run("healed blocks contain correct data", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // Add specific content to node 0 + testContent := "this is the exact content that should be healed correctly" + cid := nodes[0].IPFSAddStr(testContent) + + // Fetch to node 1 and verify the content is correct initially + nodes[1].IPFS("block", "get", cid) + res := nodes[1].IPFS("cat", cid) + assert.Equal(t, testContent, 
res.Stdout.String()) + + // Corrupt on node 1 + corruptRandomBlock(t, nodes[1]) + + // Heal the corruption + res = nodes[1].RunIPFS("repo", "verify", "--heal") + assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks healed successfully") + output := res.Stdout.String() + assert.Contains(t, output, "1 blocks corrupt") + assert.Contains(t, output, "1 removed") + assert.Contains(t, output, "1 healed") + + // Verify the healed content matches the original exactly + res = nodes[1].IPFS("cat", cid) + assert.Equal(t, testContent, res.Stdout.String(), "healed content should match original") + + // Also verify via block get that the raw block data is correct + block0 := nodes[0].IPFS("block", "get", cid) + block1 := nodes[1].IPFS("block", "get", cid) + assert.Equal(t, block0.Stdout.String(), block1.Stdout.String(), "raw block data should match") + }) + + t.Run("multiple corrupt blocks", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Create 20 blocks + for i := 0; i < 20; i++ { + node.IPFSAddStr(strings.Repeat("test content ", i+1)) + } + + // Corrupt 5 blocks + corruptMultipleBlocks(t, node, 5) + + // Verify detects all corruptions + res := node.RunIPFS("repo", "verify") + assert.Equal(t, 1, res.ExitCode()) + // Error summary is in stderr + assert.Contains(t, res.Stderr.String(), "5 blocks corrupt") + + // Test with --drop + res = node.RunIPFS("repo", "verify", "--drop") + assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks removed successfully") + assert.Contains(t, res.Stdout.String(), "5 blocks corrupt") + assert.Contains(t, res.Stdout.String(), "5 removed") + }) + + t.Run("empty repository", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Verify empty repo passes + res := node.IPFS("repo", "verify") + assert.Equal(t, 0, res.ExitCode()) + assert.Contains(t, res.Stdout.String(), "all blocks validated") + + // Should work with --drop and --heal too + res = 
node.IPFS("repo", "verify", "--drop") + assert.Equal(t, 0, res.ExitCode()) + assert.Contains(t, res.Stdout.String(), "all blocks validated") + }) + + t.Run("partial heal success", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + + // Start both nodes and connect them + nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // Add 5 blocks to node 0, pin them to keep available + cid1 := nodes[0].IPFSAddStr("content available for healing 1") + cid2 := nodes[0].IPFSAddStr("content available for healing 2") + cid3 := nodes[0].IPFSAddStr("content available for healing 3") + cid4 := nodes[0].IPFSAddStr("content available for healing 4") + cid5 := nodes[0].IPFSAddStr("content available for healing 5") + + // Pin these on node 0 to ensure they stay available + nodes[0].IPFS("pin", "add", cid1) + nodes[0].IPFS("pin", "add", cid2) + nodes[0].IPFS("pin", "add", cid3) + nodes[0].IPFS("pin", "add", cid4) + nodes[0].IPFS("pin", "add", cid5) + + // Node 1 fetches these blocks + nodes[1].IPFS("block", "get", cid1) + nodes[1].IPFS("block", "get", cid2) + nodes[1].IPFS("block", "get", cid3) + nodes[1].IPFS("block", "get", cid4) + nodes[1].IPFS("block", "get", cid5) + + // Now remove some blocks from node 0 to simulate partial availability + nodes[0].IPFS("pin", "rm", cid3) + nodes[0].IPFS("pin", "rm", cid4) + nodes[0].IPFS("pin", "rm", cid5) + nodes[0].IPFS("repo", "gc") + + // Verify node 1 is still connected + peers := nodes[1].IPFS("swarm", "peers") + require.Contains(t, peers.Stdout.String(), nodes[0].PeerID().String()) + + // Corrupt 5 blocks on node 1 + corruptMultipleBlocks(t, nodes[1], 5) + + // Heal should partially succeed (only cid1 and cid2 available from node 0) + res := nodes[1].RunIPFS("repo", "verify", "--heal") + assert.Equal(t, 1, res.ExitCode()) + + // Should show mixed results with specific counts in stderr + errOutput := res.Stderr.String() + assert.Contains(t, errOutput, "5 blocks corrupt") + assert.Contains(t, 
errOutput, "5 removed") + // Only cid1 and cid2 are available for healing, cid3-5 were GC'd + assert.Contains(t, errOutput, "2 healed") + assert.Contains(t, errOutput, "3 failed to heal") + }) + + t.Run("heal with block not available on network", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init() + + // Start both nodes and connect + nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // Add unique content only to node 1 + nodes[1].IPFSAddStr("unique content that exists nowhere else") + + // Ensure nodes are connected + peers := nodes[1].IPFS("swarm", "peers") + require.Contains(t, peers.Stdout.String(), nodes[0].PeerID().String()) + + // Corrupt the block on node 1 + corruptRandomBlock(t, nodes[1]) + + // Heal should fail - node 0 doesn't have this content + res := nodes[1].RunIPFS("repo", "verify", "--heal") + assert.Equal(t, 1, res.ExitCode()) + + // Should report heal failure with specific counts in stderr + errOutput := res.Stderr.String() + assert.Contains(t, errOutput, "1 blocks corrupt") + assert.Contains(t, errOutput, "1 removed") + assert.Contains(t, errOutput, "1 failed to heal") + }) + + t.Run("large repository scale test", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Create 1000 small blocks + for i := 0; i < 1000; i++ { + node.IPFSAddStr(fmt.Sprintf("content-%d", i)) + } + + // Corrupt 10 blocks + corruptMultipleBlocks(t, node, 10) + + // Verify handles large repos efficiently + res := node.RunIPFS("repo", "verify") + assert.Equal(t, 1, res.ExitCode()) + + // Should report exactly 10 corrupt blocks in stderr + assert.Contains(t, res.Stderr.String(), "10 blocks corrupt") + + // Test --drop at scale + res = node.RunIPFS("repo", "verify", "--drop") + assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks removed successfully") + output := res.Stdout.String() + assert.Contains(t, output, "10 blocks corrupt") + assert.Contains(t, output, "10 removed") + }) 
+ + t.Run("drop with partial removal failures", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // Create several blocks + for i := 0; i < 5; i++ { + node.IPFSAddStr(fmt.Sprintf("content for removal test %d", i)) + } + + // Corrupt 3 blocks + corruptedFiles := corruptMultipleBlocks(t, node, 3) + require.Len(t, corruptedFiles, 3) + + // Make one of the corrupted files read-only to simulate removal failure + err := os.Chmod(corruptedFiles[0], 0400) // read-only + require.NoError(t, err) + defer func() { _ = os.Chmod(corruptedFiles[0], 0644) }() // cleanup + + // Also make the directory read-only to prevent deletion + blockDir := filepath.Dir(corruptedFiles[0]) + originalPerm, err := os.Stat(blockDir) + require.NoError(t, err) + err = os.Chmod(blockDir, 0500) // read+execute only, no write + require.NoError(t, err) + defer func() { _ = os.Chmod(blockDir, originalPerm.Mode()) }() // cleanup + + // Try to drop - should fail because at least one block can't be removed + res := node.RunIPFS("repo", "verify", "--drop") + assert.Equal(t, 1, res.ExitCode(), "should exit 1 when some blocks fail to remove") + + // Restore permissions for verification + _ = os.Chmod(blockDir, originalPerm.Mode()) + _ = os.Chmod(corruptedFiles[0], 0644) + + // Should report both successes and failures with specific counts + errOutput := res.Stderr.String() + assert.Contains(t, errOutput, "3 blocks corrupt") + assert.Contains(t, errOutput, "2 removed") + assert.Contains(t, errOutput, "1 failed to remove") + }) +} diff --git a/test/cli/routing_dht_test.go b/test/cli/routing_dht_test.go index fb0d39195..b1f3907b6 100644 --- a/test/cli/routing_dht_test.go +++ b/test/cli/routing_dht_test.go @@ -2,7 +2,10 @@ package cli import ( "fmt" + "strconv" + "strings" "testing" + "time" "github.com/ipfs/kubo/test/cli/harness" "github.com/ipfs/kubo/test/cli/testutils" @@ -10,6 +13,33 @@ import ( "github.com/stretchr/testify/require" ) +func waitUntilProvidesComplete(t 
*testing.T, n *harness.Node) { + getCidsCount := func(line string) int { + trimmed := strings.TrimSpace(line) + countStr := strings.SplitN(trimmed, " ", 2)[0] + count, err := strconv.Atoi(countStr) + require.NoError(t, err) + return count + } + + queuedProvides, ongoingProvides := true, true + for queuedProvides || ongoingProvides { + res := n.IPFS("provide", "stat", "-a") + require.NoError(t, res.Err) + for _, line := range res.Stdout.Lines() { + if trimmed, ok := strings.CutPrefix(line, " Provide queue:"); ok { + provideQueueSize := getCidsCount(trimmed) + queuedProvides = provideQueueSize > 0 + } + if trimmed, ok := strings.CutPrefix(line, " Ongoing provides:"); ok { + ongoingProvideCount := getCidsCount(trimmed) + ongoingProvides = ongoingProvideCount > 0 + } + } + time.Sleep(10 * time.Millisecond) + } +} + func testRoutingDHT(t *testing.T, enablePubsub bool) { t.Run(fmt.Sprintf("enablePubSub=%v", enablePubsub), func(t *testing.T) { t.Parallel() @@ -27,6 +57,7 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) { } nodes.StartDaemons(daemonArgs...).Connect() + t.Cleanup(func() { nodes.StopDaemons() }) t.Run("ipfs routing findpeer", func(t *testing.T) { t.Parallel() @@ -84,6 +115,7 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) { t.Run("ipfs routing findprovs", func(t *testing.T) { t.Parallel() hash := nodes[3].IPFSAddStr("some stuff") + waitUntilProvidesComplete(t, nodes[3]) res := nodes[4].IPFS("routing", "findprovs", hash) assert.Equal(t, nodes[3].PeerID().String(), res.Stdout.Trimmed()) }) @@ -126,6 +158,7 @@ func testSelfFindDHT(t *testing.T) { }) nodes.StartDaemons() + defer nodes.StopDaemons() res := nodes[0].RunIPFS("dht", "findpeer", nodes[0].PeerID().String()) assert.Equal(t, 1, res.ExitCode()) diff --git a/test/cli/rpc_auth_test.go b/test/cli/rpc_auth_test.go index c30b107cf..54b74013b 100644 --- a/test/cli/rpc_auth_test.go +++ b/test/cli/rpc_auth_test.go @@ -159,4 +159,127 @@ func TestRPCAuth(t *testing.T) { node.StopDaemon() }) + + 
t.Run("Requests without Authorization header are rejected when auth is enabled", func(t *testing.T) { + t.Parallel() + + node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{ + "userA": { + AuthSecret: "bearer:mytoken", + AllowedPaths: []string{"/api/v0"}, + }, + }) + + // Create client with NO auth + apiClient := node.APIClient() // Uses http.DefaultClient with no auth headers + + // Should be denied without auth header + resp := apiClient.Post("/api/v0/id", nil) + assert.Equal(t, 403, resp.StatusCode) + + // Should contain denial message + assert.Contains(t, resp.Body, rpcDeniedMsg) + + node.StopDaemon() + }) + + t.Run("Version endpoint is always accessible even with limited AllowedPaths", func(t *testing.T) { + t.Parallel() + + node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{ + "userA": { + AuthSecret: "bearer:mytoken", + AllowedPaths: []string{"/api/v0/id"}, // Only /id allowed + }, + }) + + apiClient := node.APIClient() + apiClient.Client = &http.Client{ + Transport: auth.NewAuthorizedRoundTripper("Bearer mytoken", http.DefaultTransport), + } + + // Can access /version even though not in AllowedPaths + resp := apiClient.Post("/api/v0/version", nil) + assert.Equal(t, 200, resp.StatusCode) + + node.StopDaemon() + }) + + t.Run("User cannot access API with another user's secret", func(t *testing.T) { + t.Parallel() + + node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{ + "alice": { + AuthSecret: "bearer:alice-secret", + AllowedPaths: []string{"/api/v0/id"}, + }, + "bob": { + AuthSecret: "bearer:bob-secret", + AllowedPaths: []string{"/api/v0/config"}, + }, + }) + + // Alice tries to use Bob's secret + apiClient := node.APIClient() + apiClient.Client = &http.Client{ + Transport: auth.NewAuthorizedRoundTripper("Bearer bob-secret", http.DefaultTransport), + } + + // Bob's secret should work for Bob's paths + resp := apiClient.Post("/api/v0/config/show", nil) + assert.Equal(t, 200, resp.StatusCode) + + // But not 
for Alice's paths (Bob doesn't have access to /id) + resp = apiClient.Post("/api/v0/id", nil) + assert.Equal(t, 403, resp.StatusCode) + + node.StopDaemon() + }) + + t.Run("Empty AllowedPaths denies all access except version", func(t *testing.T) { + t.Parallel() + + node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{ + "userA": { + AuthSecret: "bearer:mytoken", + AllowedPaths: []string{}, // Empty! + }, + }) + + apiClient := node.APIClient() + apiClient.Client = &http.Client{ + Transport: auth.NewAuthorizedRoundTripper("Bearer mytoken", http.DefaultTransport), + } + + // Should deny everything + resp := apiClient.Post("/api/v0/id", nil) + assert.Equal(t, 403, resp.StatusCode) + + resp = apiClient.Post("/api/v0/config/show", nil) + assert.Equal(t, 403, resp.StatusCode) + + // Except version + resp = apiClient.Post("/api/v0/version", nil) + assert.Equal(t, 200, resp.StatusCode) + + node.StopDaemon() + }) + + t.Run("CLI commands fail without --api-auth when auth is enabled", func(t *testing.T) { + t.Parallel() + + node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{ + "userA": { + AuthSecret: "bearer:mytoken", + AllowedPaths: []string{"/api/v0"}, + }, + }) + + // Try to run command without --api-auth flag + resp := node.RunIPFS("id") // No --api-auth flag + require.Error(t, resp.Err) + require.Contains(t, resp.Stderr.String(), rpcDeniedMsg) + + node.StopDaemon() + }) } diff --git a/test/cli/stats_test.go b/test/cli/stats_test.go index 05c1702b4..f835381e0 100644 --- a/test/cli/stats_test.go +++ b/test/cli/stats_test.go @@ -14,6 +14,7 @@ func TestStats(t *testing.T) { t.Run("stats dht", func(t *testing.T) { t.Parallel() nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + defer nodes.StopDaemons() node1 := nodes[0] res := node1.IPFS("stats", "dht") diff --git a/test/cli/swarm_test.go b/test/cli/swarm_test.go index ecb668362..56c484ae1 100644 --- a/test/cli/swarm_test.go +++ b/test/cli/swarm_test.go @@ -31,6 +31,7 @@ 
func TestSwarm(t *testing.T) { t.Run("ipfs swarm peers returns empty peers when a node is not connected to any peers", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify") var output expectedOutputType err := json.Unmarshal(res.Stdout.Bytes(), &output) @@ -40,7 +41,9 @@ func TestSwarm(t *testing.T) { t.Run("ipfs swarm peers with flag identify outputs expected identify information about connected peers", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() otherNode := harness.NewT(t).NewNode().Init().StartDaemon() + defer otherNode.StopDaemon() node.Connect(otherNode) res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify") @@ -50,7 +53,7 @@ func TestSwarm(t *testing.T) { actualID := output.Peers[0].Identify.ID actualPublicKey := output.Peers[0].Identify.PublicKey actualAgentVersion := output.Peers[0].Identify.AgentVersion - actualAdresses := output.Peers[0].Identify.Addresses + actualAddresses := output.Peers[0].Identify.Addresses actualProtocols := output.Peers[0].Identify.Protocols expectedID := otherNode.PeerID().String() @@ -59,15 +62,17 @@ func TestSwarm(t *testing.T) { assert.Equal(t, actualID, expectedID) assert.NotNil(t, actualPublicKey) assert.NotNil(t, actualAgentVersion) - assert.Len(t, actualAdresses, 1) - assert.Equal(t, expectedAddresses[0], actualAdresses[0]) + assert.Len(t, actualAddresses, 1) + assert.Equal(t, expectedAddresses[0], actualAddresses[0]) assert.Greater(t, len(actualProtocols), 0) }) t.Run("ipfs swarm peers with flag identify outputs Identify field with data that matches calling ipfs id on a peer", func(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() otherNode := harness.NewT(t).NewNode().Init().StartDaemon() + defer otherNode.StopDaemon() node.Connect(otherNode) 
otherNodeIDResponse := otherNode.RunIPFS("id", "--enc=json") diff --git a/test/cli/telemetry_test.go b/test/cli/telemetry_test.go new file mode 100644 index 000000000..ea174d638 --- /dev/null +++ b/test/cli/telemetry_test.go @@ -0,0 +1,317 @@ +package cli + +import ( + "encoding/json" + "io" + "maps" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "slices" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTelemetry(t *testing.T) { + t.Parallel() + + t.Run("opt-out via environment variable", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Set the opt-out environment variable + node.Runner.Env["IPFS_TELEMETRY"] = "off" + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Start daemon with output capture + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // Check that telemetry is disabled + assert.Contains(t, output, "telemetry disabled via opt-out", "Expected telemetry disabled message") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was not created or was removed + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + _, err := os.Stat(uuidPath) + assert.True(t, os.IsNotExist(err), "UUID file should not exist when opted out") + }) + + t.Run("opt-out via config", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Set opt-out via config + node.IPFS("config", 
"Plugins.Plugins.telemetry.Config.Mode", "off") + + // Enable debug logging + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Start daemon with output capture + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // Check that telemetry is disabled + assert.Contains(t, output, "telemetry disabled via opt-out", "Expected telemetry disabled message") + assert.Contains(t, output, "telemetry collection skipped: opted out", "Expected telemetry skipped message") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was not created or was removed + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + _, err := os.Stat(uuidPath) + assert.True(t, os.IsNotExist(err), "UUID file should not exist when opted out") + }) + + t.Run("opt-out removes existing UUID file", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Create a UUID file manually to simulate previous telemetry run + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + testUUID := "test-uuid-12345" + err := os.WriteFile(uuidPath, []byte(testUUID), 0600) + require.NoError(t, err, "Failed to create test UUID file") + + // Verify file exists + _, err = os.Stat(uuidPath) + require.NoError(t, err, "UUID file should exist before opt-out") + + // Set the opt-out environment variable + node.Runner.Env["IPFS_TELEMETRY"] = "off" + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Start daemon with output capture + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: 
[]harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // Check that UUID file was removed + assert.Contains(t, output, "removed existing telemetry UUID file due to opt-out", "Expected UUID removal message") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was removed + _, err = os.Stat(uuidPath) + assert.True(t, os.IsNotExist(err), "UUID file should be removed after opt-out") + }) + + t.Run("telemetry enabled shows info message", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Don't set opt-out, so telemetry will be enabled + // This should trigger the info message on first run + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // First run - should show info message + assert.Contains(t, output, "Anonymous telemetry") + assert.Contains(t, output, "No data sent yet", "Expected no data sent message") + assert.Contains(t, output, "To opt-out before collection starts", "Expected opt-out instructions") + assert.Contains(t, output, "Learn more:", "Expected learn more link") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was created + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + _, err := os.Stat(uuidPath) + assert.NoError(t, err, "UUID file should exist when daemon started without telemetry opt-out") + }) + + t.Run("telemetry schema regression guard", func(t *testing.T) { + t.Parallel() + + // Define the exact set of expected telemetry fields + // 
This list must be updated whenever telemetry fields change + expectedFields := []string{ + "uuid", + "agent_version", + "private_network", + "bootstrappers_custom", + "repo_size_bucket", + "uptime_bucket", + "reprovider_strategy", + "provide_dht_sweep_enabled", + "provide_dht_interval_custom", + "provide_dht_max_workers_custom", + "routing_type", + "routing_accelerated_dht_client", + "routing_delegated_count", + "autonat_service_mode", + "autonat_reachability", + "swarm_enable_hole_punching", + "swarm_circuit_addresses", + "swarm_ipv4_public_addresses", + "swarm_ipv6_public_addresses", + "auto_tls_auto_wss", + "auto_tls_domain_suffix_custom", + "autoconf", + "autoconf_custom", + "discovery_mdns_enabled", + "platform_os", + "platform_arch", + "platform_containerized", + "platform_vm", + } + + // Channel to receive captured telemetry data + telemetryChan := make(chan map[string]interface{}, 1) + + // Create a mock HTTP server to capture telemetry + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "Failed to read body", http.StatusBadRequest) + return + } + + var telemetryData map[string]interface{} + if err := json.Unmarshal(body, &telemetryData); err != nil { + http.Error(w, "Invalid JSON", http.StatusBadRequest) + return + } + + // Send captured data through channel + select { + case telemetryChan <- telemetryData: + default: + } + + w.WriteHeader(http.StatusOK) + })) + defer mockServer.Close() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Configure telemetry with a very short delay for testing + node.IPFS("config", "Plugins.Plugins.telemetry.Config.Delay", "100ms") + node.IPFS("config", "Plugins.Plugins.telemetry.Config.Endpoint", mockServer.URL) + + 
// Enable debug logging to see what's being sent + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Start daemon + node.StartDaemon() + defer node.StopDaemon() + + // Wait for telemetry to be sent (configured delay + buffer) + select { + case telemetryData := <-telemetryChan: + receivedFields := slices.Collect(maps.Keys(telemetryData)) + slices.Sort(expectedFields) + slices.Sort(receivedFields) + + // Fast path: check if fields match exactly + if !slices.Equal(expectedFields, receivedFields) { + var missingFields, unexpectedFields []string + for _, field := range expectedFields { + if _, ok := telemetryData[field]; !ok { + missingFields = append(missingFields, field) + } + } + + expectedSet := make(map[string]struct{}, len(expectedFields)) + for _, f := range expectedFields { + expectedSet[f] = struct{}{} + } + for field := range telemetryData { + if _, ok := expectedSet[field]; !ok { + unexpectedFields = append(unexpectedFields, field) + } + } + + t.Fatalf("Telemetry field mismatch:\n"+ + " Missing fields: %v\n"+ + " Unexpected fields: %v\n"+ + " Note: Update expectedFields list in this test when adding/removing telemetry fields", + missingFields, unexpectedFields) + } + + t.Logf("Telemetry field validation passed: %d fields verified", len(expectedFields)) + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for telemetry data to be sent") + } + }) +} diff --git a/test/cli/testutils/httprouting/mock_http_content_router.go b/test/cli/testutils/httprouting/mock_http_content_router.go new file mode 100644 index 000000000..19394005e --- /dev/null +++ b/test/cli/testutils/httprouting/mock_http_content_router.go @@ -0,0 +1,145 @@ +package httprouting + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/routing/http/server" + "github.com/ipfs/boxo/routing/http/types" + "github.com/ipfs/boxo/routing/http/types/iter" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" + 
"github.com/libp2p/go-libp2p/core/routing" +) + +// MockHTTPContentRouter provides /routing/v1 +// (https://specs.ipfs.tech/routing/http-routing-v1/) server implementation +// based on github.com/ipfs/boxo/routing/http/server +type MockHTTPContentRouter struct { + m sync.Mutex + provideBitswapCalls int + findProvidersCalls int + findPeersCalls int + getClosestPeersCalls int + providers map[cid.Cid][]types.Record + peers map[peer.ID][]*types.PeerRecord + Debug bool +} + +func (r *MockHTTPContentRouter) FindProviders(ctx context.Context, key cid.Cid, limit int) (iter.ResultIter[types.Record], error) { + if r.Debug { + fmt.Printf("MockHTTPContentRouter.FindProviders(%s)\n", key.String()) + } + r.m.Lock() + defer r.m.Unlock() + r.findProvidersCalls++ + if r.providers == nil { + r.providers = make(map[cid.Cid][]types.Record) + } + records, found := r.providers[key] + if !found { + return iter.FromSlice([]iter.Result[types.Record]{}), nil + } + results := make([]iter.Result[types.Record], len(records)) + for i, rec := range records { + results[i] = iter.Result[types.Record]{Val: rec} + if r.Debug { + fmt.Printf("MockHTTPContentRouter.FindProviders(%s) result: %+v\n", key.String(), rec) + } + } + return iter.FromSlice(results), nil +} + +// nolint deprecated +func (r *MockHTTPContentRouter) ProvideBitswap(ctx context.Context, req *server.BitswapWriteProvideRequest) (time.Duration, error) { + r.m.Lock() + defer r.m.Unlock() + r.provideBitswapCalls++ + return 0, nil +} + +func (r *MockHTTPContentRouter) FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[*types.PeerRecord], error) { + r.m.Lock() + defer r.m.Unlock() + r.findPeersCalls++ + + if r.peers == nil { + r.peers = make(map[peer.ID][]*types.PeerRecord) + } + records, found := r.peers[pid] + if !found { + return iter.FromSlice([]iter.Result[*types.PeerRecord]{}), nil + } + + results := make([]iter.Result[*types.PeerRecord], len(records)) + for i, rec := range records { + results[i] = 
iter.Result[*types.PeerRecord]{Val: rec} + if r.Debug { + fmt.Printf("MockHTTPContentRouter.FindPeers(%s) result: %+v\n", pid.String(), rec) + } + } + return iter.FromSlice(results), nil +} + +func (r *MockHTTPContentRouter) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) { + return nil, routing.ErrNotSupported +} + +func (r *MockHTTPContentRouter) PutIPNS(ctx context.Context, name ipns.Name, rec *ipns.Record) error { + return routing.ErrNotSupported +} + +func (r *MockHTTPContentRouter) NumFindProvidersCalls() int { + r.m.Lock() + defer r.m.Unlock() + return r.findProvidersCalls +} + +// AddProvider adds a record for a given CID +func (r *MockHTTPContentRouter) AddProvider(key cid.Cid, record types.Record) { + r.m.Lock() + defer r.m.Unlock() + if r.providers == nil { + r.providers = make(map[cid.Cid][]types.Record) + } + r.providers[key] = append(r.providers[key], record) + + peerRecord, ok := record.(*types.PeerRecord) + if ok { + if r.peers == nil { + r.peers = make(map[peer.ID][]*types.PeerRecord) + } + pid := peerRecord.ID + r.peers[*pid] = append(r.peers[*pid], peerRecord) + } +} + +func (r *MockHTTPContentRouter) GetClosestPeers(ctx context.Context, key cid.Cid) (iter.ResultIter[*types.PeerRecord], error) { + r.m.Lock() + defer r.m.Unlock() + r.getClosestPeersCalls++ + + if r.peers == nil { + r.peers = make(map[peer.ID][]*types.PeerRecord) + } + pid, err := peer.FromCid(key) + if err != nil { + return iter.FromSlice([]iter.Result[*types.PeerRecord]{}), nil + } + records, found := r.peers[pid] + if !found { + return iter.FromSlice([]iter.Result[*types.PeerRecord]{}), nil + } + + results := make([]iter.Result[*types.PeerRecord], len(records)) + for i, rec := range records { + results[i] = iter.Result[*types.PeerRecord]{Val: rec} + if r.Debug { + fmt.Printf("MockHTTPContentRouter.GetPeers(%s) result: %+v\n", pid.String(), rec) + } + } + return iter.FromSlice(results), nil +} diff --git a/test/cli/testutils/random.go 
b/test/cli/testutils/random.go deleted file mode 100644 index 6fa6528c3..000000000 --- a/test/cli/testutils/random.go +++ /dev/null @@ -1,16 +0,0 @@ -package testutils - -import "crypto/rand" - -func RandomBytes(n int) []byte { - bytes := make([]byte, n) - _, err := rand.Read(bytes) - if err != nil { - panic(err) - } - return bytes -} - -func RandomStr(n int) string { - return string(RandomBytes(n)) -} diff --git a/test/cli/testutils/random_deterministic.go b/test/cli/testutils/random_deterministic.go new file mode 100644 index 000000000..e55404168 --- /dev/null +++ b/test/cli/testutils/random_deterministic.go @@ -0,0 +1,46 @@ +package testutils + +import ( + "crypto/sha256" + "io" + + "github.com/dustin/go-humanize" + "golang.org/x/crypto/chacha20" +) + +type randomReader struct { + cipher *chacha20.Cipher + remaining int64 +} + +func (r *randomReader) Read(p []byte) (int, error) { + if r.remaining <= 0 { + return 0, io.EOF + } + n := int64(len(p)) + if n > r.remaining { + n = r.remaining + } + // Generate random bytes directly into the provided buffer + r.cipher.XORKeyStream(p[:n], make([]byte, n)) + r.remaining -= n + return int(n), nil +} + +// createRandomReader produces specified number of pseudo-random bytes +// from a seed. 
+func DeterministicRandomReader(sizeStr string, seed string) (io.Reader, error) { + size, err := humanize.ParseBytes(sizeStr) + if err != nil { + return nil, err + } + // Hash the seed string to a 32-byte key for ChaCha20 + key := sha256.Sum256([]byte(seed)) + // Use ChaCha20 for deterministic random bytes + var nonce [chacha20.NonceSize]byte // Zero nonce for simplicity + cipher, err := chacha20.NewUnauthenticatedCipher(key[:chacha20.KeySize], nonce[:]) + if err != nil { + return nil, err + } + return &randomReader{cipher: cipher, remaining: int64(size)}, nil +} diff --git a/test/cli/testutils/random_files.go b/test/cli/testutils/random_files.go deleted file mode 100644 index c7dca10d6..000000000 --- a/test/cli/testutils/random_files.go +++ /dev/null @@ -1,118 +0,0 @@ -package testutils - -import ( - "fmt" - "io" - "math/rand" - "os" - "path" - "time" -) - -var ( - AlphabetEasy = []rune("abcdefghijklmnopqrstuvwxyz01234567890-_") - AlphabetHard = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890!@#$%^&*()-_+= ;.,<>'\"[]{}() ") -) - -type RandFiles struct { - Rand *rand.Rand - FileSize int // the size per file. 
- FilenameSize int - Alphabet []rune // for filenames - - FanoutDepth int // how deep the hierarchy goes - FanoutFiles int // how many files per dir - FanoutDirs int // how many dirs per dir - - RandomSize bool // randomize file sizes - RandomFanout bool // randomize fanout numbers -} - -func NewRandFiles() *RandFiles { - return &RandFiles{ - Rand: rand.New(rand.NewSource(time.Now().UnixNano())), - FileSize: 4096, - FilenameSize: 16, - Alphabet: AlphabetEasy, - FanoutDepth: 2, - FanoutDirs: 5, - FanoutFiles: 10, - RandomSize: true, - } -} - -func (r *RandFiles) WriteRandomFiles(root string, depth int) error { - numfiles := r.FanoutFiles - if r.RandomFanout { - numfiles = rand.Intn(r.FanoutFiles) + 1 - } - - for i := 0; i < numfiles; i++ { - if err := r.WriteRandomFile(root); err != nil { - return err - } - } - - if depth+1 <= r.FanoutDepth { - numdirs := r.FanoutDirs - if r.RandomFanout { - numdirs = r.Rand.Intn(numdirs) + 1 - } - - for i := 0; i < numdirs; i++ { - if err := r.WriteRandomDir(root, depth+1); err != nil { - return err - } - } - } - - return nil -} - -func (r *RandFiles) RandomFilename(length int) string { - b := make([]rune, length) - for i := range b { - b[i] = r.Alphabet[r.Rand.Intn(len(r.Alphabet))] - } - return string(b) -} - -func (r *RandFiles) WriteRandomFile(root string) error { - filesize := int64(r.FileSize) - if r.RandomSize { - filesize = r.Rand.Int63n(filesize) + 1 - } - - n := rand.Intn(r.FilenameSize-4) + 4 - name := r.RandomFilename(n) - filepath := path.Join(root, name) - f, err := os.Create(filepath) - if err != nil { - return fmt.Errorf("creating random file: %w", err) - } - - if _, err := io.CopyN(f, r.Rand, filesize); err != nil { - return fmt.Errorf("copying random file: %w", err) - } - - return f.Close() -} - -func (r *RandFiles) WriteRandomDir(root string, depth int) error { - if depth > r.FanoutDepth { - return nil - } - - n := rand.Intn(r.FilenameSize-4) + 4 - name := r.RandomFilename(n) - root = path.Join(root, name) - if 
err := os.MkdirAll(root, 0o755); err != nil { - return fmt.Errorf("creating random dir: %w", err) - } - - err := r.WriteRandomFiles(root, depth) - if err != nil { - return fmt.Errorf("writing random files in random dir: %w", err) - } - return nil -} diff --git a/test/cli/testutils/requires.go b/test/cli/testutils/requires.go index 1462b7fee..b0070e441 100644 --- a/test/cli/testutils/requires.go +++ b/test/cli/testutils/requires.go @@ -2,6 +2,7 @@ package testutils import ( "os" + "os/exec" "runtime" "testing" ) @@ -13,9 +14,48 @@ func RequiresDocker(t *testing.T) { } func RequiresFUSE(t *testing.T) { - if os.Getenv("TEST_FUSE") != "1" { - t.SkipNow() + // Skip if FUSE tests are explicitly disabled + if os.Getenv("TEST_FUSE") == "0" { + t.Skip("FUSE tests disabled via TEST_FUSE=0") } + + // If TEST_FUSE=1 is set, always run (for backwards compatibility) + if os.Getenv("TEST_FUSE") == "1" { + return + } + + // Auto-detect FUSE availability based on platform and tools + if !isFUSEAvailable(t) { + t.Skip("FUSE not available (no fusermount/umount found or unsupported platform)") + } +} + +// isFUSEAvailable checks if FUSE is available on the current system +func isFUSEAvailable(t *testing.T) bool { + t.Helper() + + // Check platform support + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "openbsd", "netbsd": + // These platforms potentially support FUSE + case "windows": + // Windows has limited FUSE support via WinFsp, but skip for now + return false + default: + // Unknown platform, assume no FUSE support + return false + } + + // Check for required unmount tools + var unmountCmd string + if runtime.GOOS == "linux" { + unmountCmd = "fusermount" + } else { + unmountCmd = "umount" + } + + _, err := exec.LookPath(unmountCmd) + return err == nil } func RequiresExpensive(t *testing.T) { diff --git a/test/cli/testutils/strings.go b/test/cli/testutils/strings.go index 110051e67..9bd73b379 100644 --- a/test/cli/testutils/strings.go +++ 
b/test/cli/testutils/strings.go @@ -13,6 +13,11 @@ import ( manet "github.com/multiformats/go-multiaddr/net" ) +var ( + AlphabetEasy = []rune("abcdefghijklmnopqrstuvwxyz01234567890-_") + AlphabetHard = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890!@#$%^&*()-_+= ;.,<>'\"[]{}() ") +) + // StrCat takes a bunch of strings or string slices // and concats them all together into one string slice. // If an arg is not one of those types, this panics. diff --git a/test/cli/tracing_test.go b/test/cli/tracing_test.go index 6f19759be..7be60fea0 100644 --- a/test/cli/tracing_test.go +++ b/test/cli/tracing_test.go @@ -76,6 +76,7 @@ func TestTracing(t *testing.T) { node.Runner.Env["OTEL_EXPORTER_OTLP_PROTOCOL"] = "grpc" node.Runner.Env["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://localhost:4317" node.StartDaemon() + defer node.StopDaemon() assert.Eventually(t, func() bool { diff --git a/test/cli/transports_test.go b/test/cli/transports_test.go index cbef5c57d..e36d27287 100644 --- a/test/cli/transports_test.go +++ b/test/cli/transports_test.go @@ -6,9 +6,10 @@ import ( "path/filepath" "testing" + "github.com/ipfs/go-test/random" + "github.com/ipfs/go-test/random/files" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,7 +24,7 @@ func TestTransports(t *testing.T) { }) } checkSingleFile := func(nodes harness.Nodes) { - s := testutils.RandomStr(100) + s := string(random.Bytes(100)) hash := nodes[0].IPFSAddStr(s) nodes.ForEachPar(func(n *harness.Node) { val := n.IPFS("cat", hash).Stdout.String() @@ -33,10 +34,11 @@ func TestTransports(t *testing.T) { checkRandomDir := func(nodes harness.Nodes) { randDir := filepath.Join(nodes[0].Dir, "foobar") require.NoError(t, os.Mkdir(randDir, 0o777)) - rf := testutils.NewRandFiles() - rf.FanoutDirs = 3 - rf.FanoutFiles = 6 - require.NoError(t, rf.WriteRandomFiles(randDir, 4)) + 
rfCfg := files.DefaultConfig() + rfCfg.Dirs = 3 + rfCfg.Files = 6 + rfCfg.Depth = 4 + require.NoError(t, files.Create(rfCfg, randDir)) hash := nodes[1].IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() nodes.ForEachPar(func(n *harness.Node) { @@ -60,6 +62,8 @@ func TestTransports(t *testing.T) { cfg.Swarm.Transports.Network.WebTransport = config.False cfg.Swarm.Transports.Network.WebRTCDirect = config.False cfg.Swarm.Transports.Network.Websocket = config.False + // Disable AutoTLS since we're disabling WebSocket transport + cfg.AutoTLS.Enabled = config.False }) }) disableRouting(nodes) @@ -70,6 +74,7 @@ func TestTransports(t *testing.T) { t.Parallel() nodes := tcpNodes(t).StartDaemons().Connect() runTests(nodes) + nodes.StopDaemons() }) t.Run("tcp with NOISE", func(t *testing.T) { @@ -82,6 +87,7 @@ func TestTransports(t *testing.T) { }) nodes.StartDaemons().Connect() runTests(nodes) + nodes.StopDaemons() }) t.Run("QUIC", func(t *testing.T) { @@ -94,11 +100,13 @@ func TestTransports(t *testing.T) { cfg.Swarm.Transports.Network.QUIC = config.True cfg.Swarm.Transports.Network.WebTransport = config.False cfg.Swarm.Transports.Network.WebRTCDirect = config.False + cfg.Swarm.Transports.Network.Websocket = config.False }) }) disableRouting(nodes) nodes.StartDaemons().Connect() runTests(nodes) + nodes.StopDaemons() }) t.Run("QUIC+Webtransport", func(t *testing.T) { @@ -111,11 +119,13 @@ func TestTransports(t *testing.T) { cfg.Swarm.Transports.Network.QUIC = config.True cfg.Swarm.Transports.Network.WebTransport = config.True cfg.Swarm.Transports.Network.WebRTCDirect = config.False + cfg.Swarm.Transports.Network.Websocket = config.False }) }) disableRouting(nodes) nodes.StartDaemons().Connect() runTests(nodes) + nodes.StopDaemons() }) t.Run("QUIC connects with non-dialable transports", func(t *testing.T) { @@ -138,6 +148,7 @@ func TestTransports(t *testing.T) { disableRouting(nodes) nodes.StartDaemons().Connect() runTests(nodes) + nodes.StopDaemons() }) t.Run("WebRTC Direct", 
func(t *testing.T) { @@ -150,10 +161,12 @@ func TestTransports(t *testing.T) { cfg.Swarm.Transports.Network.QUIC = config.False cfg.Swarm.Transports.Network.WebTransport = config.False cfg.Swarm.Transports.Network.WebRTCDirect = config.True + cfg.Swarm.Transports.Network.Websocket = config.False }) }) disableRouting(nodes) nodes.StartDaemons().Connect() runTests(nodes) + nodes.StopDaemons() }) } diff --git a/test/cli/webui_test.go b/test/cli/webui_test.go new file mode 100644 index 000000000..93b8fe4cc --- /dev/null +++ b/test/cli/webui_test.go @@ -0,0 +1,88 @@ +package cli + +import ( + "net/http" + "testing" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +func TestWebUI(t *testing.T) { + t.Parallel() + + t.Run("NoFetch=true shows not available error", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + node.UpdateConfig(func(cfg *config.Config) { + cfg.Gateway.NoFetch = true + }) + + node.StartDaemon("--offline") + + apiClient := node.APIClient() + resp := apiClient.Get("/webui/") + + // Should return 503 Service Unavailable when WebUI is not in local store + assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode) + + // Check response contains helpful information + body := resp.Body + assert.Contains(t, body, "IPFS WebUI Not Available") + assert.Contains(t, body, "Gateway.NoFetch=true") + assert.Contains(t, body, "ipfs pin add") + assert.Contains(t, body, "ipfs dag import") + assert.Contains(t, body, "https://github.com/ipfs/ipfs-webui/releases") + }) + + t.Run("DeserializedResponses=false shows incompatible error", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + node.UpdateConfig(func(cfg *config.Config) { + cfg.Gateway.DeserializedResponses = config.False + }) + + node.StartDaemon() + + apiClient := node.APIClient() + resp := apiClient.Get("/webui/") + + // Should return 503 Service Unavailable + assert.Equal(t, 
http.StatusServiceUnavailable, resp.StatusCode) + + // Check response contains incompatibility message + body := resp.Body + assert.Contains(t, body, "IPFS WebUI Incompatible") + assert.Contains(t, body, "Gateway.DeserializedResponses=false") + assert.Contains(t, body, "WebUI requires deserializing IPFS responses") + assert.Contains(t, body, "Gateway.DeserializedResponses=true") + }) + + t.Run("Both NoFetch=true and DeserializedResponses=false shows incompatible error", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + node.UpdateConfig(func(cfg *config.Config) { + cfg.Gateway.NoFetch = true + cfg.Gateway.DeserializedResponses = config.False + }) + + node.StartDaemon("--offline") + + apiClient := node.APIClient() + resp := apiClient.Get("/webui/") + + // Should return 503 Service Unavailable + assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode) + + // DeserializedResponses=false takes priority + body := resp.Body + assert.Contains(t, body, "IPFS WebUI Incompatible") + assert.Contains(t, body, "Gateway.DeserializedResponses=false") + // Should NOT mention NoFetch since DeserializedResponses check comes first + assert.NotContains(t, body, "NoFetch") + }) +} diff --git a/test/dependencies/dependencies.go b/test/dependencies/dependencies.go index 0d56cd5a7..848ffba2f 100644 --- a/test/dependencies/dependencies.go +++ b/test/dependencies/dependencies.go @@ -1,5 +1,4 @@ //go:build tools -// +build tools package tools @@ -7,9 +6,9 @@ import ( _ "github.com/Kubuxu/gocovmerge" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/ipfs/go-cidutil/cid-fmt" + _ "github.com/ipfs/go-test/cli/random-data" + _ "github.com/ipfs/go-test/cli/random-files" _ "github.com/ipfs/hang-fds" - _ "github.com/jbenet/go-random-files/random-files" - _ "github.com/jbenet/go-random/random" _ "github.com/multiformats/go-multihash/multihash" _ "gotest.tools/gotestsum" ) diff --git a/test/dependencies/go.mod b/test/dependencies/go.mod index 
24af83852..75bbdf72c 100644 --- a/test/dependencies/go.mod +++ b/test/dependencies/go.mod @@ -1,77 +1,101 @@ module github.com/ipfs/kubo/test/dependencies -go 1.23 +go 1.25 replace github.com/ipfs/kubo => ../../ require ( github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd - github.com/golangci/golangci-lint v1.60.2 + github.com/golangci/golangci-lint v1.64.8 github.com/ipfs/go-cidutil v0.1.0 - github.com/ipfs/go-log v1.0.5 + github.com/ipfs/go-log/v2 v2.9.0 + github.com/ipfs/go-test v0.2.3 github.com/ipfs/hang-fds v0.1.0 - github.com/ipfs/iptb v1.4.0 - github.com/ipfs/iptb-plugins v0.5.0 - github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c - github.com/jbenet/go-random-files v0.0.0-20190219210431-31b3f20ebded - github.com/multiformats/go-multiaddr v0.13.0 + github.com/ipfs/iptb v1.4.1 + github.com/ipfs/iptb-plugins v0.5.1 + github.com/multiformats/go-multiaddr v0.16.1 github.com/multiformats/go-multihash v0.2.3 - gotest.tools/gotestsum v1.12.0 + gotest.tools/gotestsum v1.13.0 ) require ( - 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect - 4d63.com/gochecknoglobals v0.2.1 // indirect - github.com/4meepo/tagalign v1.3.4 // indirect - github.com/Abirdcfly/dupword v0.0.14 // indirect - github.com/Antonboom/errname v0.1.13 // indirect - github.com/Antonboom/nilnil v0.1.9 // indirect - github.com/Antonboom/testifylint v1.4.3 // indirect + 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect + 4d63.com/gochecknoglobals v0.2.2 // indirect + github.com/4meepo/tagalign v1.4.2 // indirect + github.com/Abirdcfly/dupword v0.1.3 // indirect + github.com/Antonboom/errname v1.0.0 // indirect + github.com/Antonboom/nilnil v1.0.1 // indirect + github.com/Antonboom/testifylint v1.5.2 // indirect github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect - github.com/Crocmagnon/fatcontext v0.4.0 // indirect + github.com/Crocmagnon/fatcontext v0.7.1 // indirect + github.com/DataDog/zstd v1.5.7 // indirect github.com/Djarvur/go-err113 
v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect - github.com/alecthomas/go-check-sumtype v0.1.4 // indirect - github.com/alexkohler/nakedret/v2 v2.0.4 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect + github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 // indirect + github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect + github.com/alexkohler/nakedret/v2 v2.0.5 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.1.2 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect - github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/ashanbrown/makezero v1.2.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bitfield/gotestdox v0.2.2 // indirect - github.com/bkielbasa/cyclop v1.2.1 // indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v4 v4.4.1 // indirect - github.com/breml/bidichk v0.2.7 // indirect - github.com/breml/errchkjson v0.3.6 // indirect - github.com/butuzov/ireturn v0.3.0 // indirect - github.com/butuzov/mirror v1.2.0 // indirect - github.com/catenacyber/perfsprint v0.7.1 // indirect + github.com/bombsimon/wsl/v4 v4.5.0 // indirect + github.com/breml/bidichk v0.3.2 // indirect + github.com/breml/errchkjson v0.4.0 // indirect + github.com/butuzov/ireturn v0.3.1 // 
indirect + github.com/butuzov/mirror v1.3.0 // indirect + github.com/caddyserver/certmagic v0.23.0 // indirect + github.com/caddyserver/zerossl v0.1.3 // indirect + github.com/catenacyber/perfsprint v0.8.2 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/ckaznocha/intrange v0.1.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.13.4 // indirect + github.com/ckaznocha/intrange v0.3.0 // indirect + github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble/v2 v2.1.3 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect github.com/dnephin/pflag v1.0.7 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ettle/strcase v0.2.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect - github.com/fatih/color v1.17.0 // indirect + 
github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/firefart/nonamedreturns v1.0.5 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/flynn/noise v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/ghostiam/protogetter v0.3.6 // indirect - github.com/go-critic/go-critic v0.11.4 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect + github.com/gammazero/chanqueue v1.1.1 // indirect + github.com/gammazero/deque v1.2.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/ghostiam/protogetter v0.3.9 // indirect + github.com/go-critic/go-critic v0.12.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect @@ -80,87 +104,105 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0 // indirect - github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect + github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect + github.com/golangci/go-printf-func-name v0.1.0 // indirect + 
github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect github.com/golangci/misspell v0.6.0 // indirect - github.com/golangci/modinfo v0.3.4 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect - github.com/golangci/revgrep v0.5.3 // indirect + github.com/golangci/revgrep v0.8.0 // indirect github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect - github.com/gostaticanalysis/comment v1.4.2 // indirect - github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/comment v1.5.0 // indirect + github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect - github.com/gxed/go-shellwords v1.0.3 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34 // indirect - github.com/ipfs/go-block-format v0.2.0 // indirect - github.com/ipfs/go-cid v0.4.1 // indirect - github.com/ipfs/go-datastore v0.6.0 // indirect - github.com/ipfs/go-ipfs-util v0.0.3 // indirect - github.com/ipfs/go-ipld-format v0.6.0 // 
indirect - github.com/ipfs/go-ipld-legacy v0.2.1 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect - github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/kubo v0.16.0 // indirect - github.com/ipld/go-codec-dagpb v1.6.0 // indirect + github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c // indirect + github.com/ipfs/go-bitfield v1.1.0 // indirect + github.com/ipfs/go-block-format v0.2.3 // indirect + github.com/ipfs/go-cid v0.6.0 // indirect + github.com/ipfs/go-datastore v0.9.0 // indirect + github.com/ipfs/go-dsqueue v0.1.1 // indirect + github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect + github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect + github.com/ipfs/go-ipld-cbor v0.2.1 // indirect + github.com/ipfs/go-ipld-format v0.6.3 // indirect + github.com/ipfs/go-ipld-legacy v0.2.2 // indirect + github.com/ipfs/go-metrics-interface v0.3.0 // indirect + github.com/ipfs/go-unixfsnode v1.10.2 // indirect + github.com/ipfs/kubo v0.31.0 // indirect + github.com/ipld/go-car/v2 v2.16.0 // indirect + github.com/ipld/go-codec-dagpb v1.7.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect - github.com/jbenet/goprocess v0.1.4 // indirect + github.com/ipshipyard/p2p-forge v0.7.0 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jgautheron/goconst v1.7.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect - github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect - github.com/jjti/go-spancheck v0.6.2 // indirect - github.com/julz/importas v0.1.0 // indirect - github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect - github.com/kisielk/errcheck v1.7.0 // indirect - github.com/kkHAIKE/contextcheck v1.1.5 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/jjti/go-spancheck v0.6.4 // indirect + github.com/julz/importas v0.2.0 // indirect + github.com/karamaru-alpha/copyloopvar v1.2.1 
// indirect + github.com/kisielk/errcheck v1.9.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.6 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/koron/go-ssdp v0.0.6 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.10 // indirect - github.com/kyoh86/exportloopref v0.1.11 // indirect - github.com/lasiar/canonicalheader v1.1.1 // indirect - github.com/ldez/gomoddirectives v0.2.4 // indirect - github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.4.2 // indirect + github.com/ldez/gomoddirectives v0.6.1 // indirect + github.com/ldez/grignotin v0.9.0 // indirect + github.com/ldez/tagliatelle v0.7.1 // indirect + github.com/ldez/usetesting v0.4.2 // indirect github.com/leonklingele/grouper v1.1.2 // indirect + github.com/libdns/libdns v1.0.0-beta.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-libp2p v0.36.3 // indirect + github.com/libp2p/go-doh-resolver v0.5.0 // indirect + github.com/libp2p/go-flow-metrics v0.3.0 // indirect + github.com/libp2p/go-libp2p v0.46.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.26.1 // indirect - github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect - github.com/libp2p/go-libp2p-record v0.2.0 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.7.4 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.36.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect + github.com/libp2p/go-libp2p-record v0.3.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-netroute v0.2.1 // indirect - 
github.com/lufeee/execinquery v1.2.1 // indirect + github.com/libp2p/go-netroute v0.3.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/macabu/inamedparam v0.1.3 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect - github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/matoous/godox v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/mgechev/revive v1.3.9 // indirect - github.com/miekg/dns v1.1.61 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-shellwords v1.0.12 // indirect + github.com/mgechev/revive v1.7.0 // indirect + github.com/mholt/acmez/v3 v3.1.2 // indirect + github.com/miekg/dns v1.1.68 // indirect + github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -168,105 +210,141 @@ require ( github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multicodec v0.9.0 // indirect - github.com/multiformats/go-multistream v0.5.0 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect + github.com/multiformats/go-multicodec v0.10.0 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint 
v0.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.16.2 // indirect + github.com/nunnatsa/ginkgolinter v0.19.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect + github.com/pion/datachannel v1.5.10 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/dtls/v3 v3.0.6 // indirect + github.com/pion/ice/v4 v4.0.10 // indirect + github.com/pion/interceptor v0.1.40 // indirect + github.com/pion/logging v0.2.3 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.19 // indirect + github.com/pion/sctp v1.8.39 // indirect + github.com/pion/sdp/v3 v3.0.13 // indirect + github.com/pion/srtp/v3 v3.0.6 // indirect + github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v3 v3.0.0 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v4 v4.0.2 // indirect + github.com/pion/webrtc/v4 v4.1.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/polyfloyd/go-errorlint v1.6.0 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - 
github.com/prometheus/procfs v0.15.1 // indirect - github.com/quasilyte/go-ruleguard v0.4.2 // indirect + github.com/polyfloyd/go-errorlint v1.7.1 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/quic-go/quic-go v0.57.1 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryancurrah/gomodguard v1.3.3 // indirect + github.com/ryancurrah/gomodguard v1.3.5 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/samber/lo v1.46.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect - github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect - github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe // indirect - github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect + github.com/securego/gosec/v2 v2.22.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx 
v1.0.3 // indirect - github.com/sivchari/tenv v1.10.0 // indirect - github.com/sonatard/noctx v0.0.2 // indirect + github.com/sivchari/tenv v1.12.1 // indirect + github.com/sonatard/noctx v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/spf13/viper v1.19.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.9.0 // indirect + github.com/stretchr/testify v1.11.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.16 // indirect - github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a // indirect - github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect + github.com/tdakkota/asciicheck v0.4.1 // indirect + github.com/tetafro/godot v1.5.0 // indirect + github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect + github.com/timonwong/loggercheck v0.10.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ultraware/funlen v0.1.0 // indirect - github.com/ultraware/whitespace v0.1.1 // indirect - github.com/urfave/cli v1.22.10 // indirect - github.com/uudashr/gocognit v1.1.3 // indirect + github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect + github.com/ultraware/funlen v0.2.0 
// indirect + github.com/ultraware/whitespace v0.2.0 // indirect + github.com/urfave/cli v1.22.16 // indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.3.1 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect + github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect + github.com/whyrusleeping/cbor-gen v0.3.1 // indirect + github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/wlynxg/anet v0.0.5 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.3.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect - go-simpler.org/musttag v0.12.2 // indirect - go-simpler.org/sloglint v0.7.2 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.uber.org/automaxprocs v1.5.3 // indirect + go-simpler.org/musttag v0.13.0 // indirect + go-simpler.org/sloglint v0.9.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.uber.org/dig v1.19.0 // indirect + go.uber.org/fx v1.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/exp/typeparams v0.0.0-20240613232115-7f521ea00fb8 // 
indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + go.uber.org/zap/exp v0.3.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 // indirect + golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.39.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum v0.16.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.5.1 // indirect - lukechampine.com/blake3 v1.3.0 // indirect + honnef.co/go/tools v0.6.1 // indirect + lukechampine.com/blake3 v1.4.1 // indirect mvdan.cc/gofumpt v0.7.0 // indirect mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect ) diff --git a/test/dependencies/go.sum b/test/dependencies/go.sum index 7d9722814..78d6acaef 100644 --- a/test/dependencies/go.sum +++ b/test/dependencies/go.sum @@ -1,151 +1,189 @@ -4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= -4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= -4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= -4d63.com/gochecknoglobals v0.2.1/go.mod 
h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8= -github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= -github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= -github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= -github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM= -github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= -github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ= -github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ= -github.com/Antonboom/testifylint v1.4.3 h1:ohMt6AHuHgttaQ1xb6SSnxCeK4/rnK7KKzbvs7DmEck= -github.com/Antonboom/testifylint v1.4.3/go.mod h1:+8Q9+AOLsz5ZiQiiYujJKs9mNz398+M6UgslP4qgJLA= +4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A= +4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= +4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= +4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= +github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= +github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= +github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= +github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw= +github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA= +github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI= +github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs= 
+github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0= +github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk= +github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -github.com/Crocmagnon/fatcontext v0.4.0 h1:4ykozu23YHA0JB6+thiuEv7iT6xq995qS1vcuWZq0tg= -github.com/Crocmagnon/fatcontext v0.4.0/go.mod h1:ZtWrXkgyfsYPzS6K3O88va6t2GEglG93vnII/F94WC0= +github.com/Crocmagnon/fatcontext v0.7.1 h1:SC/VIbRRZQeQWj/TcQBS6JmrXcfA+BU4OGSVUt54PjM= +github.com/Crocmagnon/fatcontext v0.7.1/go.mod h1:1wMvv3NXEBJucFGfwOJBxSVWcoIO6emV215SMkW9MFU= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 h1:Sz1JIXEcSfhz7fUi7xHnhpIE0thVASYjvosApmHuD2k= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1/go.mod h1:n/LSCXNuIYqVfBlVXyHfMQkZDdp1/mmxfSjADd3z1Zg= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync 
v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd h1:HNhzThEtZW714v8Eda8sWWRcu9WSzJC+oCyjRjvZgRA= github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd/go.mod h1:bqoB8kInrTeEtYAwaIXoSRqdwnjQmFhsfusnzyui6yY= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= -github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= -github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= -github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= -github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= -github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= -github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= -github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg= -github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= +github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 h1:8XBWWQD+vFF+JqOsm16t0Kab1a7YWV8+GISVEP8AuZ8= +github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657/go.mod 
h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alexkohler/nakedret/v2 v2.0.5 h1:fP5qLgtwbx9EJE8dGEERT02YwS8En4r9nnZ71RK+EVU= +github.com/alexkohler/nakedret/v2 v2.0.5/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.1.2 h1:Yf8Iwm3z2hUUrP4muWfW83DF4nE3r1xZ26fGWUKCZlo= +github.com/alingse/nilnesserr v0.1.2/go.mod 
h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= -github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= -github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= +github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitfield/gotestdox v0.2.2 h1:x6RcPAbBbErKLnapz1QeAlf3ospg8efBsedU93CDsnE= github.com/bitfield/gotestdox v0.2.2/go.mod h1:D+gwtS0urjBrzguAkTM2wodsTQYFHdpx8eqRJ3N+9pY= -github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= -github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw= -github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= -github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= 
-github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= -github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= -github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= -github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= -github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= -github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs= -github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ= -github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc= -github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A= +github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc= +github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs= +github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos= +github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk= +github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8= +github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY= +github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= +github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU= +github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= +github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= +github.com/caddyserver/zerossl v0.1.3/go.mod 
h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= +github.com/catenacyber/perfsprint v0.8.2 h1:+o9zVmCSVa7M4MvabsWvESEhpsMkhfE7k0sHNGL95yw= +github.com/catenacyber/perfsprint v0.8.2/go.mod h1:q//VWC2fWbcdSLEY1R3l8n0zQCDPdE4IjZwyY1HMunM= github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= -github.com/ckaznocha/intrange v0.1.2 h1:3Y4JAxcMntgb/wABQ6e8Q8leMd26JbX2790lIss9MTI= -github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY= +github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo= 
+github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble/v2 v2.1.3 h1:irU503OnjRoJBrkZQIJvwv9c4WvpUeOJxhRApojB8D8= +github.com/cockroachdb/pebble/v2 v2.1.3/go.mod h1:B1UgWsyR+L+UvZXNgpxw+WqsUKA8VQ/bb//FXOHghB8= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= 
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= -github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/daixiang0/gci v0.13.4 h1:61UGkmpoAcxHM2hhNkZEf5SzwQtWJXTSws7jaPyqwlw= -github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= +github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= -github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop 
v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA= github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= -github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk= -github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= -github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU= -github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod 
h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= +github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.2.0 h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU= +github.com/gammazero/deque v1.2.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/ghostiam/protogetter v0.3.9 h1:j+zlLLWzqLay22Cz/aYwTHKQ88GE2DQ6GkWSYFOI4lQ= +github.com/ghostiam/protogetter v0.3.9/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= +github.com/go-critic/go-critic v0.12.0 h1:iLosHZuye812wnkEz1Xu3aBwn5ocCPfc9yqmFG9pa6w= +github.com/go-critic/go-critic v0.12.0/go.mod h1:DpE0P6OVc6JzVYzmM5gq5jMU31zLr4am5mB/VfFK64w= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-quicktest/qt v1.101.0 
h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= @@ -171,71 +209,48 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= -github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= -github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME= -github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE= -github.com/golangci/golangci-lint v1.60.2 h1:Y8aWnZCMOLY5T7Ga5hcoemyKsZZJCUmIIK3xTD3jIhc= -github.com/golangci/golangci-lint v1.60.2/go.mod 
h1:4UvjLpOJoQSvmyWkmO1urDR3txhL9R9sn4oM/evJ95g= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= +github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= +github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= +github.com/golangci/golangci-lint v1.64.8 h1:y5TdeVidMtBGG32zgSC7ZXTFNHrsJkDnpO4ItB3Am+I= +github.com/golangci/golangci-lint v1.64.8/go.mod h1:5cEsUQBSr6zi8XI8OjmcY2Xmliqc4iYL7YoPrL+zLJ4= github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= -github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA= -github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM= github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= -github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs= -github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= +github.com/golangci/revgrep v0.8.0/go.mod 
h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs= github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof 
v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= +github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -248,22 +263,20 @@ github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= -github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= +github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= 
+github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= +github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= -github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= -github.com/gxed/go-shellwords v1.0.3 h1:2TP32H4TAklZUdz84oj95BJhVnIrRasyx2j1cqH5K38= -github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3vH7VqgtMxQ= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -281,87 +294,93 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34 h1:/Etgc4IR0OUF+nIoNdqwu12EYuaSMpd7/Nc5wRLd67U= -github.com/ipfs/boxo v0.23.1-0.20240927234853-19a402b7dc34/go.mod h1:ulu5I6avTmgGmvjuCaBRKwsaOOKjBfQw1EiOOQp8M6E= -github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= -github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c h1:mczpALnNzNhmggehO5Ehr9+Q8+NiJyKJfT4EPwi01d0= +github.com/ipfs/boxo v0.35.3-0.20251202220026-0842ad274a0c/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0= +github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= +github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= +github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk= +github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA= +github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= +github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= 
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w= +github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp0x0= +github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo= +github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc= +github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10= +github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ= +github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= -github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= -github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= -github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= -github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= -github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= -github.com/ipfs/go-ipld-legacy v0.2.1/go.mod 
h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= -github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= -github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= -github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= -github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= -github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= -github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= -github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= +github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= +github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E= +github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= +github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8= +github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= +github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= +github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= +github.com/ipfs/go-log/v2 v2.9.0 h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk= +github.com/ipfs/go-log/v2 v2.9.0/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c= +github.com/ipfs/go-metrics-interface v0.3.0 
h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= +github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= +github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= +github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8= +github.com/ipfs/go-unixfsnode v1.10.2/go.mod h1:ImDPTSiKZ+2h4UVdkSDITJHk87bUAp7kX/lgifjRicg= github.com/ipfs/hang-fds v0.1.0 h1:deBiFlWHsVGzJ0ZMaqscEqRM1r2O1rFZ59UiQXb1Xko= github.com/ipfs/hang-fds v0.1.0/go.mod h1:29VLWOn3ftAgNNgXg/al7b11UzuQ+w7AwtCGcTaWkbM= -github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo= -github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= -github.com/ipfs/iptb-plugins v0.5.0 h1:zEMLlWAb531mLpD36KFy/yc0egT6FkBEHQtdERexNao= -github.com/ipfs/iptb-plugins v0.5.0/go.mod h1:/6crDf3s58T70BhZ+m9SyyKpK7VvSDS2Ny4kafxXDp4= -github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= -github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= +github.com/ipfs/iptb v1.4.1 h1:faXd3TKGPswbHyZecqqg6UfbES7RDjTKQb+6VFPKDUo= +github.com/ipfs/iptb v1.4.1/go.mod h1:nTsBMtVYFEu0FjC5DgrErnABm3OG9ruXkFXGJoTV5OA= +github.com/ipfs/iptb-plugins v0.5.1 h1:11PNTNEt2+SFxjUcO5qpyCTXqDj6T8Tx9pU/G4ytCIQ= +github.com/ipfs/iptb-plugins v0.5.1/go.mod h1:mscJAjRnu4g16QK6oUBn9RGpcp8ueJmLfmPxIG/At78= +github.com/ipld/go-car/v2 v2.16.0 h1:LWe0vmN/QcQmUU4tr34W5Nv5mNraW+G6jfN2s+ndBco= +github.com/ipld/go-car/v2 v2.16.0/go.mod h1:RqFGWN9ifcXVmCrTAVnfnxiWZk1+jIx67SYhenlmL34= +github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0= +github.com/ipld/go-codec-dagpb 
v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714 h1:cqNk8PEwHnK0vqWln+U/YZhQc9h2NB3KjUjDPZo5Q2s= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714/go.mod h1:ZEUdra3CoqRVRYgAX/jAJO9aZGz6SKtKEG628fHHktY= +github.com/ipshipyard/p2p-forge v0.7.0 h1:PQayexxZC1FR2Vx0XOSbmZ6wDPliidS48I+xXWuF+YU= +github.com/ipshipyard/p2p-forge v0.7.0/go.mod h1:i2wg0p7WmHGyo5vYaK9COZBp8BN5Drncfu3WoQNZlQY= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= -github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= -github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= -github.com/jbenet/go-random-files v0.0.0-20190219210431-31b3f20ebded h1:fHCa28iw+qaRWZK4IqrntHxXALD5kKr/ESrpOCRRdrg= -github.com/jbenet/go-random-files v0.0.0-20190219210431-31b3f20ebded/go.mod h1:FKvZrl5nnaGnTAMewcq0i7wM5zHD75e0lwlnF8q46uo= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= -github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= -github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk= github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= 
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk= -github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA= +github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc= +github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= -github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos= -github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/karamaru-alpha/copyloopvar v1.2.1 h1:wmZaZYIjnJ0b5UoKDjUHrikcV0zuPyyxI4SVplLd2CI= +github.com/karamaru-alpha/copyloopvar v1.2.1/go.mod h1:nFmMlFNlClC2BPvNaHMdkirmTJxVCY0lhxBtlfOypMM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= -github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= 
+github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= +github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg= -github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= -github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE= +github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= +github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -372,48 +391,52 @@ github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= -github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= -github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= -github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I= -github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0= -github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg= -github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g= -github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= -github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.4.2 h1:l5pOzHBz8mFOlbcifTxzfyYbgEmoUqjxLFHZkjlbHXs= +github.com/ldez/exptostd v0.4.2/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ= +github.com/ldez/gomoddirectives v0.6.1 h1:Z+PxGAY+217f/bSGjNZr/b2KTXcyYLgiWI6geMBN2Qc= +github.com/ldez/gomoddirectives v0.6.1/go.mod h1:cVBiu3AHR9V31em9u2kwfMKD43ayN5/XDgr+cdaFaKs= +github.com/ldez/grignotin v0.9.0 h1:MgOEmjZIVNn6p5wPaGp/0OKWyvq42KnzAt/DAb8O4Ow= +github.com/ldez/grignotin v0.9.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= +github.com/ldez/tagliatelle v0.7.1 
h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= +github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA= +github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ= github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ= +github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= -github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= +github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+skhePFdE= +github.com/libp2p/go-doh-resolver v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU= +github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= +github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= +github.com/libp2p/go-libp2p v0.46.0 h1:0T2yvIKpZ3DVYCuPOFxPD1layhRU486pj9rSlGWYnDM= +github.com/libp2p/go-libp2p v0.46.0/go.mod h1:TbIDnpDjBLa7isdgYpbxozIVPBTmM/7qKOJP4SFySrQ= github.com/libp2p/go-libp2p-asn-util v0.4.1 
h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.26.1 h1:AazV3LCImYVkDUGAHx5lIEgZ9iUI2QQKH5GMRQU8uEA= -github.com/libp2p/go-libp2p-kad-dht v0.26.1/go.mod h1:mqRUGJ/+7ziQ3XknU2kKHfsbbgb9xL65DXjPOJwmZF8= -github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= -github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= -github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= -github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= -github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= +github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8= +github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ= +github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= +github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= +github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= +github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= 
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= -github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= -github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= -github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc= +github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= -github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= -github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= -github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= -github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= +github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= @@ -422,33 +445,36 @@ github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= 
+github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg= +github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0 h1:Ny7cm4KSWceJLYyI1sm+aFIVDWSGXLcOJ0O0UaS5wdU= -github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= +github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= +github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.15 
h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mgechev/revive v1.3.9 h1:18Y3R4a2USSBF+QZKFQwVkBROUda7uoBlkEuBD+YD1A= -github.com/mgechev/revive v1.3.9/go.mod h1:+uxEIr5UH0TjXWHTno3xh4u7eg6jDpXKzQccA9UGhHU= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mgechev/revive v1.7.0 h1:JyeQ4yO5K8aZhIKf5rec56u0376h8AlKNQEmjfkjKlY= +github.com/mgechev/revive v1.7.0/go.mod h1:qZnwcNhoguE58dfi96IJeSTPeZQejNeoMQLUZGi4SW4= +github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc= +github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= 
+github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= @@ -465,25 +491,24 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= -github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= 
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= -github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= +github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= -github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= -github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= @@ -492,18 +517,15 @@ github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhK github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbnVSxfHJk= -github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= +github.com/nunnatsa/ginkgolinter v0.19.1 h1:mjwbOlDQxZi9Cal+KfbEJTCz327OLNfwNvoZ70NJ+c4= +github.com/nunnatsa/ginkgolinter v0.19.1/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= -github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.23.4 
h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -513,41 +535,55 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= -github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= 
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= -github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= -github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= +github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= +github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= +github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= +github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= -github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= -github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod 
h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= -github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs= -github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.20 h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg= -github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8= -github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= -github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= -github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= +github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= +github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= +github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= +github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= +github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= +github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod 
h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= -github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= -github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= -github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= +github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= +github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= +github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -555,21 +591,20 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 
h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/polyfloyd/go-errorlint v1.6.0 h1:tftWV9DE7txiFzPpztTAwyoRLKNj9gpVm2cg8/OwcYY= -github.com/polyfloyd/go-errorlint v1.6.0/go.mod h1:HR7u8wuP1kb1NeN1zqTd1ZMlqUKPPHF+Id4vIPvDqVw= +github.com/polyfloyd/go-errorlint v1.7.1 h1:RyLVXIbosq1gBdk/pChWA8zWYLsq9UEw7a1L5TVMCnA= +github.com/polyfloyd/go-errorlint v1.7.1/go.mod h1:aXjNb1x2TNhoLsk26iv1yl7a+zTnXPhwEMtEXukiLR8= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= -github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 
h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -578,46 +613,41 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= -github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= -github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY= -github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= -github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= -github.com/quic-go/webtransport-go v0.8.0/go.mod 
h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= -github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= -github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI10= +github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 
h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.3 h1:eiSQdJVNr9KTNxY2Niij8UReSwR8Xrte3exBrAZfqpg= -github.com/ryancurrah/gomodguard v1.3.3/go.mod h1:rsKQjj4l3LXe8N344Ow7agAy5p9yjsWOtRzUMYmA0QY= +github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU= +github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/samber/lo v1.46.0 h1:w8G+oaCPgz1PoCJztqymCFaKwXt+5cCXn51uPxExFfQ= -github.com/samber/lo v1.46.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= -github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= -github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod 
h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI= -github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= -github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe h1:exdneYmXwZ4+VaIWv9mQ47uIHkTQSN50DYdCjXJ1cdQ= -github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe/go.mod h1:iyeMMRw8QEmueUSZ2VqmkQMiDyDcobfPnG00CV/NWdE= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= +github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.22.2 h1:IXbuI7cJninj0nRpZSLCUlotsj8jGusohfONMrHoF6g= +github.com/securego/gosec/v2 v2.22.2/go.mod h1:UEBGA+dSKb+VqM6TdehR7lnQtIIMorYJ4/9CW1KVQBE= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -625,34 +655,35 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod 
h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= -github.com/sivchari/tenv v1.10.0 h1:g/hzMA+dBCKqGXgW8AV/1xIWhAvDrx0zFKNR48NFMg0= -github.com/sivchari/tenv v1.10.0/go.mod h1:tdY24masnVoZFxYrHv/nD6Tc8FbkEtAQEEziXpyMgqY= +github.com/sivchari/tenv v1.12.1 h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY= +github.com/sivchari/tenv v1.12.1/go.mod h1:1LjSOUCc25snIr5n3DtGGrENhX3LuWefcplwVGC24mw= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= -github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM= +github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= github.com/spf13/cast v1.6.0 
h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= -github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= +github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -664,47 +695,61 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= -github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tdakkota/asciicheck v0.4.1 h1:bm0tbcmi0jezRA2b5kg4ozmMuGAFotKI3RZfrhfovg8= +github.com/tdakkota/asciicheck v0.4.1/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= 
-github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= -github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= -github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a h1:A6uKudFIfAEpoPdaal3aSqGxBzLyU8TqyXImLwo6dIo= -github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= -github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= -github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= -github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4= -github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tetafro/godot v1.5.0 h1:aNwfVI4I3+gdxjMgYPus9eHmoBeJIbnajOyqZYStzuw= +github.com/tetafro/godot v1.5.0/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg= +github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg= +github.com/tomarrell/wrapcheck/v2 v2.10.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= -github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= -github.com/ultraware/whitespace v0.1.1 
h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ= -github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM= -github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U= +github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ= +github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.3.1 h1:bA51vmVx1UIhiIsQFSNq6GZ6VPTk3WNMZgRiCe9R29U= +github.com/uudashr/iface v1.3.1/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish 
v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= +github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= +github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= @@ -720,118 +765,108 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= +github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= +github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs= -go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM= -go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY= -go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= 
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= -go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE= +go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM= +go-simpler.org/sloglint v0.9.0 h1:/40NQtjRx9txvsB/RN022KsUJU+zaaSb/9q9BSefSrE= +go-simpler.org/sloglint v0.9.0/go.mod h1:G/OrAF6uxj48sHahCzrbarVMptL2kjWTaUeC8+fOGww= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 
h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= +go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap 
v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= +go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= 
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= +golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20240613232115-7f521ea00fb8 h1:+ZJmEdDFzH5H0CnzOrwgbH3elHctfTecW9X0k2tkn5M= -golang.org/x/exp/typeparams v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac 
h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= 
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -841,83 +876,68 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod 
h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= @@ -927,77 +947,52 @@ golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools 
v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= 
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/gotestsum v1.12.0 h1:CmwtaGDkHxrZm4Ib0Vob89MTfpc3GrEFMJKovliPwGk= -gotest.tools/gotestsum v1.12.0/go.mod h1:fAvqkSptospfSbQw26CTYzNwnsE/ztqLeyhP0h67ARY= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= -honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= -lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= -lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +gotest.tools/gotestsum v1.13.0 h1:+Lh454O9mu9AMG1APV4o0y7oDYKyik/3kBOiCqiEpRo= +gotest.tools/gotestsum v1.13.0/go.mod h1:7f0NS5hFb0dWr4NtcsAsF0y1kzjEFfAil0HiBQJE03Q= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= 
+honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= diff --git a/test/dependencies/pollEndpoint/main.go b/test/dependencies/pollEndpoint/main.go index 0c548d8c9..fbea6fd77 100644 --- a/test/dependencies/pollEndpoint/main.go +++ b/test/dependencies/pollEndpoint/main.go @@ -10,7 +10,7 @@ import ( "os" "time" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) diff --git a/test/integration/addcat_test.go b/test/integration/addcat_test.go index 936b28c6d..22d8be9be 100644 --- a/test/integration/addcat_test.go +++ b/test/integration/addcat_test.go @@ -13,7 +13,7 @@ import ( "github.com/ipfs/boxo/bootstrap" "github.com/ipfs/boxo/files" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/core" "github.com/ipfs/kubo/core/coreapi" diff --git a/test/integration/bitswap_wo_routing_test.go b/test/integration/bitswap_wo_routing_test.go index fa4e8d513..826d034e8 100644 --- a/test/integration/bitswap_wo_routing_test.go +++ b/test/integration/bitswap_wo_routing_test.go @@ -2,7 +2,6 @@ package integrationtest import ( "bytes" - "context" "testing" blocks "github.com/ipfs/go-block-format" @@ -14,8 +13,7 @@ import ( ) func TestBitswapWithoutRouting(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() const numPeers = 4 // 
create network diff --git a/test/integration/wan_lan_dht_test.go b/test/integration/wan_lan_dht_test.go index 7c70aa98f..57f4bf1ac 100644 --- a/test/integration/wan_lan_dht_test.go +++ b/test/integration/wan_lan_dht_test.go @@ -17,7 +17,6 @@ import ( testutil "github.com/libp2p/go-libp2p-testing/net" corenet "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peerstore" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ma "github.com/multiformats/go-multiaddr" @@ -105,7 +104,7 @@ func RunDHTConnectivity(conf testutil.LatencyConfig, numPeers int) error { } defer wanPeer.Close() wanAddr := makeAddr(uint32(i), true) - wanPeer.Peerstore.AddAddr(wanPeer.Identity, wanAddr, peerstore.PermanentAddrTTL) + _ = wanPeer.PeerHost.Network().Listen(wanAddr) for _, p := range wanPeers { _, _ = mn.LinkPeers(p.Identity, wanPeer.Identity) _ = wanPeer.PeerHost.Connect(connectionContext, p.Peerstore.PeerInfo(p.Identity)) @@ -121,7 +120,7 @@ func RunDHTConnectivity(conf testutil.LatencyConfig, numPeers int) error { } defer lanPeer.Close() lanAddr := makeAddr(uint32(i), false) - lanPeer.Peerstore.AddAddr(lanPeer.Identity, lanAddr, peerstore.PermanentAddrTTL) + _ = lanPeer.PeerHost.Network().Listen(lanAddr) for _, p := range lanPeers { _, _ = mn.LinkPeers(p.Identity, lanPeer.Identity) _ = lanPeer.PeerHost.Connect(connectionContext, p.Peerstore.PeerInfo(p.Identity)) @@ -132,10 +131,9 @@ func RunDHTConnectivity(conf testutil.LatencyConfig, numPeers int) error { // Add interfaces / addresses to test peer. wanAddr := makeAddr(0, true) - testPeer.Peerstore.AddAddr(testPeer.Identity, wanAddr, peerstore.PermanentAddrTTL) + _ = testPeer.PeerHost.Network().Listen(wanAddr) lanAddr := makeAddr(0, false) - testPeer.Peerstore.AddAddr(testPeer.Identity, lanAddr, peerstore.PermanentAddrTTL) - + _ = testPeer.PeerHost.Network().Listen(lanAddr) // The test peer is connected to one lan peer. 
for _, p := range lanPeers { if _, err := mn.LinkPeers(testPeer.Identity, p.Identity); err != nil { diff --git a/test/sharness/README.md b/test/sharness/README.md index 6ab8539da..239e46d1e 100644 --- a/test/sharness/README.md +++ b/test/sharness/README.md @@ -13,7 +13,7 @@ The usual ipfs env flags also apply: ```sh # the output will make your eyes bleed -IPFS_LOGGING=debug TEST_VERBOSE=1 make +GOLOG_LOG_LEVEL=debug TEST_VERBOSE=1 make ``` To make the tests abort as soon as an error occurs, use the TEST_IMMEDIATE env variable: diff --git a/test/sharness/Rules.mk b/test/sharness/Rules.mk index c1e70eb09..0ac3cf950 100644 --- a/test/sharness/Rules.mk +++ b/test/sharness/Rules.mk @@ -4,8 +4,8 @@ SHARNESS_$(d) = $(d)/lib/sharness/sharness.sh T_$(d) = $(sort $(wildcard $(d)/t[0-9][0-9][0-9][0-9]-*.sh)) -DEPS_$(d) := test/bin/random test/bin/multihash test/bin/pollEndpoint \ - test/bin/iptb test/bin/go-sleep test/bin/random-files \ +DEPS_$(d) := test/bin/multihash test/bin/pollEndpoint test/bin/iptb \ + test/bin/go-sleep test/bin/random-data test/bin/random-files \ test/bin/go-timeout test/bin/hang-fds test/bin/ma-pipe-unidir \ test/bin/cid-fmt DEPS_$(d) += cmd/ipfs/ipfs @@ -14,10 +14,10 @@ DEPS_$(d) += $(SHARNESS_$(d)) ifeq ($(OS),Linux) PLUGINS_DIR_$(d) := $(d)/plugins/ -ORGIN_PLUGINS_$(d) := $(plugin/plugins_plugins_so) -PLUGINS_$(d) := $(addprefix $(PLUGINS_DIR_$(d)),$(notdir $(ORGIN_PLUGINS_$(d)))) +ORIGIN_PLUGINS_$(d) := $(plugin/plugins_plugins_so) +PLUGINS_$(d) := $(addprefix $(PLUGINS_DIR_$(d)),$(notdir $(ORIGIN_PLUGINS_$(d)))) -$(PLUGINS_$(d)): $(ORGIN_PLUGINS_$(d)) +$(PLUGINS_$(d)): $(ORIGIN_PLUGINS_$(d)) @mkdir -p $(@D) cp -f plugin/plugins/$(@F) $@ diff --git a/test/sharness/lib/test-lib.sh b/test/sharness/lib/test-lib.sh index 69fd2e66c..413d0e92f 100644 --- a/test/sharness/lib/test-lib.sh +++ b/test/sharness/lib/test-lib.sh @@ -54,7 +54,7 @@ cur_test_pwd="$(pwd)" while true ; do echo -n > stuck_cwd_list - lsof -c ipfs -Ffn 2>/dev/null | grep -A1 '^fcwd$' | 
grep '^n' | cut -b 2- | while read -r pwd_of_stuck ; do + timeout 5 lsof -c ipfs -Ffn 2>/dev/null | grep -A1 '^fcwd$' | grep '^n' | cut -b 2- | while read -r pwd_of_stuck ; do case "$pwd_of_stuck" in "$cur_test_pwd"*) echo "$pwd_of_stuck" >> stuck_cwd_list @@ -158,8 +158,8 @@ test_wait_open_tcp_port_10_sec() { for i in $(test_seq 1 100) do # this is not a perfect check, but it's portable. - # cant count on ss. not installed everywhere. - # cant count on netstat using : or . as port delim. differ across platforms. + # can't count on ss. not installed everywhere. + # can't count on netstat using : or . as port delim. differ across platforms. echo $(netstat -aln | egrep "^tcp.*LISTEN" | egrep "[.:]$1" | wc -l) -gt 0 if [ $(netstat -aln | egrep "^tcp.*LISTEN" | egrep "[.:]$1" | wc -l) -gt 0 ]; then return 0 @@ -205,6 +205,36 @@ test_init_ipfs() { ipfs init "${args[@]}" --profile=test > /dev/null ' + test_expect_success "disable telemetry" ' + test_config_set --bool Plugins.Plugins.telemetry.Disabled "true" + ' + + test_expect_success "prepare config -- mounting" ' + mkdir mountdir ipfs ipns mfs && + test_config_set Mounts.IPFS "$(pwd)/ipfs" && + test_config_set Mounts.IPNS "$(pwd)/ipns" && + test_config_set Mounts.MFS "$(pwd)/mfs" || + test_fsh cat "\"$IPFS_PATH/config\"" + ' + +} + +test_init_ipfs_measure() { + args=("$@") + + # we set the Addresses.API config variable. + # the cli client knows to use it, so only need to set. + # todo: in the future, use env? 
+ + test_expect_success "ipfs init succeeds" ' + export IPFS_PATH="$(pwd)/.ipfs" && + ipfs init "${args[@]}" --profile=test,flatfs-measure > /dev/null + ' + + test_expect_success "disable telemetry" ' + test_config_set --bool Plugins.Plugins.telemetry.Disabled "true" + ' + test_expect_success "prepare config -- mounting" ' mkdir mountdir ipfs ipns && test_config_set Mounts.IPFS "$(pwd)/ipfs" && @@ -287,10 +317,37 @@ test_launch_ipfs_daemon_without_network() { } do_umount() { + local mount_point="$1" + local max_retries=3 + local retry_delay=0.5 + + # Try normal unmount first (without lazy flag) + for i in $(seq 1 $max_retries); do + if [ "$(uname -s)" = "Linux" ]; then + # First attempt: standard unmount + if fusermount -u "$mount_point" 2>/dev/null; then + return 0 + fi + else + if umount "$mount_point" 2>/dev/null; then + return 0 + fi + fi + + # If not last attempt, wait before retry + if [ $i -lt $max_retries ]; then + go-sleep "${retry_delay}s" + fi + done + + # If normal unmount failed, try lazy unmount as last resort (Linux only) if [ "$(uname -s)" = "Linux" ]; then - fusermount -z -u "$1" + # Log that we're falling back to lazy unmount + test "$TEST_VERBOSE" = 1 && echo "# Warning: falling back to lazy unmount for $mount_point" + fusermount -z -u "$mount_point" 2>/dev/null else - umount "$1" + # On non-Linux, try force unmount + umount -f "$mount_point" 2>/dev/null || true fi } @@ -300,12 +357,14 @@ test_mount_ipfs() { test_expect_success FUSE "'ipfs mount' succeeds" ' do_umount "$(pwd)/ipfs" || true && do_umount "$(pwd)/ipns" || true && + do_umount "$(pwd)/mfs" || true && ipfs mount >actual ' test_expect_success FUSE "'ipfs mount' output looks good" ' echo "IPFS mounted at: $(pwd)/ipfs" >expected && echo "IPNS mounted at: $(pwd)/ipns" >>expected && + echo "MFS mounted at: $(pwd)/mfs" >>expected && test_cmp expected actual ' diff --git a/test/sharness/t0002-docker-image.sh b/test/sharness/t0002-docker-image.sh index 2ff827806..81bb8d449 100755 --- 
a/test/sharness/t0002-docker-image.sh +++ b/test/sharness/t0002-docker-image.sh @@ -36,8 +36,8 @@ test_expect_success "docker image build succeeds" ' ' test_expect_success "write init scripts" ' - echo "ipfs config Foo Bar" > 001.sh && - echo "ipfs config Baz Qux" > 002.sh && + echo "ipfs config Mounts.IPFS Bar" > 001.sh && + echo "ipfs config Pubsub.Router Qux" > 002.sh && chmod +x 002.sh ' @@ -65,10 +65,10 @@ test_expect_success "check that init scripts were run correctly and in the corre test_expect_success "check that init script configs were applied" ' echo Bar > expected && - docker exec "$DOC_ID" ipfs config Foo > actual && + docker exec "$DOC_ID" ipfs config Mounts.IPFS > actual && test_cmp actual expected && echo Qux > expected && - docker exec "$DOC_ID" ipfs config Baz > actual && + docker exec "$DOC_ID" ipfs config Pubsub.Router > actual && test_cmp actual expected ' diff --git a/test/sharness/t0018-indent.sh b/test/sharness/t0018-indent.sh index 5fa398fd2..a6029d93f 100755 --- a/test/sharness/t0018-indent.sh +++ b/test/sharness/t0018-indent.sh @@ -5,6 +5,9 @@ test_description="Test sharness test indent" . lib/test-lib.sh for file in $(find .. 
-name 't*.sh' -type f); do + if [ "$(basename "$file")" = "t0290-cid.sh" ]; then + continue + fi test_expect_success "indent in $file is not using tabs" ' test_must_fail grep -P "^ *\t" $file ' diff --git a/test/sharness/t0021-config.sh b/test/sharness/t0021-config.sh index 5264908c7..3e6886348 100755 --- a/test/sharness/t0021-config.sh +++ b/test/sharness/t0021-config.sh @@ -13,41 +13,23 @@ test_config_cmd_set() { cfg_key=$1 cfg_val=$2 - test_expect_success "ipfs config succeeds" ' - ipfs config $cfg_flags "$cfg_key" "$cfg_val" - ' + test_expect_success "ipfs config succeeds" " + ipfs config $cfg_flags \"$cfg_key\" \"$cfg_val\" + " - test_expect_success "ipfs config output looks good" ' - echo "$cfg_val" >expected && - ipfs config "$cfg_key" >actual && - test_cmp expected actual - ' - - # also test our lib function. it should work too. - cfg_key="Lib.$cfg_key" - test_expect_success "test_config_set succeeds" ' - test_config_set $cfg_flags "$cfg_key" "$cfg_val" - ' - - test_expect_success "test_config_set value looks good" ' - echo "$cfg_val" >expected && - ipfs config "$cfg_key" >actual && - test_cmp expected actual - ' + test_expect_success "ipfs config output looks good" " + echo \"$cfg_val\" >expected && + if [$cfg_flags != \"--json\"]; then + ipfs config \"$cfg_key\" >actual && + test_cmp expected actual + else + ipfs config \"$cfg_key\" | tr -d \"\\n\\t \" >actual && + echo >>actual && + test_cmp expected actual + fi + " } -# this is a bit brittle. the problem is we need to test -# with something that will be forced to unmarshal as a struct. -# (i.e. just setting 'ipfs config --json foo "[1, 2, 3]"') may -# set it as astring instead of proper json. We leverage the -# unmarshalling that has to happen. 
-CONFIG_SET_JSON_TEST='{ - "MDNS": { - "Enabled": true, - "Interval": 10 - } -}' - test_profile_apply_revert() { profile=$1 inverse_profile=$2 @@ -87,27 +69,32 @@ test_profile_apply_dry_run_not_alter() { } test_config_cmd() { - test_config_cmd_set "beep" "boop" - test_config_cmd_set "beep1" "boop2" - test_config_cmd_set "beep1" "boop2" - test_config_cmd_set "--bool" "beep2" "true" - test_config_cmd_set "--bool" "beep2" "false" - test_config_cmd_set "--json" "beep3" "true" - test_config_cmd_set "--json" "beep3" "false" - test_config_cmd_set "--json" "Discovery" "$CONFIG_SET_JSON_TEST" - test_config_cmd_set "--json" "deep-not-defined.prop" "true" - test_config_cmd_set "--json" "deep-null" "null" - test_config_cmd_set "--json" "deep-null.prop" "true" + test_config_cmd_set "Addresses.API" "foo" + test_config_cmd_set "Addresses.Gateway" "bar" + test_config_cmd_set "Datastore.GCPeriod" "baz" + test_config_cmd_set "AutoNAT.ServiceMode" "enabled" + test_config_cmd_set "--bool" "Discovery.MDNS.Enabled" "true" + test_config_cmd_set "--bool" "Discovery.MDNS.Enabled" "false" + test_config_cmd_set "--json" "Datastore.HashOnRead" "true" + test_config_cmd_set "--json" "Datastore.HashOnRead" "false" + test_config_cmd_set "--json" "Experimental.FilestoreEnabled" "true" + test_config_cmd_set "--json" "Import.BatchMaxSize" "null" + test_config_cmd_set "--json" "Import.UnixFSRawLeaves" "true" + test_config_cmd_set "--json" "Routing.Routers.Test" "{\\\"Parameters\\\":\\\"Test\\\",\\\"Type\\\":\\\"Test\\\"}" + test_config_cmd_set "--json" "Experimental.OptimisticProvideJobsPoolSize" "1337" + test_config_cmd_set "--json" "Addresses.Swarm" "[\\\"test\\\",\\\"test\\\",\\\"test\\\"]" + test_config_cmd_set "--json" "Gateway.PublicGateways.Foo" "{\\\"DeserializedResponses\\\":true,\\\"InlineDNSLink\\\":false,\\\"NoDNSLink\\\":false,\\\"Paths\\\":[\\\"Bar\\\",\\\"Baz\\\"],\\\"UseSubdomains\\\":true}" + test_config_cmd_set "--bool" "Gateway.PublicGateways.Foo.UseSubdomains" "false" 
test_expect_success "'ipfs config show' works" ' ipfs config show >actual ' test_expect_success "'ipfs config show' output looks good" ' - grep "\"beep\": \"boop\"," actual && - grep "\"beep1\": \"boop2\"," actual && - grep "\"beep2\": false," actual && - grep "\"beep3\": false," actual + grep "\"API\": \"foo\"," actual && + grep "\"Gateway\": \"bar\"" actual && + grep "\"Enabled\": false" actual && + grep "\"HashOnRead\": false" actual ' test_expect_success "'ipfs config show --config-file' works" ' @@ -281,7 +268,7 @@ test_config_cmd() { # won't work as it changes datastore definition, which makes ipfs not launch # without converting first - # test_profile_apply_revert badgerds + # test_profile_apply_revert pebbleds test_expect_success "cleanup config backups" ' find "$IPFS_PATH" -name "config-*" -exec rm {} \; diff --git a/test/sharness/t0025-datastores.sh b/test/sharness/t0025-datastores.sh index f0ddd4e2e..6be9eb3ed 100755 --- a/test/sharness/t0025-datastores.sh +++ b/test/sharness/t0025-datastores.sh @@ -4,13 +4,20 @@ test_description="Test non-standard datastores" . 
lib/test-lib.sh -test_expect_success "'ipfs init --empty-repo=false --profile=badgerds' succeeds" ' - BITS="2048" && - ipfs init --empty-repo=false --profile=badgerds -' +profiles=("flatfs" "pebbleds" "badgerds") +proot="$(mktemp -d "${TMPDIR:-/tmp}/t0025.XXXXXX")" -test_expect_success "'ipfs pin ls' works" ' - ipfs pin ls | wc -l | grep 9 -' +for profile in "${profiles[@]}"; do + test_expect_success "'ipfs init --empty-repo=false --profile=$profile' succeeds" ' + BITS="2048" && + IPFS_PATH="$proot/$profile" && + ipfs init --empty-repo=false --profile=$profile + ' + test_expect_success "'ipfs pin add' and 'pin ls' works with $profile" ' + export IPFS_PATH="$proot/$profile" && + echo -n "hello_$profile" | ipfs block put --pin=true > hello_cid && + ipfs pin ls -t recursive "$(cat hello_cid)" + ' +done test_done diff --git a/test/sharness/t0030-mount.sh b/test/sharness/t0030-mount.sh index 0c0983d0c..6df7a26bb 100755 --- a/test/sharness/t0030-mount.sh +++ b/test/sharness/t0030-mount.sh @@ -16,7 +16,8 @@ if ! test_have_prereq FUSE; then fi -export IPFS_NS_MAP="welcome.example.com:/ipfs/$HASH_WELCOME_DOCS" +# echo -n "ipfs" > expected && ipfs add --cid-version 1 -Q -w expected +export IPFS_NS_MAP="welcome.example.com:/ipfs/bafybeicq7bvn5lz42qlmghaoiwrve74pzi53auqetbantp5kajucsabike" # start iptb + wait for peering NUM_NODES=5 @@ -27,17 +28,17 @@ startup_cluster $NUM_NODES # test mount failure before mounting properly. 
test_expect_success "'ipfs mount' fails when there is no mount dir" ' - tmp_ipfs_mount() { ipfsi 0 mount -f=not_ipfs -n=not_ipns >output 2>output.err; } && + tmp_ipfs_mount() { ipfsi 0 mount -f=not_ipfs -n=not_ipns -m=not_mfs >output 2>output.err; } && test_must_fail tmp_ipfs_mount ' test_expect_success "'ipfs mount' output looks good" ' test_must_be_empty output && - test_should_contain "not_ipns\|not_ipfs" output.err + test_should_contain "not_ipns\|not_ipfs\|not_mfs" output.err ' test_expect_success "setup and publish default IPNS value" ' - mkdir "$(pwd)/ipfs" "$(pwd)/ipns" && + mkdir "$(pwd)/ipfs" "$(pwd)/ipns" "$(pwd)/mfs" && ipfsi 0 name publish QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn ' @@ -46,12 +47,14 @@ test_expect_success "setup and publish default IPNS value" ' test_expect_success FUSE "'ipfs mount' succeeds" ' do_umount "$(pwd)/ipfs" || true && do_umount "$(pwd)/ipns" || true && - ipfsi 0 mount -f "$(pwd)/ipfs" -n "$(pwd)/ipns" >actual + do_umount "$(pwd)/mfs" || true && + ipfsi 0 mount -f "$(pwd)/ipfs" -n "$(pwd)/ipns" -m "$(pwd)/mfs" >actual ' test_expect_success FUSE "'ipfs mount' output looks good" ' echo "IPFS mounted at: $(pwd)/ipfs" >expected && echo "IPNS mounted at: $(pwd)/ipns" >>expected && + echo "MFS mounted at: $(pwd)/mfs" >>expected && test_cmp expected actual ' @@ -63,21 +66,64 @@ test_expect_success FUSE "local symlink works" ' test_expect_success FUSE "can resolve ipns names" ' echo -n "ipfs" > expected && - cat ipns/welcome.example.com/ping > actual && + ipfsi 0 add --cid-version 1 -Q -w expected && + cat ipns/welcome.example.com/expected > actual && test_cmp expected actual ' +test_expect_success FUSE "create mfs file via fuse" ' + touch mfs/testfile && + ipfsi 0 files ls | grep testfile +' + +test_expect_success FUSE "create mfs dir via fuse" ' + mkdir mfs/testdir && + ipfsi 0 files ls | grep testdir +' + +test_expect_success FUSE "read mfs file from fuse" ' + echo content > mfs/testfile && + getfattr -n ipfs_cid 
mfs/testfile +' +test_expect_success FUSE "ipfs add file and read it back via fuse" ' + echo content3 | ipfsi 0 files write -e /testfile3 && + grep content3 mfs/testfile3 +' + +test_expect_success FUSE "ipfs add file and read it back via fuse" ' + echo content > testfile2 && + ipfsi 0 add --to-files /testfile2 testfile2 && + grep content mfs/testfile2 +' + +test_expect_success FUSE "test file xattr" ' + echo content > mfs/testfile && + getfattr -n ipfs_cid mfs/testfile +' + +test_expect_success FUSE "test file removal" ' + touch mfs/testfile && + rm mfs/testfile +' + +test_expect_success FUSE "test nested dirs" ' + mkdir -p mfs/foo/bar/baz/qux && + echo content > mfs/foo/bar/baz/qux/quux && + ipfsi 0 files stat /foo/bar/baz/qux/quux +' + test_expect_success "mount directories cannot be removed while active" ' - test_must_fail rmdir ipfs ipns 2>/dev/null + test_must_fail rmdir ipfs ipns mfs 2>/dev/null ' test_expect_success "unmount directories" ' do_umount "$(pwd)/ipfs" && - do_umount "$(pwd)/ipns" + do_umount "$(pwd)/ipns" && + do_umount "$(pwd)/mfs" ' test_expect_success "mount directories can be removed after shutdown" ' - rmdir ipfs ipns + rmdir ipfs ipns mfs ' test_expect_success 'stop iptb' ' diff --git a/test/sharness/t0032-mount-sharded.sh b/test/sharness/t0032-mount-sharded.sh index 10ba421a2..7a3e51858 100755 --- a/test/sharness/t0032-mount-sharded.sh +++ b/test/sharness/t0032-mount-sharded.sh @@ -16,7 +16,7 @@ fi test_init_ipfs test_expect_success 'force sharding' ' - ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\"" + ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1B\"" ' test_launch_ipfs_daemon diff --git a/test/sharness/t0040-add-and-cat.sh b/test/sharness/t0040-add-and-cat.sh index 142ab8ec1..f0e5a1bb9 100755 --- a/test/sharness/t0040-add-and-cat.sh +++ b/test/sharness/t0040-add-and-cat.sh @@ -355,10 +355,10 @@ test_add_cat_file() { test_cmp expected actual ' - test_must_fail "ipfs add with multiple files of same name 
but different dirs fails" ' + test_expect_success "ipfs add with multiple files of same name but different dirs fails" ' mkdir -p mountdir/same-file/ && cp mountdir/hello.txt mountdir/same-file/hello.txt && - ipfs add mountdir/hello.txt mountdir/same-file/hello.txt >actual && + test_expect_code 1 ipfs add mountdir/hello.txt mountdir/same-file/hello.txt >actual && rm mountdir/same-file/hello.txt && rmdir mountdir/same-file ' @@ -469,18 +469,27 @@ test_add_cat_file() { ipfs files rm -r --force /mfs ' + # confirm -w and --to-files are exclusive + # context: https://github.com/ipfs/kubo/issues/10611 + test_expect_success "ipfs add -r -w dir --to-files /mfs/subdir5/ errors (-w and --to-files are exclusive)" ' + ipfs files mkdir -p /mfs/subdir5 && + test_expect_code 1 ipfs add -r -w test --to-files /mfs/subdir5/ >actual 2>&1 && + test_should_contain "Error" actual && + ipfs files rm -r --force /mfs + ' + } test_add_cat_5MB() { ADD_FLAGS="$1" EXP_HASH="$2" - test_expect_success "generate 5MB file using go-random" ' - random 5242880 41 >mountdir/bigfile + test_expect_success "generate 5MB file using random-data" ' + random-data -size=5242880 -seed=41 >mountdir/bigfile ' test_expect_success "sha1 of the file looks ok" ' - echo "11145620fb92eb5a49c9986b5c6844efda37e471660e" >sha1_expected && + echo "11145b8c4bc8f87ea2fcfc3d55708b8cac2aadf12862" >sha1_expected && multihash -a=sha1 -e=hex mountdir/bigfile >sha1_actual && test_cmp sha1_expected sha1_actual ' @@ -585,12 +594,12 @@ test_add_cat_expensive() { ADD_FLAGS="$1" HASH="$2" - test_expect_success EXPENSIVE "generate 100MB file using go-random" ' - random 104857600 42 >mountdir/bigfile + test_expect_success EXPENSIVE "generate 100MB file using random-data" ' + random-data -size=104857600 -seed=42 >mountdir/bigfile ' test_expect_success EXPENSIVE "sha1 of the file looks ok" ' - echo "1114885b197b01e0f7ff584458dc236cb9477d2e736d" >sha1_expected && + echo "11141e8c04d7cd019cc0acf0311a8ca6cf2c18413c96" >sha1_expected && 
multihash -a=sha1 -e=hex mountdir/bigfile >sha1_actual && test_cmp sha1_expected sha1_actual ' @@ -614,7 +623,7 @@ test_add_cat_expensive() { ' test_expect_success EXPENSIVE "ipfs cat output hashed looks good" ' - echo "1114885b197b01e0f7ff584458dc236cb9477d2e736d" >sha1_expected && + echo "11141e8c04d7cd019cc0acf0311a8ca6cf2c18413c96" >sha1_expected && test_cmp sha1_expected sha1_actual ' @@ -873,17 +882,17 @@ test_expect_success "'ipfs add -rn' succeeds" ' mkdir -p mountdir/moons/saturn && echo "Hello Europa!" >mountdir/moons/jupiter/europa.txt && echo "Hello Titan!" >mountdir/moons/saturn/titan.txt && - echo "hey youre no moon!" >mountdir/moons/mercury.txt && + echo "hey you are no moon!" >mountdir/moons/mercury.txt && ipfs add -rn mountdir/moons >actual ' test_expect_success "'ipfs add -rn' output looks good" ' - MOONS="QmVKvomp91nMih5j6hYBA8KjbiaYvEetU2Q7KvtZkLe9nQ" && + MOONS="QmbGoaQZm8kjYfCiN1aBsgwhqfUBGDYTrDb91Mz7Dvq81B" && EUROPA="Qmbjg7zWdqdMaK2BucPncJQDxiALExph5k3NkQv5RHpccu" && JUPITER="QmS5mZddhFPLWFX3w6FzAy9QxyYkaxvUpsWCtZ3r7jub9J" && SATURN="QmaMagZT4rTE7Nonw8KGSK4oe1bh533yhZrCo1HihSG8FK" && TITAN="QmZzppb9WHn552rmRqpPfgU5FEiHH6gDwi3MrB9cTdPwdb" && - MERCURY="QmUJjVtnN8YEeYcS8VmUeWffTWhnMQAkk5DzZdKnPhqUdK" && + MERCURY="QmRsTB5CpEUvDUpDgHCzb3VftZ139zrk9zs5ZcgYh9TMPJ" && echo "added $EUROPA moons/jupiter/europa.txt" >expected && echo "added $MERCURY moons/mercury.txt" >>expected && echo "added $TITAN moons/saturn/titan.txt" >>expected && @@ -893,42 +902,42 @@ test_expect_success "'ipfs add -rn' output looks good" ' test_cmp expected actual ' -test_expect_success "go-random is installed" ' - type random +test_expect_success "random-data is installed" ' + type random-data ' -test_add_cat_5MB "" "QmSr7FqYkxYWGoSfy8ZiaMWQ5vosb18DQGCzjwEQnVHkTb" +test_add_cat_5MB "" "QmapAfmzmeWYTNztMQEhUXFcSGrsax22WRG7YN9xLdMeQq" -test_add_cat_5MB --raw-leaves "QmbdLHCmdi48eM8T7D67oXjA1S2Puo8eMfngdHhdPukFd6" +test_add_cat_5MB --raw-leaves 
"QmabWSFaPusmiZaaVZLhEUtHcj8CCvVeUfkBpKqAkKVMiS" # note: the specified hash implies that internal nodes are stored # using CidV1 and leaves are stored using raw blocks -test_add_cat_5MB --cid-version=1 "bafybeigfnx3tka2rf5ovv2slb7ymrt4zbwa3ryeqibe6fipyt5vgsrli3u" +test_add_cat_5MB --cid-version=1 "bafybeifwdkm32fmukqwh3jofm6ma76bcqvn6opxstsnzmya7utboi4cb2m" # note: the specified hash implies that internal nodes are stored # using CidV1 and leaves are stored using CidV1 but using the legacy # format (i.e. not raw) -test_add_cat_5MB '--cid-version=1 --raw-leaves=false' "bafybeieyifrgpjn3yengthr7qaj72ozm2aq3wm53srgeprc43w67qpvfqa" +test_add_cat_5MB '--cid-version=1 --raw-leaves=false' "bafybeifq4unep5w4agr3nlynxidj2rymf6dzu6bf4ieqqildkboe5mdmne" # note: --hash=blake2b-256 implies --cid-version=1 which implies --raw-leaves=true # the specified hash represents the leaf nodes stored as raw leaves and # encoded with the blake2b-256 hash function -test_add_cat_5MB '--hash=blake2b-256' "bafykbzacebnmjcl4sn37b3ehtibvf263oun2w6idghenrvlpehq5w5jqyvhjo" +test_add_cat_5MB '--hash=blake2b-256' "bafykbzacebxcnlql4oc3mtscqn32aumqkqxxv3wt7dkyrphgh6lc2gckiq6bw" # the specified hash represents the leaf nodes stored as protoful nodes and # encoded with the blake2b-256 hash function -test_add_cat_5MB '--hash=blake2b-256 --raw-leaves=false' "bafykbzaceaxiiykzgpbhnzlecffqm3zbuvhujyvxe5scltksyafagkyw4rjn2" +test_add_cat_5MB '--hash=blake2b-256 --raw-leaves=false' "bafykbzacearibnoamkfmcagpfgk2sbgx65qftnsrh4ttd3g7ghooasfnyavme" -test_add_cat_expensive "" "QmU9SWAPPmNEKZB8umYMmjYvN7VyHqABNvdA6GUi4MMEz3" +test_add_cat_expensive "" "Qma1WZKC3jad7e3F7GEDvkFdhPLyMEhKszBF4nBUCBGh6c" # note: the specified hash implies that internal nodes are stored # using CidV1 and leaves are stored using raw blocks -test_add_cat_expensive "--cid-version=1" "bafybeidkj5ecbhrqmzrcee2rw7qwsx24z3364qya3fnp2ktkg2tnsrewhi" +test_add_cat_expensive "--cid-version=1" 
"bafybeibdfw7nsmb3erhej2k6v4eopaswsf5yfv2ikweqa3qsc5no4jywqu" # note: --hash=blake2b-256 implies --cid-version=1 which implies --raw-leaves=true # the specified hash represents the leaf nodes stored as raw leaves and # encoded with the blake2b-256 hash function -test_add_cat_expensive '--hash=blake2b-256' "bafykbzaceb26fnq5hz5iopzamcb4yqykya5x6a4nvzdmcyuu4rj2akzs3z7r6" +test_add_cat_expensive '--hash=blake2b-256' "bafykbzaceduy3thhmcf6ptfqzxberlvj7sgo4uokrvd6qwrhim6r3rgcb26qi" test_add_named_pipe diff --git a/test/sharness/t0042-add-skip.sh b/test/sharness/t0042-add-skip.sh index 64d8e1a7c..00f96f065 100755 --- a/test/sharness/t0042-add-skip.sh +++ b/test/sharness/t0042-add-skip.sh @@ -93,8 +93,8 @@ EOF test_cmp expected actual ' - test_expect_failure "'ipfs add' with an unregistered hash and wrapped leaves fails without crashing" ' - ipfs add --hash poseidon-bls12_381-a2-fc1 --raw-leaves=false -r mountdir/planets + test_expect_success "'ipfs add' with an unregistered hash and wrapped leaves fails without crashing" ' + test_expect_code 1 ipfs add --hash poseidon-bls12_381-a2-fc1 --raw-leaves=false -r mountdir/planets ' } diff --git a/test/sharness/t0043-add-w.sh b/test/sharness/t0043-add-w.sh index 1f13cae3a..a0bfc2797 100755 --- a/test/sharness/t0043-add-w.sh +++ b/test/sharness/t0043-add-w.sh @@ -6,58 +6,54 @@ test_description="Test add -w" -add_w_m='QmazHkwx6mPmmCEi1jR5YzjjQd1g5XzKfYQLzRAg7x5uUk' +add_w_m='QmbDfuW3tZ5PmAucyLBAMzVeETHCHM7Ho9CWdBvWxRGd3i' -add_w_1='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added Qmf82PSsMpUHcrqxa69KG6Qp5yeK7K9BTizXgG3nvzWcNG ' +add_w_1='added QmP9WCV5SjQRoxoCkgywzw4q5X23rhHJJXzPQt4VbNa9M5 0h0r91 +added Qmave82G8vLbtx6JCokrrhLPpFNfWj5pbXobddiUASfpe3 ' -add_w_12='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead -added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ ' +add_w_12='added QmP9WCV5SjQRoxoCkgywzw4q5X23rhHJJXzPQt4VbNa9M5 0h0r91 +added 
QmNUiT9caQy5zXvw942UYXkjLseQLWBkf7ZJD6RCfk8JgP 951op +added QmWXoq9vUtdNxmM16kvJRgyQdi4S4gfYSjd2MsRprBXWmG ' -add_w_21='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead -added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ ' +add_w_d1='added QmQKZCZKKL71zcMNpFFVcWzoh5dimX45mKgUu3LhvdaCRn 3s78oa/cb5v5v +added QmPng2maSno8o659Lu2QtKg2d2L53RMahoyK6wNkifYaxY 3s78oa/cnd062l-rh +added QmX3s7jJjFQhKRuGpDA3W4BYHdCWAyL3oB6U3iSoaYxVxs 3s78oa/es3gm9ck7b +added QmSUZXb48DoNjUPpX9Jue1mUpyCghEDZY62iif1JhdofoG 3s78oa/kfo77-6i_hp0ttz +added QmdC215Wp2sH47aw6R9CLBVa5uxJB4zEag1gtsKqjYGDb5 3s78oa/p91vs5t +added QmSEGJRYb5wrJRBxNsse91YJSpmgf5ikKRtCwvGZ1V1Nc2 3s78oa +added QmS2ML7DPVisc4gQtSrwMi3qwS9eyzGR7zVdwqwRPU9rGz ' -add_w_d1='added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs -added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K _jo7/15totauzkak- -added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR _jo7/galecuirrj4r -added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 _jo7/mzo50r-1xidf5zx -added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy -added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7 -added QmNQoesMj1qp8ApE51NbtTjFYksyzkezPD4cat7V2kzbKN ' +add_w_d1_v1='added bafkreibpfapmbmf55elpipnoofmda7xbs5spthba2srrovnchttzplmrnm fvmq97/0vz12t0yf +added bafkreihc5hdzpjwbqy6b5r2h2oxbm6mp4sx4eqll253k6f5yijsismvoxy fvmq97/2hpfk8slf0 +added bafkreihlmwk6pkk7klsmypmk2wfkgijbk7wavhtrcvgrfxvug7x5ndawge fvmq97/nda000755cd76 +added bafkreigpntro6bt4m6c5pcnmvk24qyiq3lwffhwry7k2hqtretqhfsfvqa fvmq97/nsz0wsonz +added bafkreieeznfvzr6742npktcn4ajzxujst6j2uztwfninhvic4bbvm356u4 fvmq97/pq3f6t0 +added bafybeiatm3oos62mm5hu4cmq234wipw2fjaqflq2cdqgc6i6dcgzamxwrm fvmq97 +added bafybeifp4ioszjk2377psexdhk7thcxnpaj2wls4yifsntbgxzti7ds4uy ' -add_w_d1_v1='added bafkreif7rizm7yeem72okzlwr2ls73cyemfyv5mjghdew3kzhtfznzz4dq _jo7/-s782qgs -added bafkreifkecyeevzcocvjliaz3ssiej5tkp32xyuogizonybihapdzovlsu 
_jo7/15totauzkak- -added bafkreif5xhyhjhqp3muvj52wp37nutafsznckeuhikrl3h6w2sx3xdyeqm _jo7/galecuirrj4r -added bafkreia6ooswgjtadq5n5zxkn2qyw3dpuyutvam7grtxn36ywykv52vkje _jo7/mzo50r-1xidf5zx -added bafkreibhvbkg6zgra4bu56a36h25g52g6yxsb25qvgqv2trx4zbmhkmxku _jo7/wzvsihy -added bafybeietuhja6ipwwnxefjecz6c5yls4j4q7r5gxiesyzfzkwsaimpa5mu _jo7 -added bafybeihxnrujsxdwyzuf3rq6wigzitrj6vjvxphttrtsx6tqabzpqfbd54 ' +add_w_d2='added QmP9WCV5SjQRoxoCkgywzw4q5X23rhHJJXzPQt4VbNa9M5 0h0r91 +added QmPpv7rFgkBqMYKJok6kVixqJgAGkyPiX3Jrr7n9rU1gcv 1o8ef-25onywi +added QmW7zDxGpaJTRpte7uCvMA9eXJ5L274FfsFPK9pE5RShq9 2ju9tn-b09/-qw1d8j9 +added QmNNm9D3pn8NXbuYSde614qbb9xE67g9TNV6zXePgSZvHj 2ju9tn-b09/03rfc61t4qq_m +added QmUYefaFAWka9LWarDeetQFe8CCSHaAtj4JR7YToYPSJyi 2ju9tn-b09/57dl-1lbjvu +added QmcMLvVinwJsHtYxTUXEoPd8XkbuyvJNffZ85PT11cWDc2 2ju9tn-b09/t8h1_w +added QmUTZE57VoF7xqWmrrcDNtDXrEs6znTQaRwmwkawGDs1GA 2ju9tn-b09/ugqi0nmv-1 +added QmfX5q9CMquL4JnAuG4H13RXjTb9DncMfu9pvpEsWkECJk fvmq97/0vz12t0yf +added Qmdr3jR1UATLFeuoieBTHLNNwhCUJbgN5oat7U9X8TtfdZ fvmq97/2hpfk8slf0 +added QmfUKgXSiE1wCQuX3Pws9FftthJuAMXrDWhG5EhhnmA6gQ fvmq97/nda000755cd76 +added QmYM35pgHvLdKH8ssw9kJeiUY5kcjhb5h3BTiDhAgbsYYh fvmq97/nsz0wsonz +added QmNarBSVwzYjLeEjGMJqTNtRCYGCLGo6TJqd21hPi7WXFT fvmq97/pq3f6t0 +added QmUNhQpFBZvfH4JyNxiE8QY31bZDpQHMmjSRRnbRZYZ3be 2ju9tn-b09 +added QmWtZu8dv4XRK8zPmwbNjS6biqe4bGEF9J5zb51sBJCMro fvmq97 +added QmYp7QoL8wRacLn9pJftJSkiiSmNGdWb7qT5ENDW2HXBcu ' -add_w_d2='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 -added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ gnz66h/1k0xpx34 -added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke gnz66h/9cwudvacx -added QmfYmpCCAMU9nLe7xbrYsHf5z2R2GxeQnsm4zavUhX9vq2 gnz66h/9ximv51cbo8 -added QmWgEE4e2kfx3b8HZcBk5cLrfhoi8kTMQP2MipgPhykuV3 gnz66h/b54ygh6gs -added QmcLbqEqhREGednc6mrVtanee4WHKp5JnUfiwTTHCJwuDf gnz66h/lbl5 -added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs -added 
QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K _jo7/15totauzkak- -added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR _jo7/galecuirrj4r -added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 _jo7/mzo50r-1xidf5zx -added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy -added QmVaKAt2eVftNKFfKhiBV7Mu5HjCugffuLqWqobSSFgiA7 h3qpecj0 -added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7 -added QmVPwNy8pZegpsNmsjjZvdTQn4uCeuZgtzhgWhRSQWjK9x gnz66h -added QmTmc46fhKC8Liuh5soy1VotdnHcqLu3r6HpPGwDZCnqL1 ' - -add_w_r='QmcCksBMDuuyuyfAMMNzEAx6Z7jTrdRy9a23WpufAhG9ji' +add_w_r='QmUerh2irM8cngqJHLGKCn4AGBSyHYAUi8i8zyVzXKNYyb' . lib/test-lib.sh test_add_w() { - test_expect_success "go-random-files is installed" ' + test_expect_success "random-files is installed" ' type random-files ' @@ -70,7 +66,7 @@ test_add_w() { # test single file test_expect_success "ipfs add -w (single file) succeeds" ' - ipfs add -w m/4r93 >actual + ipfs add -w m/0h0r91 >actual ' test_expect_success "ipfs add -w (single file) is correct" ' @@ -80,7 +76,7 @@ test_add_w() { # test two files together test_expect_success "ipfs add -w (multiple) succeeds" ' - ipfs add -w m/4r93 m/4u6ead >actual + ipfs add -w m/0h0r91 m/951op >actual ' test_expect_success "ipfs add -w (multiple) is correct" ' @@ -89,17 +85,17 @@ test_add_w() { ' test_expect_success "ipfs add -w (multiple) succeeds" ' - ipfs add -w m/4u6ead m/4r93 >actual + ipfs add -w m/951op m/0h0r91 >actual ' test_expect_success "ipfs add -w (multiple) orders" ' - echo "$add_w_21" >expected && + echo "$add_w_12" >expected && test_sort_cmp expected actual ' # test a directory test_expect_success "ipfs add -w -r (dir) succeeds" ' - ipfs add -r -w m/t_1wp-8a2/_jo7 >actual + ipfs add -r -w m/9m7mh3u51z3b/3s78oa >actual ' test_expect_success "ipfs add -w -r (dir) is correct" ' @@ -109,8 +105,8 @@ test_add_w() { # test files and directory test_expect_success "ipfs add -w -r succeeds" ' - ipfs add -w -r m/t_1wp-8a2/h3qpecj0 \ - m/ha6f0x7su6/gnz66h 
m/t_1wp-8a2/_jo7 m/4r93 >actual + ipfs add -w -r m/9m7mh3u51z3b/1o8ef-25onywi \ + m/vck_-2/2ju9tn-b09 m/9m7mh3u51z3b/fvmq97 m/0h0r91 >actual ' test_expect_success "ipfs add -w -r is correct" ' @@ -130,10 +126,10 @@ test_add_w() { # test repeats together test_expect_success "ipfs add -w (repeats) succeeds" ' - ipfs add -Q -w -r m/t_1wp-8a2/h3qpecj0 m/ha6f0x7su6/gnz66h \ - m/t_1wp-8a2/_jo7 m/4r93 m/t_1wp-8a2 m/t_1wp-8a2 m/4r93 \ - m/4r93 m/ha6f0x7su6/_rwujlf3qh_g08 \ - m/ha6f0x7su6/gnz66h/9cwudvacx >actual + ipfs add -Q -w -r m/9m7mh3u51z3b/1o8ef-25onywi m/vck_-2/2ju9tn-b09 \ + m/9m7mh3u51z3b/fvmq97 m/0h0r91 m/9m7mh3u51z3b m/9m7mh3u51z3b m/0h0r91 \ + m/0h0r91 m/vck_-2/0dl083je2 \ + m/vck_-2/2ju9tn-b09/-qw1d8j9 >actual ' test_expect_success "ipfs add -w (repeats) is correct" ' @@ -142,7 +138,7 @@ test_add_w() { ' test_expect_success "ipfs add -w -r (dir) --cid-version=1 succeeds" ' - ipfs add -r -w --cid-version=1 m/t_1wp-8a2/_jo7 >actual + ipfs add -r -w --cid-version=1 m/9m7mh3u51z3b/fvmq97 >actual ' test_expect_success "ipfs add -w -r (dir) --cid-version=1 is correct" ' @@ -151,7 +147,7 @@ test_add_w() { ' test_expect_success "ipfs add -w -r -n (dir) --cid-version=1 succeeds" ' - ipfs add -r -w -n --cid-version=1 m/t_1wp-8a2/_jo7 >actual + ipfs add -r -w -n --cid-version=1 m/9m7mh3u51z3b/fvmq97 >actual ' test_expect_success "ipfs add -w -r -n (dir) --cid-version=1 is correct" ' diff --git a/test/sharness/t0045-ls.sh b/test/sharness/t0045-ls.sh index 5e02ad167..ebb391d65 100755 --- a/test/sharness/t0045-ls.sh +++ b/test/sharness/t0045-ls.sh @@ -16,106 +16,106 @@ test_ls_cmd() { echo "test" >testData/f1 && echo "data" >testData/f2 && echo "hello" >testData/d1/a && - random 128 42 >testData/d1/128 && + random-data -size=128 -seed=42 >testData/d1/128 && echo "world" >testData/d2/a && - random 1024 42 >testData/d2/1024 && + random-data -size=1024 -seed=42 >testData/d2/1024 && echo "badname" >testData/d2/`echo -e "bad\x7fname.txt"` && ipfs add -r testData >actual_add ' 
test_expect_success "'ipfs add' output looks good" ' cat <<-\EOF >expected_add && -added QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe testData/d1/128 +added QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN testData/d1/128 added QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN testData/d1/a -added QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd testData/d2/1024 +added QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 testData/d2/1024 added QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL testData/d2/a added QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn testData/d2/bad\x7fname.txt added QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH testData/f1 added QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M testData/f2 -added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1 -added Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy testData/d2 -added QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 testData +added QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j testData/d1 +added Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW testData/d2 +added QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc testData EOF test_cmp expected_add actual_add ' - + test_expect_success "'ipfs ls ' succeeds" ' - ipfs ls QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls + ipfs ls QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j >actual_ls ' test_expect_success "'ipfs ls ' output looks good" ' cat <<-\EOF >expected_ls && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ +QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc: +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j - d1/ +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW - d2/ QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 
QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW: +QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 1024 1024 QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j: +QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN 128 128 QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a EOF test_cmp expected_ls actual_ls ' test_expect_success "'ipfs ls --size=false ' succeeds" ' - ipfs ls --size=false QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls + ipfs ls --size=false QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j >actual_ls ' test_expect_success "'ipfs ls ' output looks good" ' cat <<-\EOF >expected_ls && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy d2/ +QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc: +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j d1/ +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW d2/ QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH f1 QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M f2 -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW: +QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 1024 QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL a QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn bad\x7fname.txt -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 
+QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j: +QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN 128 QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN a EOF test_cmp expected_ls actual_ls ' test_expect_success "'ipfs ls --headers ' succeeds" ' - ipfs ls --headers QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_headers + ipfs ls --headers QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j >actual_ls_headers ' test_expect_success "'ipfs ls --headers ' output looks good" ' cat <<-\EOF >expected_ls_headers && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: +QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc: Hash Size Name -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j - d1/ +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW - d2/ QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW: Hash Size Name -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 +QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 1024 1024 QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j: Hash Size Name -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 +QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN 128 128 QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a EOF test_cmp expected_ls_headers actual_ls_headers ' test_expect_success "'ipfs ls --size=false --cid-base=base32 ' succeeds" ' - ipfs ls --size=false --cid-base=base32 $(cid-fmt -v 1 -b base32 %s QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 
Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss) >actual_ls_base32 + ipfs ls --size=false --cid-base=base32 $(cid-fmt -v 1 -b base32 %s QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j) >actual_ls_base32 ' test_expect_success "'ipfs ls --size=false --cid-base=base32 ' output looks good" ' @@ -132,99 +132,99 @@ test_ls_cmd_streaming() { echo "test" >testData/f1 && echo "data" >testData/f2 && echo "hello" >testData/d1/a && - random 128 42 >testData/d1/128 && + random-data -size=128 -seed=42 >testData/d1/128 && echo "world" >testData/d2/a && - random 1024 42 >testData/d2/1024 && + random-data -size=1024 -seed=42 >testData/d2/1024 && echo "badname" >testData/d2/`echo -e "bad\x7fname.txt"` && ipfs add -r testData >actual_add ' test_expect_success "'ipfs add' output looks good" ' cat <<-\EOF >expected_add && -added QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe testData/d1/128 +added QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN testData/d1/128 added QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN testData/d1/a -added QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd testData/d2/1024 +added QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 testData/d2/1024 added QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL testData/d2/a added QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn testData/d2/bad\x7fname.txt added QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH testData/f1 added QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M testData/f2 -added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1 -added Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy testData/d2 -added QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 testData +added QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j testData/d1 +added Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW testData/d2 +added QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc testData EOF test_cmp expected_add 
actual_add ' test_expect_success "'ipfs ls --stream ' succeeds" ' - ipfs ls --stream QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_stream + ipfs ls --stream QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j >actual_ls_stream ' test_expect_success "'ipfs ls --stream ' output looks good" ' cat <<-\EOF >expected_ls_stream && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ +QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc: +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j - d1/ +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW - d2/ QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW: +QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 1024 1024 QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j: +QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN 128 128 QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a EOF test_cmp expected_ls_stream actual_ls_stream ' test_expect_success "'ipfs ls --size=false --stream ' succeeds" ' - ipfs ls --size=false --stream QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_stream + ipfs ls --size=false --stream QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW 
QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j >actual_ls_stream ' test_expect_success "'ipfs ls --size=false --stream ' output looks good" ' cat <<-\EOF >expected_ls_stream && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy d2/ +QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc: +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j d1/ +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW d2/ QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH f1 QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M f2 -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW: +QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 1024 QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL a QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn bad\x7fname.txt -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j: +QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN 128 QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN a EOF test_cmp expected_ls_stream actual_ls_stream ' test_expect_success "'ipfs ls --stream --headers ' succeeds" ' - ipfs ls --stream --headers QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21 Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_stream_headers + ipfs ls --stream --headers QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j >actual_ls_stream_headers ' test_expect_success "'ipfs ls --stream --headers ' output looks good" ' cat <<-\EOF >expected_ls_stream_headers && -QmRPX2PWaPGqzoVzqNcQkueijHVzPicjupnD7eLck6Rs21: +QmR5UuxvF2ALd2GRGMCNg1GDiuuvcAyEkQaCV9fNkevWuc: Hash Size Name -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss - d1/ -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy - d2/ 
+QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j - d1/ +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW - d2/ QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH 5 f1 QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M 5 f2 -Qmf9nCpkCfa8Gtz5m1NJMeHBWcBozKRcbdom338LukPAjy: +Qmapxr4zxxUjoUFzyggydRZDkcJknjbtahYFKokbBAVghW: Hash Size Name -QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd 1024 1024 +QmZHVTX2epinyx5baTFV2L2ap9VtgbmfeFdhgntAypT5N3 1024 1024 QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL 6 a QmQSLRRd1Lxn6NMsWmmj2g9W3LtSRfmVAVqU3ShneLUrbn 8 bad\x7fname.txt -QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss: +QmWWEQhcLufF3qPmmbUjqH7WVWBT9JrGJwPiVTryCoBs2j: Hash Size Name -QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe 128 128 +QmWUixdcx1VJtpuAgXAy4e3JPAbEoHE6VEDut5KcYcpuGN 128 128 QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a EOF test_cmp expected_ls_stream_headers actual_ls_stream_headers @@ -244,19 +244,19 @@ test_ls_cmd_raw_leaves() { test_ls_object() { test_expect_success "ipfs add medium size file then 'ipfs ls --size=false' works as expected" ' - random 500000 2 > somefile && + random-data -size=500000 -seed=2 > somefile && HASH=$(ipfs add somefile -q) && - echo "QmPrM8S5T7Q3M8DQvQMS7m41m3Aq4jBjzAzvky5fH3xfr4 " > ls-expect && - echo "QmdaAntAzQqqVMo4B8V69nkQd5d918YjHXUe2oF6hr72ri " >> ls-expect && + echo "QmWJuiG6dhfwo3KXxCc9gkdizoMoXbLMCDiTTZgEhSmyyo " > ls-expect && + echo "QmNPxtpjhoXMRVKm4oSwcJaS4fck5FR4LufPd5KJr4jYhm " >> ls-expect && ipfs ls --size=false $HASH > ls-actual && test_cmp ls-actual ls-expect ' test_expect_success "ipfs add medium size file then 'ipfs ls' works as expected" ' - random 500000 2 > somefile && + random-data -size=500000 -seed=2 > somefile && HASH=$(ipfs add somefile -q) && - echo "QmPrM8S5T7Q3M8DQvQMS7m41m3Aq4jBjzAzvky5fH3xfr4 262144 " > ls-expect && - echo "QmdaAntAzQqqVMo4B8V69nkQd5d918YjHXUe2oF6hr72ri 237856 " >> ls-expect && + echo "QmWJuiG6dhfwo3KXxCc9gkdizoMoXbLMCDiTTZgEhSmyyo 262144 " > ls-expect && + echo 
"QmNPxtpjhoXMRVKm4oSwcJaS4fck5FR4LufPd5KJr4jYhm 237856 " >> ls-expect && ipfs ls $HASH > ls-actual && test_cmp ls-actual ls-expect ' @@ -285,8 +285,8 @@ test_ls_object test_expect_success "'ipfs add -r' succeeds" ' mkdir adir && # note: not using a seed as the files need to have truly random content - random 1000 > adir/file1 && - random 1000 > adir/file2 && + random-data -size=1000 > adir/file1 && + random-data -size=1000 > adir/file2 && ipfs add --pin=false -q -r adir > adir-hashes ' diff --git a/test/sharness/t0046-id-hash.sh b/test/sharness/t0046-id-hash.sh index d4c28f215..878b7228d 100755 --- a/test/sharness/t0046-id-hash.sh +++ b/test/sharness/t0046-id-hash.sh @@ -25,7 +25,8 @@ test_expect_success "ipfs add succeeds with identity hash" ' ' test_expect_success "content not actually added" ' - ipfs refs local | fgrep -q -v $HASH + ipfs refs local > locals && + test_should_not_contain $HASH locals ' test_expect_success "but can fetch it anyway" ' @@ -65,7 +66,7 @@ test_expect_success "ipfs add --inline --raw-leaves outputs the correct hash" ' ' test_expect_success "create 1000 bytes file and get its hash" ' - random 1000 2 > 1000bytes && + random-data -size=1000 -seed=2 > 1000bytes && HASH0=$(ipfs add -q --raw-leaves --only-hash 1000bytes) ' @@ -98,7 +99,8 @@ test_expect_success "ipfs add succeeds with identity hash and --nocopy" ' ' test_expect_success "content not actually added (filestore enabled)" ' - ipfs refs local | fgrep -q -v $HASH + ipfs refs local > locals && + test_should_not_contain $HASH locals ' test_expect_success "but can fetch it anyway (filestore enabled)" ' diff --git a/test/sharness/t0060-daemon.sh b/test/sharness/t0060-daemon.sh index 431ff245c..a160a8988 100755 --- a/test/sharness/t0060-daemon.sh +++ b/test/sharness/t0060-daemon.sh @@ -8,8 +8,8 @@ test_description="Test daemon command" . 
lib/test-lib.sh -test_expect_success "create badger config" ' - ipfs init --profile=badgerds,test > /dev/null && +test_expect_success "create pebble config" ' + ipfs init --profile=pebbleds,test > /dev/null && cp "$IPFS_PATH/config" init-config ' @@ -21,8 +21,8 @@ test_launch_ipfs_daemon --init --init-config="$(pwd)/init-config" --init-profile test_kill_ipfs_daemon test_expect_success "daemon initialization with existing config works" ' - ipfs config "Datastore.Spec.child.path" >actual && - test $(cat actual) = "badgerds" && + ipfs config "Datastore.Spec.path" >actual && + test $(cat actual) = "pebbleds" && ipfs config Addresses > orig_addrs ' @@ -131,21 +131,21 @@ test_expect_success "ipfs help output looks good" ' # check transport is encrypted by default and no plaintext is allowed test_expect_success SOCAT "default transport should support encryption (TLS, needs socat )" ' - socat - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-tls && + socat -s - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-tls && grep -q "/tls" swarmnc && test_must_fail grep -q "na" swarmnc || test_fsh cat swarmnc ' test_expect_success SOCAT "default transport should support encryption (Noise, needs socat )" ' - socat - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-noise && + socat -s - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-noise && grep -q "/noise" swarmnc && test_must_fail grep -q "na" swarmnc || test_fsh cat swarmnc ' test_expect_success SOCAT "default transport should not support plaintext (needs socat )" ' - socat - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-plaintext && + socat -s - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-plaintext && grep -q "na" swarmnc && test_must_fail grep -q "/plaintext" swarmnc || test_fsh cat swarmnc @@ -195,7 +195,7 @@ TEST_ULIMIT_PRESET=1 test_launch_ipfs_daemon 
test_expect_success "daemon raised its fd limit" ' - grep -v "setting file descriptor limit" actual_daemon > /dev/null + test_should_not_contain "setting file descriptor limit" actual_daemon ' test_expect_success "daemon actually can handle 2048 file descriptors" ' diff --git a/test/sharness/t0061-daemon-opts.sh b/test/sharness/t0061-daemon-opts.sh index 531d2d247..a168ae4b0 100755 --- a/test/sharness/t0061-daemon-opts.sh +++ b/test/sharness/t0061-daemon-opts.sh @@ -18,7 +18,7 @@ apiaddr=$API_ADDR # Odd. this fails here, but the inverse works on t0060-daemon. test_expect_success SOCAT 'transport should be unencrypted ( needs socat )' ' - socat - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-plaintext && + socat -s - tcp:localhost:$SWARM_PORT,connect-timeout=1 > swarmnc < ../t0060-data/mss-plaintext && grep -q "/plaintext" swarmnc && test_must_fail grep -q "na" swarmnc || test_fsh cat swarmnc diff --git a/test/sharness/t0066-migration.sh b/test/sharness/t0066-migration.sh index fa6a10e02..50ca3d17c 100755 --- a/test/sharness/t0066-migration.sh +++ b/test/sharness/t0066-migration.sh @@ -10,6 +10,10 @@ test_description="Test migrations auto update prompt" test_init_ipfs +# Remove explicit AutoConf.Enabled=false from test profile to use implicit default +# This allows daemon to work with 'auto' values added by v16-to-17 migration +ipfs config --json AutoConf.Enabled null >/dev/null 2>&1 + MIGRATION_START=7 IPFS_REPO_VER=$(<.ipfs/version) @@ -22,6 +26,12 @@ gen_mock_migrations() { j=$((i+1)) echo "#!/bin/bash" > bin/fs-repo-${i}-to-${j} echo "echo fake applying ${i}-to-${j} repo migration" >> bin/fs-repo-${i}-to-${j} + # Update version file to the target version for hybrid migration system + echo "if [ \"\$1\" = \"-path\" ] && [ -n \"\$2\" ]; then" >> bin/fs-repo-${i}-to-${j} + echo " echo $j > \"\$2/version\"" >> bin/fs-repo-${i}-to-${j} + echo "elif [ -n \"\$IPFS_PATH\" ]; then" >> bin/fs-repo-${i}-to-${j} + echo " echo $j > 
\"\$IPFS_PATH/version\"" >> bin/fs-repo-${i}-to-${j} + echo "fi" >> bin/fs-repo-${i}-to-${j} chmod +x bin/fs-repo-${i}-to-${j} ((i++)) done @@ -54,34 +64,42 @@ test_expect_success "manually reset repo version to $MIGRATION_START" ' ' test_expect_success "ipfs daemon --migrate=false fails" ' - test_expect_code 1 ipfs daemon --migrate=false > false_out + test_expect_code 1 ipfs daemon --migrate=false > false_out 2>&1 ' test_expect_success "output looks good" ' - grep "Please get fs-repo-migrations from https://dist.ipfs.tech" false_out + grep "Kubo repository at .* has version .* and needs to be migrated to version" false_out && + grep "Error: fs-repo requires migration" false_out ' -# The migrations will succeed, but the daemon will still exit with 1 because -# the fake migrations do not update the repo version number. -# -# If run with real migrations, the daemon continues running and must be killed. +# The migrations will succeed and the daemon will continue running +# since the mock migrations now properly update the repo version number. test_expect_success "ipfs daemon --migrate=true runs migration" ' - test_expect_code 1 ipfs daemon --migrate=true > true_out + ipfs daemon --migrate=true > true_out 2>&1 & + DAEMON_PID=$! 
+ # Wait for daemon to be ready then shutdown gracefully + sleep 3 && ipfs shutdown 2>/dev/null || kill $DAEMON_PID 2>/dev/null || true + wait $DAEMON_PID 2>/dev/null || true ' test_expect_success "output looks good" ' check_migration_output true_out && - grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null + (grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null || + grep "Hybrid migration completed successfully: v$MIGRATION_START → v$IPFS_REPO_VER" true_out > /dev/null) +' + +test_expect_success "reset repo version for auto-migration test" ' + echo "$MIGRATION_START" > "$IPFS_PATH"/version ' test_expect_success "'ipfs daemon' prompts to auto migrate" ' - test_expect_code 1 ipfs daemon > daemon_out 2> daemon_err + test_expect_code 1 ipfs daemon > daemon_out 2>&1 ' test_expect_success "output looks good" ' - grep "Found outdated fs-repo" daemon_out > /dev/null && + grep "Kubo repository at .* has version .* and needs to be migrated to version" daemon_out > /dev/null && grep "Run migrations now?" daemon_out > /dev/null && - grep "Please get fs-repo-migrations from https://dist.ipfs.tech" daemon_out > /dev/null + grep "Error: fs-repo requires migration" daemon_out > /dev/null ' test_expect_success "ipfs repo migrate succeed" ' @@ -89,8 +107,9 @@ test_expect_success "ipfs repo migrate succeed" ' ' test_expect_success "output looks good" ' - grep "Found outdated fs-repo, starting migration." 
migrate_out > /dev/null && - grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null + grep "Migrating repository from version" migrate_out > /dev/null && + (grep "Success: fs-repo migrated to version $IPFS_REPO_VER" migrate_out > /dev/null || + grep "Hybrid migration completed successfully: v$MIGRATION_START → v$IPFS_REPO_VER" migrate_out > /dev/null) ' test_expect_success "manually reset repo version to latest" ' @@ -102,7 +121,7 @@ test_expect_success "detect repo does not need migration" ' ' test_expect_success "output looks good" ' - grep "Repo does not require migration" migrate_out > /dev/null + grep "Repository is already at version" migrate_out > /dev/null ' # ensure that we get a lock error if we need to migrate and the daemon is running diff --git a/test/sharness/t0070-user-config.sh b/test/sharness/t0070-user-config.sh index 63c26ea3a..5a8180c73 100755 --- a/test/sharness/t0070-user-config.sh +++ b/test/sharness/t0070-user-config.sh @@ -11,10 +11,12 @@ test_description="Test user-provided config values" test_init_ipfs test_expect_success "bootstrap doesn't overwrite user-provided config keys (top-level)" ' - ipfs config Foo.Bar baz && + ipfs config Identity.PeerID >previous && + ipfs config Identity.PeerID foo && ipfs bootstrap rm --all && - echo "baz" >expected && - ipfs config Foo.Bar >actual && + echo "foo" >expected && + ipfs config Identity.PeerID >actual && + ipfs config Identity.PeerID $(cat previous) && test_cmp expected actual ' diff --git a/test/sharness/t0080-repo.sh b/test/sharness/t0080-repo.sh index 3f33a5f44..1059e8b93 100755 --- a/test/sharness/t0080-repo.sh +++ b/test/sharness/t0080-repo.sh @@ -30,7 +30,7 @@ test_expect_success "'ipfs repo gc' succeeds" ' ' test_expect_success "'ipfs repo gc' looks good (patch root)" ' - grep -v "removed $HASH" gc_out_actual + test_should_not_contain "removed $HASH" gc_out_actual ' test_expect_success "'ipfs repo gc' doesn't remove file" ' @@ -49,7 +49,7 @@ test_expect_success 
"'ipfs pin rm' output looks good" ' test_expect_success "ipfs repo gc fully reverse ipfs add (part 1)" ' ipfs repo gc && - random 100000 41 >gcfile && + random-data -size=100000 -seed=41 >gcfile && find "$IPFS_PATH/blocks" -type f -name "*.data" | sort -u > expected_blocks && hash=$(ipfs add -q gcfile) && ipfs pin rm -r $hash && @@ -142,7 +142,7 @@ test_expect_success "'ipfs refs local' no longer shows file" ' ' test_expect_success "adding multiblock random file succeeds" ' - random 1000000 >multiblock && + random-data -size=1000000 >multiblock && MBLOCKHASH=`ipfs add -q multiblock` ' @@ -284,11 +284,11 @@ test_expect_success "'ipfs repo stat --size-only' succeeds" ' ' test_expect_success "repo stats came out correct for --size-only" ' - grep "RepoSize" repo-stats-size-only && - grep "StorageMax" repo-stats-size-only && - grep -v "RepoPath" repo-stats-size-only && - grep -v "NumObjects" repo-stats-size-only && - grep -v "Version" repo-stats-size-only + test_should_contain "RepoSize" repo-stats-size-only && + test_should_contain "StorageMax" repo-stats-size-only && + test_should_not_contain "RepoPath" repo-stats-size-only && + test_should_not_contain "NumObjects" repo-stats-size-only && + test_should_not_contain "Version" repo-stats-size-only ' test_expect_success "'ipfs repo version' succeeds" ' diff --git a/test/sharness/t0082-repo-gc-auto.sh b/test/sharness/t0082-repo-gc-auto.sh index 50a4e6fae..4d4559534 100755 --- a/test/sharness/t0082-repo-gc-auto.sh +++ b/test/sharness/t0082-repo-gc-auto.sh @@ -17,10 +17,10 @@ check_ipfs_storage() { test_init_ipfs -test_expect_success "generate 2 600 kB files and 2 MB file using go-random" ' - random 600k 41 >600k1 && - random 600k 42 >600k2 && - random 2M 43 >2M +test_expect_success "generate 2 600 kB files and 2 MB file using random-data" ' + random-data -size=614400 -seed=41 >600k1 && + random-data -size=614400 -seed=42 >600k2 && + random-data -size=2097152 -seed=43 >2M ' test_expect_success "set ipfs gc watermark, storage 
max, and gc timeout" ' diff --git a/test/sharness/t0086-repo-verify.sh b/test/sharness/t0086-repo-verify.sh index 0f12fef8f..b73a6230e 100755 --- a/test/sharness/t0086-repo-verify.sh +++ b/test/sharness/t0086-repo-verify.sh @@ -3,6 +3,9 @@ # Copyright (c) 2016 Jeromy Johnson # MIT Licensed; see the LICENSE file in this repository. # +# NOTE: This is a legacy sharness test kept for compatibility. +# New tests for 'ipfs repo verify' should be added to test/cli/repo_verify_test.go +# test_description="Test ipfs repo fsck" @@ -24,7 +27,10 @@ sort_rand() { } check_random_corruption() { - to_break=$(find "$IPFS_PATH/blocks" -type f -name '*.data' | sort_rand | head -n 1) + # Exclude well-known blocks from corruption as they cause test flakiness: + # - CIQL7TG2PB52XIZLLHDYIUFMHUQLMMZWBNBZSLDXFCPZ5VDNQQ2WDZQ.data: empty file block + # - CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data: empty directory block (has special handling, served from memory even when corrupted on disk) + to_break=$(find "$IPFS_PATH/blocks" -type f -name '*.data' | grep -v -E "CIQL7TG2PB52XIZLLHDYIUFMHUQLMMZWBNBZSLDXFCPZ5VDNQQ2WDZQ.data|CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data" | sort_rand | head -n 1) test_expect_success "back up file and overwrite it" ' cp "$to_break" backup_file && diff --git a/test/sharness/t0087-repo-robust-gc.sh b/test/sharness/t0087-repo-robust-gc.sh index 884de5774..453e6a6cc 100755 --- a/test/sharness/t0087-repo-robust-gc.sh +++ b/test/sharness/t0087-repo-robust-gc.sh @@ -16,7 +16,7 @@ to_raw_cid() { test_gc_robust_part1() { test_expect_success "add a 1MB file with --raw-leaves" ' - random 1048576 56 > afile && + random-data -size=1048576 -seed=56 > afile && HASH1=`ipfs add --raw-leaves -q --cid-version 1 afile` && REFS=`ipfs refs -r $HASH1` && read LEAF1 LEAF2 LEAF3 LEAF4 < <(echo $REFS) @@ -96,20 +96,20 @@ test_gc_robust_part1() { test_gc_robust_part2() { test_expect_success "add 1MB file normally (i.e., without raw leaves)" ' - random 
1048576 56 > afile && + random-data -size=1048576 -seed=56 > afile && HASH2=`ipfs add -q afile` ' - LEAF1=QmSijovevteoY63Uj1uC5b8pkpDU5Jgyk2dYBqz3sMJUPc - LEAF1FILE=.ipfs/blocks/ME/CIQECF2K344QITW5S6E6H6T4DOXDDB2XA2V7BBOCIMN2VVF4Q77SMEY.data + LEAF1=QmcNNR6JSCUhJ9nyoVQgBhABPgcgdsuYJgdSB1f2g6BF5c + LEAF1FILE=.ipfs/blocks/RA/CIQNA5C3BLRUX3LZ7X6UTOV3KSHLARNXVDK3W5KUO6GVHNRP4SGLRAY.data - LEAF2=QmTbPEyrA1JyGUHFvmtx1FNZVzdBreMv8Hc8jV9sBRWhNA - LEAF2FILE=.ipfs/blocks/WM/CIQE4EFIJN2SUTQYSKMKNG7VM75W3SXT6LWJCHJJ73UAWN73WCX3WMY.data + LEAF2=QmPvtiBLgwuwF2wyf9VL8PaYgSt1XwGJ2Yu4AscRGEQvqR + LEAF2FILE=.ipfs/blocks/RN/CIQBPIKEATBI7TIHVYRQJZAKEWF2H22PXW3A7LCEPB6MFFL7IA2CRNA.data test_expect_success "add some additional unpinned content" ' - random 1000 3 > junk1 && - random 1000 4 > junk2 && + random-data -size=1000 -seed=3 > junk1 && + random-data -size=1000 -seed=4 > junk2 && JUNK1=`ipfs add --pin=false -q junk1` && JUNK2=`ipfs add --pin=false -q junk2` ' diff --git a/test/sharness/t0114-gateway-subdomains.sh b/test/sharness/t0114-gateway-subdomains.sh index 5d9927d8e..ae1bc1a93 100755 --- a/test/sharness/t0114-gateway-subdomains.sh +++ b/test/sharness/t0114-gateway-subdomains.sh @@ -163,7 +163,7 @@ test_localhost_gateway_response_should_contain \ "Location: http://$DIR_CID.ipfs.localhost:$GWAY_PORT/" # Kubo specific end-to-end test -# (independend of gateway-conformance) +# (independent of gateway-conformance) # We return human-readable body with HTTP 301 so existing cli scripts that use path-based # gateway are informed to enable following HTTP redirects @@ -194,7 +194,7 @@ test_localhost_gateway_response_should_contain \ # /ipns/ # Kubo specific end-to-end test -# (independend of gateway-conformance) +# (independent of gateway-conformance) test_localhost_gateway_response_should_contain \ "request for localhost/ipns/{fqdn} redirects to DNSLink in subdomain" \ @@ -228,7 +228,7 @@ test_localhost_gateway_response_should_contain \ "I am a txt file" # Kubo specific end-to-end 
test -# (independend of gateway-conformance) +# (independent of gateway-conformance) # This tests link to parent specific to boxo + relative pathing end-to-end tests specific to Kubo. # {CID}.ipfs.localhost/sub/dir (Directory Listing) @@ -429,7 +429,7 @@ test_hostname_gateway_response_should_contain \ "404 Not Found" # Kubo specific end-to-end test -# (independend of gateway-conformance) +# (independent of gateway-conformance) # HTML specific to Boxo/Kubo, and relative pathing specific to code in Kubo # {CID}.ipfs.example.com/sub/dir (Directory Listing) @@ -801,8 +801,8 @@ test_expect_success "request for http://fake.domain.com/ipfs/{CID} with X-Forwar " # Kubo specific end-to-end test -# (independend of gateway-conformance) -# test cofiguration beign wired up correctly end-to-end +# (independent of gateway-conformance) +# test configuration being wired up correctly end-to-end ## ============================================================================ ## Test support for wildcards in gateway config @@ -916,4 +916,4 @@ test_expect_success "clean up ipfs dir" ' test_done -# end Kubo specific end-to-end test \ No newline at end of file +# end Kubo specific end-to-end test diff --git a/test/sharness/t0115-gateway-dir-listing.sh b/test/sharness/t0115-gateway-dir-listing.sh index 1ce0861b2..d4e08e5be 100755 --- a/test/sharness/t0115-gateway-dir-listing.sh +++ b/test/sharness/t0115-gateway-dir-listing.sh @@ -40,7 +40,7 @@ test_expect_success "path gw: backlink on root CID should be hidden" ' test_expect_success "path gw: redirect dir listing to URL with trailing slash" ' curl -sD - http://127.0.0.1:$GWAY_PORT/ipfs/${DIR_CID}/ą/ę > list_response && test_should_contain "HTTP/1.1 301 Moved Permanently" list_response && - test_should_contain "Location: /ipfs/${DIR_CID}/%c4%85/%c4%99/" list_response + test_should_contain "Location: /ipfs/${DIR_CID}/%C4%85/%C4%99/" list_response ' test_expect_success "path gw: Etag should be present" ' @@ -81,7 +81,7 @@ test_expect_success 
"subdomain gw: backlink on root CID should be hidden" ' test_expect_success "subdomain gw: redirect dir listing to URL with trailing slash" ' curl -sD - --resolve $DIR_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DIR_HOSTNAME:$GWAY_PORT/ą/ę > list_response && test_should_contain "HTTP/1.1 301 Moved Permanently" list_response && - test_should_contain "Location: /%c4%85/%c4%99/" list_response + test_should_contain "Location: /%C4%85/%C4%99/" list_response ' test_expect_success "subdomain gw: Etag should be present" ' @@ -130,7 +130,7 @@ test_expect_success "dnslink gw: backlink on root CID should be hidden" ' test_expect_success "dnslink gw: redirect dir listing to URL with trailing slash" ' curl -sD - --resolve $DNSLINK_HOSTNAME:$GWAY_PORT:127.0.0.1 http://$DNSLINK_HOSTNAME:$GWAY_PORT/ą/ę > list_response && test_should_contain "HTTP/1.1 301 Moved Permanently" list_response && - test_should_contain "Location: /%c4%85/%c4%99/" list_response + test_should_contain "Location: /%C4%85/%C4%99/" list_response ' test_expect_success "dnslink gw: Etag should be present" ' diff --git a/test/sharness/t0119-prometheus-data/prometheus_metrics b/test/sharness/t0119-prometheus-data/prometheus_metrics index f3ba65c97..1099032d7 100644 --- a/test/sharness/t0119-prometheus-data/prometheus_metrics +++ b/test/sharness/t0119-prometheus-data/prometheus_metrics @@ -1,85 +1,33 @@ -flatfs_datastore_batchcommit_errors_total -flatfs_datastore_batchcommit_latency_seconds_bucket -flatfs_datastore_batchcommit_latency_seconds_count -flatfs_datastore_batchcommit_latency_seconds_sum -flatfs_datastore_batchcommit_total -flatfs_datastore_batchdelete_errors_total -flatfs_datastore_batchdelete_latency_seconds_bucket -flatfs_datastore_batchdelete_latency_seconds_count -flatfs_datastore_batchdelete_latency_seconds_sum -flatfs_datastore_batchdelete_total -flatfs_datastore_batchput_errors_total -flatfs_datastore_batchput_latency_seconds_bucket -flatfs_datastore_batchput_latency_seconds_count 
-flatfs_datastore_batchput_latency_seconds_sum -flatfs_datastore_batchput_size_bytes_bucket -flatfs_datastore_batchput_size_bytes_count -flatfs_datastore_batchput_size_bytes_sum -flatfs_datastore_batchput_total -flatfs_datastore_check_errors_total -flatfs_datastore_check_latency_seconds_bucket -flatfs_datastore_check_latency_seconds_count -flatfs_datastore_check_latency_seconds_sum -flatfs_datastore_check_total -flatfs_datastore_delete_errors_total -flatfs_datastore_delete_latency_seconds_bucket -flatfs_datastore_delete_latency_seconds_count -flatfs_datastore_delete_latency_seconds_sum -flatfs_datastore_delete_total -flatfs_datastore_du_errors_total -flatfs_datastore_du_latency_seconds_bucket -flatfs_datastore_du_latency_seconds_count -flatfs_datastore_du_latency_seconds_sum -flatfs_datastore_du_total -flatfs_datastore_gc_errors_total -flatfs_datastore_gc_latency_seconds_bucket -flatfs_datastore_gc_latency_seconds_count -flatfs_datastore_gc_latency_seconds_sum -flatfs_datastore_gc_total -flatfs_datastore_get_errors_total -flatfs_datastore_get_latency_seconds_bucket -flatfs_datastore_get_latency_seconds_count -flatfs_datastore_get_latency_seconds_sum -flatfs_datastore_get_size_bytes_bucket -flatfs_datastore_get_size_bytes_count -flatfs_datastore_get_size_bytes_sum -flatfs_datastore_get_total -flatfs_datastore_getsize_errors_total -flatfs_datastore_getsize_latency_seconds_bucket -flatfs_datastore_getsize_latency_seconds_count -flatfs_datastore_getsize_latency_seconds_sum -flatfs_datastore_getsize_total -flatfs_datastore_has_errors_total -flatfs_datastore_has_latency_seconds_bucket -flatfs_datastore_has_latency_seconds_count -flatfs_datastore_has_latency_seconds_sum -flatfs_datastore_has_total -flatfs_datastore_put_errors_total -flatfs_datastore_put_latency_seconds_bucket -flatfs_datastore_put_latency_seconds_count -flatfs_datastore_put_latency_seconds_sum -flatfs_datastore_put_size_bytes_bucket -flatfs_datastore_put_size_bytes_count 
-flatfs_datastore_put_size_bytes_sum -flatfs_datastore_put_total -flatfs_datastore_query_errors_total -flatfs_datastore_query_latency_seconds_bucket -flatfs_datastore_query_latency_seconds_count -flatfs_datastore_query_latency_seconds_sum -flatfs_datastore_query_total -flatfs_datastore_scrub_errors_total -flatfs_datastore_scrub_latency_seconds_bucket -flatfs_datastore_scrub_latency_seconds_count -flatfs_datastore_scrub_latency_seconds_sum -flatfs_datastore_scrub_total -flatfs_datastore_sync_errors_total -flatfs_datastore_sync_latency_seconds_bucket -flatfs_datastore_sync_latency_seconds_count -flatfs_datastore_sync_latency_seconds_sum -flatfs_datastore_sync_total +exchange_bitswap_requests_in_flight +exchange_bitswap_response_bytes_bucket +exchange_bitswap_response_bytes_count +exchange_bitswap_response_bytes_sum +exchange_bitswap_wantlists_items_total +exchange_bitswap_wantlists_seconds_bucket +exchange_bitswap_wantlists_seconds_count +exchange_bitswap_wantlists_seconds_sum +exchange_bitswap_wantlists_total +exchange_httpnet_request_duration_seconds_bucket +exchange_httpnet_request_duration_seconds_count +exchange_httpnet_request_duration_seconds_sum +exchange_httpnet_request_sent_bytes +exchange_httpnet_requests_body_failure +exchange_httpnet_requests_failure +exchange_httpnet_requests_in_flight +exchange_httpnet_requests_total +exchange_httpnet_response_bytes_bucket +exchange_httpnet_response_bytes_count +exchange_httpnet_response_bytes_sum +exchange_httpnet_wantlists_items_total +exchange_httpnet_wantlists_seconds_bucket +exchange_httpnet_wantlists_seconds_count +exchange_httpnet_wantlists_seconds_sum +exchange_httpnet_wantlists_total go_gc_duration_seconds go_gc_duration_seconds_count go_gc_duration_seconds_sum +go_gc_gogc_percent +go_gc_gomemlimit_bytes go_goroutines go_info go_memstats_alloc_bytes @@ -94,7 +42,6 @@ go_memstats_heap_objects go_memstats_heap_released_bytes go_memstats_heap_sys_bytes go_memstats_last_gc_time_seconds -go_memstats_lookups_total 
go_memstats_mallocs_total go_memstats_mcache_inuse_bytes go_memstats_mcache_sys_bytes @@ -105,9 +52,22 @@ go_memstats_other_sys_bytes go_memstats_stack_inuse_bytes go_memstats_stack_sys_bytes go_memstats_sys_bytes +go_sched_gomaxprocs_threads go_threads +http_server_request_body_size_bytes_bucket +http_server_request_body_size_bytes_count +http_server_request_body_size_bytes_sum +http_server_request_duration_seconds_bucket +http_server_request_duration_seconds_count +http_server_request_duration_seconds_sum +http_server_response_body_size_bytes_bucket +http_server_response_body_size_bytes_count +http_server_response_body_size_bytes_sum ipfs_bitswap_active_block_tasks ipfs_bitswap_active_tasks +ipfs_bitswap_bcast_skips_total +ipfs_bitswap_blocks_received +ipfs_bitswap_haves_received ipfs_bitswap_pending_block_tasks ipfs_bitswap_pending_tasks ipfs_bitswap_recv_all_blocks_bytes_bucket @@ -123,6 +83,7 @@ ipfs_bitswap_sent_all_blocks_bytes_bucket ipfs_bitswap_sent_all_blocks_bytes_count ipfs_bitswap_sent_all_blocks_bytes_sum ipfs_bitswap_want_blocks_total +ipfs_bitswap_wanthaves_broadcast ipfs_bitswap_wantlist_total ipfs_bs_cache_boxo_blockstore_cache_hits ipfs_bs_cache_boxo_blockstore_cache_total @@ -205,6 +166,7 @@ ipfs_fsrepo_datastore_sync_latency_seconds_bucket ipfs_fsrepo_datastore_sync_latency_seconds_count ipfs_fsrepo_datastore_sync_latency_seconds_sum ipfs_fsrepo_datastore_sync_total +ipfs_http_gw_concurrent_requests ipfs_http_request_duration_seconds ipfs_http_request_duration_seconds_count ipfs_http_request_duration_seconds_sum @@ -216,85 +178,6 @@ ipfs_http_response_size_bytes ipfs_http_response_size_bytes_count ipfs_http_response_size_bytes_sum ipfs_info -leveldb_datastore_batchcommit_errors_total -leveldb_datastore_batchcommit_latency_seconds_bucket -leveldb_datastore_batchcommit_latency_seconds_count -leveldb_datastore_batchcommit_latency_seconds_sum -leveldb_datastore_batchcommit_total -leveldb_datastore_batchdelete_errors_total 
-leveldb_datastore_batchdelete_latency_seconds_bucket -leveldb_datastore_batchdelete_latency_seconds_count -leveldb_datastore_batchdelete_latency_seconds_sum -leveldb_datastore_batchdelete_total -leveldb_datastore_batchput_errors_total -leveldb_datastore_batchput_latency_seconds_bucket -leveldb_datastore_batchput_latency_seconds_count -leveldb_datastore_batchput_latency_seconds_sum -leveldb_datastore_batchput_size_bytes_bucket -leveldb_datastore_batchput_size_bytes_count -leveldb_datastore_batchput_size_bytes_sum -leveldb_datastore_batchput_total -leveldb_datastore_check_errors_total -leveldb_datastore_check_latency_seconds_bucket -leveldb_datastore_check_latency_seconds_count -leveldb_datastore_check_latency_seconds_sum -leveldb_datastore_check_total -leveldb_datastore_delete_errors_total -leveldb_datastore_delete_latency_seconds_bucket -leveldb_datastore_delete_latency_seconds_count -leveldb_datastore_delete_latency_seconds_sum -leveldb_datastore_delete_total -leveldb_datastore_du_errors_total -leveldb_datastore_du_latency_seconds_bucket -leveldb_datastore_du_latency_seconds_count -leveldb_datastore_du_latency_seconds_sum -leveldb_datastore_du_total -leveldb_datastore_gc_errors_total -leveldb_datastore_gc_latency_seconds_bucket -leveldb_datastore_gc_latency_seconds_count -leveldb_datastore_gc_latency_seconds_sum -leveldb_datastore_gc_total -leveldb_datastore_get_errors_total -leveldb_datastore_get_latency_seconds_bucket -leveldb_datastore_get_latency_seconds_count -leveldb_datastore_get_latency_seconds_sum -leveldb_datastore_get_size_bytes_bucket -leveldb_datastore_get_size_bytes_count -leveldb_datastore_get_size_bytes_sum -leveldb_datastore_get_total -leveldb_datastore_getsize_errors_total -leveldb_datastore_getsize_latency_seconds_bucket -leveldb_datastore_getsize_latency_seconds_count -leveldb_datastore_getsize_latency_seconds_sum -leveldb_datastore_getsize_total -leveldb_datastore_has_errors_total -leveldb_datastore_has_latency_seconds_bucket 
-leveldb_datastore_has_latency_seconds_count -leveldb_datastore_has_latency_seconds_sum -leveldb_datastore_has_total -leveldb_datastore_put_errors_total -leveldb_datastore_put_latency_seconds_bucket -leveldb_datastore_put_latency_seconds_count -leveldb_datastore_put_latency_seconds_sum -leveldb_datastore_put_size_bytes_bucket -leveldb_datastore_put_size_bytes_count -leveldb_datastore_put_size_bytes_sum -leveldb_datastore_put_total -leveldb_datastore_query_errors_total -leveldb_datastore_query_latency_seconds_bucket -leveldb_datastore_query_latency_seconds_count -leveldb_datastore_query_latency_seconds_sum -leveldb_datastore_query_total -leveldb_datastore_scrub_errors_total -leveldb_datastore_scrub_latency_seconds_bucket -leveldb_datastore_scrub_latency_seconds_count -leveldb_datastore_scrub_latency_seconds_sum -leveldb_datastore_scrub_total -leveldb_datastore_sync_errors_total -leveldb_datastore_sync_latency_seconds_bucket -leveldb_datastore_sync_latency_seconds_count -leveldb_datastore_sync_latency_seconds_sum -leveldb_datastore_sync_total libp2p_autonat_next_probe_timestamp libp2p_autonat_reachability_status libp2p_autonat_reachability_status_confidence @@ -357,10 +240,15 @@ libp2p_relaysvc_status libp2p_swarm_dial_ranking_delay_seconds_bucket libp2p_swarm_dial_ranking_delay_seconds_count libp2p_swarm_dial_ranking_delay_seconds_sum +otel_scope_info process_cpu_seconds_total process_max_fds +process_network_receive_bytes_total +process_network_transmit_bytes_total process_open_fds process_resident_memory_bytes process_start_time_seconds process_virtual_memory_bytes process_virtual_memory_max_bytes +provider_provides_total +target_info diff --git a/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr b/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr index 382ab1256..e69de29bb 100644 --- a/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr +++ 
b/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr @@ -1,4 +0,0 @@ -libp2p_rcmgr_memory_allocations_allowed_total -libp2p_rcmgr_memory_allocations_blocked_total -libp2p_rcmgr_peer_blocked_total -libp2p_rcmgr_peers_allowed_total diff --git a/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile b/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile new file mode 100644 index 000000000..03f132701 --- /dev/null +++ b/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile @@ -0,0 +1,158 @@ +flatfs_datastore_batchcommit_errors_total +flatfs_datastore_batchcommit_latency_seconds_bucket +flatfs_datastore_batchcommit_latency_seconds_count +flatfs_datastore_batchcommit_latency_seconds_sum +flatfs_datastore_batchcommit_total +flatfs_datastore_batchdelete_errors_total +flatfs_datastore_batchdelete_latency_seconds_bucket +flatfs_datastore_batchdelete_latency_seconds_count +flatfs_datastore_batchdelete_latency_seconds_sum +flatfs_datastore_batchdelete_total +flatfs_datastore_batchput_errors_total +flatfs_datastore_batchput_latency_seconds_bucket +flatfs_datastore_batchput_latency_seconds_count +flatfs_datastore_batchput_latency_seconds_sum +flatfs_datastore_batchput_size_bytes_bucket +flatfs_datastore_batchput_size_bytes_count +flatfs_datastore_batchput_size_bytes_sum +flatfs_datastore_batchput_total +flatfs_datastore_check_errors_total +flatfs_datastore_check_latency_seconds_bucket +flatfs_datastore_check_latency_seconds_count +flatfs_datastore_check_latency_seconds_sum +flatfs_datastore_check_total +flatfs_datastore_delete_errors_total +flatfs_datastore_delete_latency_seconds_bucket +flatfs_datastore_delete_latency_seconds_count +flatfs_datastore_delete_latency_seconds_sum +flatfs_datastore_delete_total +flatfs_datastore_du_errors_total +flatfs_datastore_du_latency_seconds_bucket +flatfs_datastore_du_latency_seconds_count +flatfs_datastore_du_latency_seconds_sum 
+flatfs_datastore_du_total +flatfs_datastore_gc_errors_total +flatfs_datastore_gc_latency_seconds_bucket +flatfs_datastore_gc_latency_seconds_count +flatfs_datastore_gc_latency_seconds_sum +flatfs_datastore_gc_total +flatfs_datastore_get_errors_total +flatfs_datastore_get_latency_seconds_bucket +flatfs_datastore_get_latency_seconds_count +flatfs_datastore_get_latency_seconds_sum +flatfs_datastore_get_size_bytes_bucket +flatfs_datastore_get_size_bytes_count +flatfs_datastore_get_size_bytes_sum +flatfs_datastore_get_total +flatfs_datastore_getsize_errors_total +flatfs_datastore_getsize_latency_seconds_bucket +flatfs_datastore_getsize_latency_seconds_count +flatfs_datastore_getsize_latency_seconds_sum +flatfs_datastore_getsize_total +flatfs_datastore_has_errors_total +flatfs_datastore_has_latency_seconds_bucket +flatfs_datastore_has_latency_seconds_count +flatfs_datastore_has_latency_seconds_sum +flatfs_datastore_has_total +flatfs_datastore_put_errors_total +flatfs_datastore_put_latency_seconds_bucket +flatfs_datastore_put_latency_seconds_count +flatfs_datastore_put_latency_seconds_sum +flatfs_datastore_put_size_bytes_bucket +flatfs_datastore_put_size_bytes_count +flatfs_datastore_put_size_bytes_sum +flatfs_datastore_put_total +flatfs_datastore_query_errors_total +flatfs_datastore_query_latency_seconds_bucket +flatfs_datastore_query_latency_seconds_count +flatfs_datastore_query_latency_seconds_sum +flatfs_datastore_query_total +flatfs_datastore_scrub_errors_total +flatfs_datastore_scrub_latency_seconds_bucket +flatfs_datastore_scrub_latency_seconds_count +flatfs_datastore_scrub_latency_seconds_sum +flatfs_datastore_scrub_total +flatfs_datastore_sync_errors_total +flatfs_datastore_sync_latency_seconds_bucket +flatfs_datastore_sync_latency_seconds_count +flatfs_datastore_sync_latency_seconds_sum +flatfs_datastore_sync_total +leveldb_datastore_batchcommit_errors_total +leveldb_datastore_batchcommit_latency_seconds_bucket 
+leveldb_datastore_batchcommit_latency_seconds_count +leveldb_datastore_batchcommit_latency_seconds_sum +leveldb_datastore_batchcommit_total +leveldb_datastore_batchdelete_errors_total +leveldb_datastore_batchdelete_latency_seconds_bucket +leveldb_datastore_batchdelete_latency_seconds_count +leveldb_datastore_batchdelete_latency_seconds_sum +leveldb_datastore_batchdelete_total +leveldb_datastore_batchput_errors_total +leveldb_datastore_batchput_latency_seconds_bucket +leveldb_datastore_batchput_latency_seconds_count +leveldb_datastore_batchput_latency_seconds_sum +leveldb_datastore_batchput_size_bytes_bucket +leveldb_datastore_batchput_size_bytes_count +leveldb_datastore_batchput_size_bytes_sum +leveldb_datastore_batchput_total +leveldb_datastore_check_errors_total +leveldb_datastore_check_latency_seconds_bucket +leveldb_datastore_check_latency_seconds_count +leveldb_datastore_check_latency_seconds_sum +leveldb_datastore_check_total +leveldb_datastore_delete_errors_total +leveldb_datastore_delete_latency_seconds_bucket +leveldb_datastore_delete_latency_seconds_count +leveldb_datastore_delete_latency_seconds_sum +leveldb_datastore_delete_total +leveldb_datastore_du_errors_total +leveldb_datastore_du_latency_seconds_bucket +leveldb_datastore_du_latency_seconds_count +leveldb_datastore_du_latency_seconds_sum +leveldb_datastore_du_total +leveldb_datastore_gc_errors_total +leveldb_datastore_gc_latency_seconds_bucket +leveldb_datastore_gc_latency_seconds_count +leveldb_datastore_gc_latency_seconds_sum +leveldb_datastore_gc_total +leveldb_datastore_get_errors_total +leveldb_datastore_get_latency_seconds_bucket +leveldb_datastore_get_latency_seconds_count +leveldb_datastore_get_latency_seconds_sum +leveldb_datastore_get_size_bytes_bucket +leveldb_datastore_get_size_bytes_count +leveldb_datastore_get_size_bytes_sum +leveldb_datastore_get_total +leveldb_datastore_getsize_errors_total +leveldb_datastore_getsize_latency_seconds_bucket 
+leveldb_datastore_getsize_latency_seconds_count +leveldb_datastore_getsize_latency_seconds_sum +leveldb_datastore_getsize_total +leveldb_datastore_has_errors_total +leveldb_datastore_has_latency_seconds_bucket +leveldb_datastore_has_latency_seconds_count +leveldb_datastore_has_latency_seconds_sum +leveldb_datastore_has_total +leveldb_datastore_put_errors_total +leveldb_datastore_put_latency_seconds_bucket +leveldb_datastore_put_latency_seconds_count +leveldb_datastore_put_latency_seconds_sum +leveldb_datastore_put_size_bytes_bucket +leveldb_datastore_put_size_bytes_count +leveldb_datastore_put_size_bytes_sum +leveldb_datastore_put_total +leveldb_datastore_query_errors_total +leveldb_datastore_query_latency_seconds_bucket +leveldb_datastore_query_latency_seconds_count +leveldb_datastore_query_latency_seconds_sum +leveldb_datastore_query_total +leveldb_datastore_scrub_errors_total +leveldb_datastore_scrub_latency_seconds_bucket +leveldb_datastore_scrub_latency_seconds_count +leveldb_datastore_scrub_latency_seconds_sum +leveldb_datastore_scrub_total +leveldb_datastore_sync_errors_total +leveldb_datastore_sync_latency_seconds_bucket +leveldb_datastore_sync_latency_seconds_count +leveldb_datastore_sync_latency_seconds_sum +leveldb_datastore_sync_total diff --git a/test/sharness/t0119-prometheus.sh b/test/sharness/t0119-prometheus.sh index fef204e23..4daf8281b 100755 --- a/test/sharness/t0119-prometheus.sh +++ b/test/sharness/t0119-prometheus.sh @@ -57,4 +57,28 @@ test_expect_success "make sure initial metrics added by setting ResourceMgr.Enab diff -u ../t0119-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr rcmgr_metrics ' +# Reinitialize ipfs with --profile=flatfs-measure and check metrics. 
+ +test_expect_success "remove ipfs directory" ' + rm -rf .ipfs mountdir ipfs ipns +' + +test_init_ipfs_measure + +test_launch_ipfs_daemon + +test_expect_success "collect metrics" ' + curl "$API_ADDR/debug/metrics/prometheus" > raw_metrics +' +test_kill_ipfs_daemon + +test_expect_success "filter metrics and find ones added by enabling flatfs-measure profile" ' + sed -ne "s/^\([a-z0-9_]\+\).*/\1/p" raw_metrics | LC_ALL=C sort > filtered_metrics && + grep -v -x -f ../t0119-prometheus-data/prometheus_metrics filtered_metrics | LC_ALL=C sort | uniq > measure_metrics +' + +test_expect_success "make sure initial metrics added by initializing with flatfs-measure profile haven't changed" ' + diff -u ../t0119-prometheus-data/prometheus_metrics_added_by_measure_profile measure_metrics +' + test_done diff --git a/test/sharness/t0120-bootstrap.sh b/test/sharness/t0120-bootstrap.sh index 2922533c6..e4bbde78a 100755 --- a/test/sharness/t0120-bootstrap.sh +++ b/test/sharness/t0120-bootstrap.sh @@ -9,10 +9,14 @@ BP1="/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTez BP2="/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa" BP3="/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb" BP4="/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt" -BP5="/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" -BP6="/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" +BP5="/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8" +BP6="/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" +BP7="/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" -test_description="Test ipfs repo operations" +test_description="Test ipfs bootstrap operations" + +# NOTE: For AutoConf bootstrap functionality (add default, --expand-auto, etc.) 
+# see test/cli/bootstrap_auto_test.go and test/cli/autoconf/expand_test.go . lib/test-lib.sh @@ -82,33 +86,12 @@ test_bootstrap_cmd() { test_bootstrap_list_cmd $BP2 - test_expect_success "'ipfs bootstrap add --default' succeeds" ' - ipfs bootstrap add --default >add2_actual - ' - - test_expect_success "'ipfs bootstrap add --default' output has default BP" ' - echo "added $BP1" >add2_expected && - echo "added $BP2" >>add2_expected && - echo "added $BP3" >>add2_expected && - echo "added $BP4" >>add2_expected && - echo "added $BP5" >>add2_expected && - echo "added $BP6" >>add2_expected && - test_cmp add2_expected add2_actual - ' - - test_bootstrap_list_cmd $BP1 $BP2 $BP3 $BP4 $BP5 $BP6 - test_expect_success "'ipfs bootstrap rm --all' succeeds" ' ipfs bootstrap rm --all >rm2_actual ' test_expect_success "'ipfs bootstrap rm' output looks good" ' - echo "removed $BP1" >rm2_expected && - echo "removed $BP2" >>rm2_expected && - echo "removed $BP3" >>rm2_expected && - echo "removed $BP4" >>rm2_expected && - echo "removed $BP5" >>rm2_expected && - echo "removed $BP6" >>rm2_expected && + echo "removed $BP2" >rm2_expected && test_cmp rm2_expected rm2_actual ' diff --git a/test/sharness/t0121-bootstrap-iptb.sh b/test/sharness/t0121-bootstrap-iptb.sh index 16dcbdb2f..049191865 100755 --- a/test/sharness/t0121-bootstrap-iptb.sh +++ b/test/sharness/t0121-bootstrap-iptb.sh @@ -52,7 +52,7 @@ test_expect_success "bring down iptb nodes" ' ' test_expect_success "reset iptb nodes" ' - # the api doesnt seem to get cleaned up in sharness tests for some reason + # the api does not seem to get cleaned up in sharness tests for some reason iptb testbed create -type localipfs -count 5 -force -init ' diff --git a/test/sharness/t0131-multinode-client-routing.sh b/test/sharness/t0131-multinode-client-routing.sh index 8949a1bdf..13b9c97d5 100755 --- a/test/sharness/t0131-multinode-client-routing.sh +++ b/test/sharness/t0131-multinode-client-routing.sh @@ -24,7 +24,7 @@ check_file_fetch() { 
run_single_file_test() { test_expect_success "add a file on node1" ' - random 1000000 > filea && + random-data -size=1000000 > filea && FILEA_HASH=$(ipfsi 1 add -q filea) ' @@ -57,7 +57,7 @@ test_expect_success "connect up nodes" ' ' test_expect_success "add a file on a node in client mode" ' - random 1000000 > filea && + random-data -size=1000000 > filea && FILE_HASH=$(ipfsi 8 add -q filea) ' diff --git a/test/sharness/t0140-swarm.sh b/test/sharness/t0140-swarm.sh index d65831d3e..37bb44b64 100755 --- a/test/sharness/t0140-swarm.sh +++ b/test/sharness/t0140-swarm.sh @@ -58,9 +58,9 @@ test_launch_ipfs_daemon test_expect_success 'Addresses.Announce affects addresses' ' ipfs swarm addrs local >actual && - grep "/ip4/1.2.3.4/tcp/1234" actual && + test_should_contain "/ip4/1.2.3.4/tcp/1234" actual && ipfs id -f"" | xargs -n1 echo >actual && - grep "/ip4/1.2.3.4/tcp/1234" actual + test_should_contain "/ip4/1.2.3.4/tcp/1234" actual ' test_kill_ipfs_daemon @@ -81,18 +81,18 @@ test_launch_ipfs_daemon test_expect_success 'Addresses.AppendAnnounce is applied on top of Announce' ' ipfs swarm addrs local >actual && - grep "/ip4/1.2.3.4/tcp/1234" actual && - grep "/dnsaddr/dynamic.example.com" actual && - grep "/ip4/10.20.30.40/tcp/4321" actual && + test_should_contain "/ip4/1.2.3.4/tcp/1234" actual && + test_should_contain "/dnsaddr/dynamic.example.com" actual && + test_should_contain "/ip4/10.20.30.40/tcp/4321" actual && ipfs id -f"" | xargs -n1 echo | tee actual && - grep "/ip4/1.2.3.4/tcp/1234/p2p" actual && - grep "/dnsaddr/dynamic.example.com/p2p/" actual && - grep "/ip4/10.20.30.40/tcp/4321/p2p/" actual + test_should_contain "/ip4/1.2.3.4/tcp/1234/p2p" actual && + test_should_contain "/dnsaddr/dynamic.example.com/p2p/" actual && + test_should_contain "/ip4/10.20.30.40/tcp/4321/p2p/" actual ' test_kill_ipfs_daemon -noAnnounceCfg='["/ip4/1.2.3.4/tcp/1234"]' +noAnnounceCfg='["/ip4/1.2.3.4/tcp/1234", "/ip4/10.20.30.40/tcp/4321"]' test_expect_success "test_config_set 
succeeds" " ipfs config --json Addresses.NoAnnounce '$noAnnounceCfg' " @@ -101,11 +101,11 @@ test_launch_ipfs_daemon test_expect_success "Addresses.NoAnnounce affects addresses from Announce and AppendAnnounce" ' ipfs swarm addrs local >actual && - grep -v "/ip4/1.2.3.4/tcp/1234" actual && - grep -v "/ip4/10.20.30.40/tcp/4321" actual && + test_should_not_contain "/ip4/1.2.3.4/tcp/1234" actual && + test_should_not_contain "/ip4/10.20.30.40/tcp/4321" actual && ipfs id -f"" | xargs -n1 echo >actual && - grep -v "/ip4/1.2.3.4/tcp/1234" actual && - grep -v "//ip4/10.20.30.40/tcp/4321" actual + test_should_not_contain "/ip4/1.2.3.4/tcp/1234" actual && + test_should_not_contain "/ip4/10.20.30.40/tcp/4321" actual ' test_kill_ipfs_daemon @@ -119,9 +119,9 @@ test_launch_ipfs_daemon test_expect_success "Addresses.NoAnnounce with /ipcidr affects addresses" ' ipfs swarm addrs local >actual && - grep -v "/ip4/1.2.3.4/tcp/1234" actual && + test_should_not_contain "/ip4/1.2.3.4/tcp/1234" actual && ipfs id -f"" | xargs -n1 echo >actual && - grep -v "/ip4/1.2.3.4/tcp/1234" actual + test_should_not_contain "/ip4/1.2.3.4/tcp/1234" actual ' test_kill_ipfs_daemon diff --git a/test/sharness/t0165-keystore-data/README.md b/test/sharness/t0165-keystore-data/README.md index 4c0a68b51..298b7708e 100644 --- a/test/sharness/t0165-keystore-data/README.md +++ b/test/sharness/t0165-keystore-data/README.md @@ -8,7 +8,7 @@ openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 > openssl_rsa.pem ``` secp key used in the 'restrict import key' test. 
-From: https://www.openssl.org/docs/man1.1.1/man1/openssl-genpkey.html +From: https://docs.openssl.org/1.1.1/man1/genpkey/ ```bash openssl genpkey -genparam -algorithm EC -out ecp.pem \ -pkeyopt ec_paramgen_curve:secp384r1 \ diff --git a/test/sharness/t0181-private-network.sh b/test/sharness/t0181-private-network.sh index 46dc45cdf..efae18b15 100755 --- a/test/sharness/t0181-private-network.sh +++ b/test/sharness/t0181-private-network.sh @@ -10,6 +10,10 @@ test_description="Test private network feature" test_init_ipfs +test_expect_success "disable AutoConf for private network tests" ' + ipfs config --json AutoConf.Enabled false +' + export LIBP2P_FORCE_PNET=1 test_expect_success "daemon won't start with force pnet env but with no key" ' @@ -26,7 +30,7 @@ test_expect_success "daemon output includes info about the reason" ' pnet_key() { echo '/key/swarm/psk/1.0.0/' echo '/bin/' - random 32 + random-data -size=32 } pnet_key > "${IPFS_PATH}/swarm.key" @@ -36,7 +40,9 @@ LIBP2P_FORCE_PNET=1 test_launch_ipfs_daemon test_expect_success "set up iptb testbed" ' iptb testbed create -type localipfs -count 5 -force -init && iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true && - iptb run -- ipfs config --json Addresses.Swarm '"'"'["/ip4/127.0.0.1/tcp/0"]'"'"' + iptb run -- ipfs config --json "Swarm.Transports.Network.Websocket" false && + iptb run -- ipfs config --json Addresses.Swarm '"'"'["/ip4/127.0.0.1/tcp/0"]'"'"' && + iptb run -- ipfs config --json AutoConf.Enabled false ' set_key() { @@ -100,7 +106,7 @@ run_single_file_test() { node2=$2 test_expect_success "add a file on node$node1" ' - random 1000000 > filea && + random-data -size=1000000 > filea && FILEA_HASH=$(ipfsi $node1 add -q filea) ' @@ -135,4 +141,23 @@ test_expect_success "stop testbed" ' test_kill_ipfs_daemon +# Test that AutoConf with default mainnet URL fails on private networks +test_expect_success "setup test repo with AutoConf enabled and private network" ' + export 
IPFS_PATH="$(pwd)/.ipfs-autoconf-test" && + ipfs init --profile=test > /dev/null && + ipfs config --json AutoConf.Enabled true && + pnet_key > "${IPFS_PATH}/swarm.key" +' + +test_expect_success "daemon fails with AutoConf + private network error" ' + export IPFS_PATH="$(pwd)/.ipfs-autoconf-test" && + test_expect_code 1 ipfs daemon > autoconf_stdout 2> autoconf_stderr +' + +test_expect_success "error message mentions AutoConf and private network conflict" ' + grep "AutoConf cannot use the default mainnet URL" autoconf_stderr > /dev/null && + grep "private network.*swarm.key" autoconf_stderr > /dev/null && + grep "AutoConf.Enabled=false" autoconf_stderr > /dev/null +' + test_done diff --git a/test/sharness/t0182-circuit-relay.sh b/test/sharness/t0182-circuit-relay.sh index c79edfc8e..d7d112148 100755 --- a/test/sharness/t0182-circuit-relay.sh +++ b/test/sharness/t0182-circuit-relay.sh @@ -11,7 +11,7 @@ test_expect_success 'init iptb' ' iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true ' -# Network toplogy: A <-> Relay <-> B +# Network topology: A <-> Relay <-> B test_expect_success 'start up nodes for configuration' ' iptb start -wait -- --routing=none ' diff --git a/test/sharness/t0220-bitswap.sh b/test/sharness/t0220-bitswap.sh index 3575f0d33..412437651 100755 --- a/test/sharness/t0220-bitswap.sh +++ b/test/sharness/t0220-bitswap.sh @@ -18,7 +18,6 @@ test_expect_success "'ipfs bitswap stat' succeeds" ' test_expect_success "'ipfs bitswap stat' output looks good" ' cat <expected && bitswap status - provides buffer: 0 / 256 blocks received: 0 blocks sent: 0 data received: 0 @@ -56,7 +55,6 @@ test_expect_success "'ipfs bitswap stat' succeeds" ' test_expect_success "'ipfs bitswap stat' output looks good" ' cat <expected && bitswap status - provides buffer: 0 / 256 blocks received: 0 blocks sent: 0 data received: 0 @@ -85,7 +83,6 @@ test_expect_success "'ipfs bitswap stat --human' succeeds" ' test_expect_success "'ipfs bitswap stat --human' output 
looks good" ' cat <expected && bitswap status - provides buffer: 0 / 256 blocks received: 0 blocks sent: 0 data received: 0 B diff --git a/test/sharness/t0231-channel-streaming.sh b/test/sharness/t0231-channel-streaming.sh index 36e855fb7..147a13b55 100755 --- a/test/sharness/t0231-channel-streaming.sh +++ b/test/sharness/t0231-channel-streaming.sh @@ -16,7 +16,7 @@ get_api_port() { test_ls_cmd() { test_expect_success "make a file with multiple refs" ' - HASH=$(random 1000000 | ipfs add -q) + HASH=$(random-data -size=1000000 | ipfs add -q) ' test_expect_success "can get refs through curl" ' diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index 9c01a5bcf..b86ee56f5 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -674,6 +674,18 @@ test_files_api() { ipfs files ls /adir | grep foobar ' + test_expect_success "test copy --force overwrites files" ' + ipfs files cp /ipfs/$FILE1 /file1 && + ipfs files cp /ipfs/$FILE2 /file2 && + ipfs files cp --force /file1 /file2 && + test "`ipfs files read /file1`" = "`ipfs files read /file2`" + ' + + test_expect_success "clean up" ' + ipfs files rm /file1 && + ipfs files rm /file2 + ' + test_expect_success "should fail to write file and create intermediate directories with no --parents flag set $EXTRA" ' echo "ipfs rocks" | test_must_fail ipfs files write --create /parents/foo/ipfs.txt ' @@ -849,7 +861,7 @@ tests_for_files_api "with-daemon" test_kill_ipfs_daemon test_expect_success "enable sharding in config" ' - ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\"" + ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1B\"" ' test_launch_ipfs_daemon_without_network @@ -880,7 +892,7 @@ test_expect_success "set up automatic sharding/unsharding data" ' ' test_expect_success "reset automatic sharding" ' - ipfs config --json Internal.UnixFSShardingSizeThreshold null + ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold null ' 
test_launch_ipfs_daemon_without_network diff --git a/test/sharness/t0260-sharding.sh b/test/sharness/t0260-sharding.sh index 85e4a7ca7..7b0094fd4 100755 --- a/test/sharness/t0260-sharding.sh +++ b/test/sharness/t0260-sharding.sh @@ -34,7 +34,7 @@ test_init_ipfs UNSHARDED="QmavrTrQG4VhoJmantURAYuw3bowq3E2WcvP36NRQDAC1N" test_expect_success "force sharding off" ' -ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1G\"" +ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1G\"" ' test_add_dir "$UNSHARDED" @@ -46,7 +46,7 @@ test_add_dir "$UNSHARDED" test_kill_ipfs_daemon test_expect_success "force sharding on" ' - ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\"" + ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1B\"" ' SHARDED="QmSCJD1KYLhVVHqBK3YyXuoEqHt7vggyJhzoFYbT8v1XYL" diff --git a/test/sharness/t0270-filestore.sh b/test/sharness/t0270-filestore.sh index 82b7ae492..fc377c2d2 100755 --- a/test/sharness/t0270-filestore.sh +++ b/test/sharness/t0270-filestore.sh @@ -13,7 +13,7 @@ test_expect_success "create a dataset" ' random-files -seed=483 -depth=3 -dirs=4 -files=6 -filesize=1000000 somedir > /dev/null ' -EXPHASH="QmW4JLyeTxEWGwa4mkE9mHzdtAkyhMX2ToGFEKZNjCiJud" +EXPHASH="QmXKtATsEt42CF5JoSsmzJstrvwEB5P89YQtdX4mdf9E3M" get_repo_size() { disk_usage "$IPFS_PATH" @@ -63,7 +63,7 @@ test_filestore_adds() { init_ipfs_filestore() { test_expect_success "clean up old node" ' - rm -rf "$IPFS_PATH" mountdir ipfs ipns + rm -rf "$IPFS_PATH" mountdir ipfs ipns mfs ' test_init_ipfs diff --git a/test/sharness/t0271-filestore-utils.sh b/test/sharness/t0271-filestore-utils.sh index c7e814b9d..5f7111bdd 100755 --- a/test/sharness/t0271-filestore-utils.sh +++ b/test/sharness/t0271-filestore-utils.sh @@ -10,7 +10,7 @@ test_description="Test out the filestore nocopy functionality" test_init_filestore() { test_expect_success "clean up old node" ' - rm -rf "$IPFS_PATH" mountdir ipfs ipns + rm -rf "$IPFS_PATH" mountdir ipfs ipns mfs ' 
test_init_ipfs @@ -24,9 +24,9 @@ test_init_dataset() { test_expect_success "create a dataset" ' rm -r somedir mkdir somedir && - random 1000 1 > somedir/file1 && - random 10000 2 > somedir/file2 && - random 1000000 3 > somedir/file3 + random-data -size=1000 -seed=1 > somedir/file1 && + random-data -size=10000 -seed=2 > somedir/file2 && + random-data -size=1000000 -seed=3 > somedir/file3 ' } @@ -35,34 +35,48 @@ test_init() { test_init_dataset } -EXPHASH="QmRueCuPMYYvdxWz1vWncF7wzCScEx4qasZXo5aVBb1R4V" +EXPHASH="QmXqfraAT3U8ct14PPPXcFkWyvmqUZazLdo29GXTKSHkP4" cat < ls_expect_file_order -bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 +bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu 1000 somedir/file1 0 +bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 10000 somedir/file2 0 +bafkreiemzfmzws23c2po4m6deiueknqfty7r3voes3e3zujmobrooc2ngm 262144 somedir/file3 0 +bafkreihgm53yhxn427lnfdwhqgpawc62qejog7gega5kqb6uwbyhjm47hu 262144 somedir/file3 262144 +bafkreigl2pjptgxz6cexcnua56zc5dwsyrc4ph2eulmcb634oes6gzvmuy 262144 somedir/file3 524288 +bafkreifjcthslybjizk36xffcsb32fsbguxz3ptkl7723wz4u3qikttmam 213568 somedir/file3 786432 EOF sort < ls_expect_file_order > ls_expect_key_order -FILE1_HASH=bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq -FILE2_HASH=bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey -FILE3_HASH=QmfE4SDQazxTD7u8VTYs9AJqQL8rrJPUAorLeJXKSZrVf9 +FILE1_HASH=bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu 
+FILE2_HASH=bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 +FILE3_HASH=QmYEZtRGGk8rgM8MetegLLRHMKskPCg7zWpmQQAo3cQiN5 cat < verify_expect_file_order -ok bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -ok bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -ok bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -ok bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -ok bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -ok bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 +ok bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu 1000 somedir/file1 0 +ok bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 10000 somedir/file2 0 +ok bafkreiemzfmzws23c2po4m6deiueknqfty7r3voes3e3zujmobrooc2ngm 262144 somedir/file3 0 +ok bafkreihgm53yhxn427lnfdwhqgpawc62qejog7gega5kqb6uwbyhjm47hu 262144 somedir/file3 262144 +ok bafkreigl2pjptgxz6cexcnua56zc5dwsyrc4ph2eulmcb634oes6gzvmuy 262144 somedir/file3 524288 +ok bafkreifjcthslybjizk36xffcsb32fsbguxz3ptkl7723wz4u3qikttmam 213568 somedir/file3 786432 EOF sort < verify_expect_file_order > verify_expect_key_order +cat < verify_rm_expect +ok bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 10000 somedir/file2 0 keep +ok bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu 1000 somedir/file1 0 keep +changed bafkreiemzfmzws23c2po4m6deiueknqfty7r3voes3e3zujmobrooc2ngm 262144 somedir/file3 0 remove +changed bafkreifjcthslybjizk36xffcsb32fsbguxz3ptkl7723wz4u3qikttmam 213568 somedir/file3 786432 remove +changed bafkreigl2pjptgxz6cexcnua56zc5dwsyrc4ph2eulmcb634oes6gzvmuy 262144 somedir/file3 524288 remove +changed bafkreihgm53yhxn427lnfdwhqgpawc62qejog7gega5kqb6uwbyhjm47hu 262144 somedir/file3 262144 remove +EOF + +cat < verify_after_rm_expect +ok 
bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 10000 somedir/file2 0 +ok bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu 1000 somedir/file1 0 +EOF + IPFS_CMD="ipfs" test_filestore_adds() { @@ -155,6 +169,27 @@ test_filestore_verify() { test_init_dataset } +test_filestore_rm_bad_blocks() { + test_filestore_state + + test_expect_success "change first bit of file" ' + dd if=/dev/zero of=somedir/file3 bs=1024 count=1 + ' + + test_expect_success "'$IPFS_CMD filestore verify --remove-bad-blocks' shows changed file removed" ' + $IPFS_CMD filestore verify --remove-bad-blocks > verify_rm_actual && + test_cmp verify_rm_expect verify_rm_actual + ' + + test_expect_success "'$IPFS_CMD filestore verify' shows only files that were not removed" ' + $IPFS_CMD filestore verify > verify_after && + test_cmp verify_after_rm_expect verify_after + ' + + # reset the state for the next test + test_init_dataset +} + test_filestore_dups() { # make sure the filestore is in a clean state test_filestore_state @@ -179,6 +214,8 @@ test_filestore_verify test_filestore_dups +test_filestore_rm_bad_blocks + # # With daemon # @@ -197,34 +234,36 @@ test_filestore_dups test_kill_ipfs_daemon +test_filestore_rm_bad_blocks + ## ## base32 ## -EXPHASH="bafybeibva2uh4qpwjo2yr5g7m7nd5kfq64atydq77qdlrikh5uejwqdcbi" +EXPHASH="bafybeienfbjfbywu5y44i5qm4wxajblgy5a6xuc4eepjaw5fq223wwsy3m" cat < ls_expect_file_order -bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 +bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu 1000 
somedir/file1 0 +bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 10000 somedir/file2 0 +bafkreiemzfmzws23c2po4m6deiueknqfty7r3voes3e3zujmobrooc2ngm 262144 somedir/file3 0 +bafkreihgm53yhxn427lnfdwhqgpawc62qejog7gega5kqb6uwbyhjm47hu 262144 somedir/file3 262144 +bafkreigl2pjptgxz6cexcnua56zc5dwsyrc4ph2eulmcb634oes6gzvmuy 262144 somedir/file3 524288 +bafkreifjcthslybjizk36xffcsb32fsbguxz3ptkl7723wz4u3qikttmam 213568 somedir/file3 786432 EOF sort < ls_expect_file_order > ls_expect_key_order -FILE1_HASH=bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq -FILE2_HASH=bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey -FILE3_HASH=bafybeih24zygzr2orr5q62mjnbgmjwgj6rx3tp74pwcqsqth44rloncllq +FILE1_HASH=bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu +FILE2_HASH=bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 +FILE3_HASH=bafybeietaxxjghilcjhc2m4zcmicm7yjvkjdfkamc3ct2hq4gmsb3shqsi cat < verify_expect_file_order -ok bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0 -ok bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0 -ok bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 -ok bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144 -ok bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288 -ok bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432 +ok bafkreidx7ivgllulfkzyoo4oa7dfrg4mjmudg2qgdivoooj4s7lh3m5nqu 1000 somedir/file1 0 +ok bafkreic2wqrsyr3y3qgzbvufen2w25r3p3zljckqyxkpcagsxz3zdcosd4 10000 somedir/file2 0 +ok bafkreiemzfmzws23c2po4m6deiueknqfty7r3voes3e3zujmobrooc2ngm 262144 somedir/file3 0 +ok bafkreihgm53yhxn427lnfdwhqgpawc62qejog7gega5kqb6uwbyhjm47hu 262144 somedir/file3 262144 +ok bafkreigl2pjptgxz6cexcnua56zc5dwsyrc4ph2eulmcb634oes6gzvmuy 262144 somedir/file3 524288 +ok 
bafkreifjcthslybjizk36xffcsb32fsbguxz3ptkl7723wz4u3qikttmam 213568 somedir/file3 786432 EOF sort < verify_expect_file_order > verify_expect_key_order @@ -243,6 +282,8 @@ test_filestore_verify test_filestore_dups +test_filestore_rm_bad_blocks + # # With daemon # @@ -263,6 +304,8 @@ test_kill_ipfs_daemon test_done +test_filestore_rm_bad_blocks + ## test_done diff --git a/test/sharness/t0272-urlstore.sh b/test/sharness/t0272-urlstore.sh index 8fa7ff3b8..47e95a8ca 100755 --- a/test/sharness/t0272-urlstore.sh +++ b/test/sharness/t0272-urlstore.sh @@ -10,9 +10,9 @@ test_description="Test out the urlstore functionality" test_expect_success "create some random files" ' - random 2222 7 > file1 && - random 500000 7 > file2 && - random 50000000 7 > file3 + random-data -size=2222 -seed=7 > file1 && + random-data -size=500000 -seed=7 > file2 && + random-data -size=50000000 -seed=7 > file3 ' test_urlstore() { @@ -69,9 +69,9 @@ test_urlstore() { ' cat < ls_expect -bafkreiafqvawjpukk4achpu7edu4d6x5dbzwgigl6nxunjif3ser6bnfpu 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 -bafkreia46t3jwchosehfcq7kponx26shcjkatxek4m2tzzd67i6o3frpou 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 -bafkreiga7ukbxrxs26fiseijjd7zdd6gmlrmnxhalwfbagxwjv7ck4o34a 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmcHm3BL2cXuQ6rJdKQgPrmT9suqGkfy2KzH3MkXPEBXU6 0 +bafkreiconmdoujderxi757nf4wjpo4ukbhlo6mmxs6pg3yl53ln3ykldvi 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmUNEBSK2uPLSZU3Dj6XbSHjdGze4huWxESx2R4Ef1cKRW 0 +bafkreifybqsfcheqkxzlhuuvoi3u6wz42kic4yqohvkia2i5fg3mpkqt3i 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmTgZc5bhTHUcqGN8rRP9oTJBv1UeJVWufPMPiUfbP9Ghs 0 +bafkreigxuuyoickqhwxu4kjckmgfqb7ygd426qiakryvvstixy523imym4 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmTgZc5bhTHUcqGN8rRP9oTJBv1UeJVWufPMPiUfbP9Ghs 262144 EOF test_expect_success "ipfs filestore ls works with urls" ' @@ -80,9 +80,9 @@ EOF ' cat < verify_expect -ok 
bafkreiafqvawjpukk4achpu7edu4d6x5dbzwgigl6nxunjif3ser6bnfpu 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 -ok bafkreia46t3jwchosehfcq7kponx26shcjkatxek4m2tzzd67i6o3frpou 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 -ok bafkreiga7ukbxrxs26fiseijjd7zdd6gmlrmnxhalwfbagxwjv7ck4o34a 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmcHm3BL2cXuQ6rJdKQgPrmT9suqGkfy2KzH3MkXPEBXU6 0 +ok bafkreifybqsfcheqkxzlhuuvoi3u6wz42kic4yqohvkia2i5fg3mpkqt3i 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmTgZc5bhTHUcqGN8rRP9oTJBv1UeJVWufPMPiUfbP9Ghs 0 +ok bafkreigxuuyoickqhwxu4kjckmgfqb7ygd426qiakryvvstixy523imym4 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmTgZc5bhTHUcqGN8rRP9oTJBv1UeJVWufPMPiUfbP9Ghs 262144 +ok bafkreiconmdoujderxi757nf4wjpo4ukbhlo6mmxs6pg3yl53ln3ykldvi 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmUNEBSK2uPLSZU3Dj6XbSHjdGze4huWxESx2R4Ef1cKRW 0 EOF test_expect_success "ipfs filestore verify works with urls" ' @@ -116,8 +116,8 @@ EOF ' cat < verify_expect_2 -error bafkreiafqvawjpukk4achpu7edu4d6x5dbzwgigl6nxunjif3ser6bnfpu 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 -error bafkreia46t3jwchosehfcq7kponx26shcjkatxek4m2tzzd67i6o3frpou 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 +error bafkreifybqsfcheqkxzlhuuvoi3u6wz42kic4yqohvkia2i5fg3mpkqt3i 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmTgZc5bhTHUcqGN8rRP9oTJBv1UeJVWufPMPiUfbP9Ghs 0 +error bafkreigxuuyoickqhwxu4kjckmgfqb7ygd426qiakryvvstixy523imym4 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmTgZc5bhTHUcqGN8rRP9oTJBv1UeJVWufPMPiUfbP9Ghs 262144 EOF test_expect_success "ipfs filestore verify is correct" ' diff --git a/test/sharness/t0275-cid-security.sh b/test/sharness/t0275-cid-security.sh index e8d265550..7f8764d3f 100755 --- a/test/sharness/t0275-cid-security.sh +++ b/test/sharness/t0275-cid-security.sh @@ -15,7 +15,7 @@ test_expect_success "adding using unsafe 
function fails with error" ' ' test_expect_success "error reason is pointed out" ' - grep "insecure hash functions not allowed" add_out || test_fsh cat add_out + grep "potentially insecure hash functions not allowed" add_out || test_fsh cat add_out ' test_expect_success "adding using too short of a hash function gives out an error" ' @@ -23,7 +23,7 @@ test_expect_success "adding using too short of a hash function gives out an erro ' test_expect_success "error reason is pointed out" ' - grep "hashes must be at least 20 bytes long" block_out + grep "digest too small" block_out ' @@ -35,7 +35,7 @@ test_cat_get() { test_expect_success "error reason is pointed out" ' - grep "insecure hash functions not allowed" ipfs_cat + grep "potentially insecure hash functions not allowed" ipfs_cat ' @@ -45,7 +45,7 @@ test_cat_get() { ' test_expect_success "error reason is pointed out" ' - grep "hashes must be at least 20 bytes long" ipfs_get + grep "digest too small" ipfs_get ' } diff --git a/test/sharness/t0276-cidv0v1.sh b/test/sharness/t0276-cidv0v1.sh index c810f4544..04a345692 100755 --- a/test/sharness/t0276-cidv0v1.sh +++ b/test/sharness/t0276-cidv0v1.sh @@ -15,8 +15,8 @@ test_init_ipfs # test_expect_success "create two small files" ' - random 1000 7 > afile - random 1000 9 > bfile + random-data -size=1000 -seed=7 > afile + random-data -size=1000 -seed=9 > bfile ' test_expect_success "add file using CIDv1 but don't pin" ' diff --git a/test/sharness/t0290-cid.sh b/test/sharness/t0290-cid.sh index 8fb36e30e..97ec0cd42 100755 --- a/test/sharness/t0290-cid.sh +++ b/test/sharness/t0290-cid.sh @@ -4,6 +4,11 @@ test_description="Test cid commands" . lib/test-lib.sh +# NOTE: Primary tests for "ipfs cid" commands are in test/cli/cid_test.go +# These sharness tests are kept for backward compatibility but new tests +# should be added to test/cli/cid_test.go instead. If any of these tests +# break, consider removing them and updating only the test/cli version. 
+ # note: all "ipfs cid" commands should work without requiring a repo CIDv0="QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv" @@ -101,7 +106,7 @@ v 118 base32hex V 86 base32hexupper z 122 base58btc Z 90 base58flickr - 128640 base256emoji +🚀 128640 base256emoji EOF cat < codecs_expect @@ -113,6 +118,7 @@ cat < codecs_expect 120 git-raw 123 torrent-info 124 torrent-file + 128 blake3-hashseq 129 leofcoin-block 130 leofcoin-tx 131 leofcoin-pr @@ -128,7 +134,7 @@ cat < codecs_expect 151 eth-account-snapshot 152 eth-storage-trie 153 eth-receipt-log-trie - 154 eth-reciept-log + 154 eth-receipt-log 176 bitcoin-block 177 bitcoin-tx 178 bitcoin-witness-commitment @@ -146,7 +152,7 @@ cat < codecs_expect 297 dag-json 496 swhid-1-snp 512 json -46083 urdca-2015-canon +46083 rdfc-1 46593 json-jcs EOF @@ -239,13 +245,57 @@ cat < hashes_expect EOF test_expect_success "cid bases" ' - cut -c 12- bases_expect > expect && + cat <<-EOF > expect + identity + base2 + base32 + base32upper + base32pad + base32padupper + base16 + base16upper + base36 + base36upper + base64 + base64pad + base32hexpad + base32hexpadupper + base64url + base64urlpad + base32hex + base32hexupper + base58btc + base58flickr + base256emoji + EOF ipfs cid bases > actual && test_cmp expect actual ' test_expect_success "cid bases --prefix" ' - cut -c 1-3,12- bases_expect > expect && + cat <<-EOF > expect + identity + 0 base2 + b base32 + B base32upper + c base32pad + C base32padupper + f base16 + F base16upper + k base36 + K base36upper + m base64 + M base64pad + t base32hexpad + T base32hexpadupper + u base64url + U base64urlpad + v base32hex + V base32hexupper + z base58btc + Z base58flickr + 🚀 base256emoji + EOF ipfs cid bases --prefix > actual && test_cmp expect actual ' diff --git a/test/sharness/t0500-issues-and-regressions-offline.sh b/test/sharness/t0500-issues-and-regressions-offline.sh index 5a361aae9..d185e7bec 100755 --- a/test/sharness/t0500-issues-and-regressions-offline.sh +++ 
b/test/sharness/t0500-issues-and-regressions-offline.sh @@ -22,7 +22,7 @@ test_expect_success "ipfs pin ls --help succeeds when input remains open" ' ' test_expect_success "ipfs add on 1MB from stdin woks" ' - random 1048576 42 | ipfs add -q > 1MB.hash + random-data -size=1048576 -seed=42 | ipfs add -q > 1MB.hash ' test_expect_success "'ipfs refs -r -e \$(cat 1MB.hash)' succeeds" ' diff --git a/test/unit/Rules.mk b/test/unit/Rules.mk index 69404637c..915d08f9a 100644 --- a/test/unit/Rules.mk +++ b/test/unit/Rules.mk @@ -2,7 +2,8 @@ include mk/header.mk CLEAN += $(d)/gotest.json $(d)/gotest.junit.xml -$(d)/gotest.junit.xml: test/bin/gotestsum coverage/unit_tests.coverprofile +# Convert gotest.json (produced by test_unit) to JUnit XML format +$(d)/gotest.junit.xml: test/bin/gotestsum $(d)/gotest.json gotestsum --no-color --junitfile $@ --raw-command cat $(@D)/gotest.json include mk/footer.mk diff --git a/thirdparty/README.md b/thirdparty/README.md index a68b51c5d..a4774a4af 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -1,5 +1,2 @@ -thirdparty consists of Golang packages that contain no go-ipfs dependencies and -may be vendored ipfs/go-ipfs at a later date. - packages under this directory _must not_ import packages under -`ipfs/go-ipfs` that are not also under `thirdparty`. +`ipfs/kubo` that are not also under `thirdparty`. diff --git a/thirdparty/assert/assert.go b/thirdparty/assert/assert.go deleted file mode 100644 index f737d191e..000000000 --- a/thirdparty/assert/assert.go +++ /dev/null @@ -1,25 +0,0 @@ -package assert - -import "testing" - -func Nil(err error, t *testing.T, msgs ...string) { - if err != nil { - t.Fatal(msgs, "error:", err) - } -} - -func True(v bool, t *testing.T, msgs ...string) { - if !v { - t.Fatal(msgs) - } -} - -func False(v bool, t *testing.T, msgs ...string) { - True(!v, t, msgs...) 
-} - -func Err(err error, t *testing.T, msgs ...string) { - if err == nil { - t.Fatal(msgs, "error:", err) - } -} diff --git a/thirdparty/dir/dir.go b/thirdparty/dir/dir.go deleted file mode 100644 index 5aa93c329..000000000 --- a/thirdparty/dir/dir.go +++ /dev/null @@ -1,25 +0,0 @@ -package dir - -// TODO move somewhere generic - -import ( - "errors" - "os" - "path/filepath" -) - -// Writable ensures the directory exists and is writable. -func Writable(path string) error { - // Construct the path if missing - if err := os.MkdirAll(path, os.ModePerm); err != nil { - return err - } - // Check the directory is writable - if f, err := os.Create(filepath.Join(path, "._check_writable")); err == nil { - f.Close() - os.Remove(f.Name()) - } else { - return errors.New("'" + path + "' is not writable") - } - return nil -} diff --git a/thirdparty/notifier/notifier.go b/thirdparty/notifier/notifier.go deleted file mode 100644 index bb8860702..000000000 --- a/thirdparty/notifier/notifier.go +++ /dev/null @@ -1,142 +0,0 @@ -// Package notifier provides a simple notification dispatcher -// meant to be embedded in larger structures who wish to allow -// clients to sign up for event notifications. -package notifier - -import ( - "sync" - - process "github.com/jbenet/goprocess" - ratelimit "github.com/jbenet/goprocess/ratelimit" -) - -// Notifiee is a generic interface. Clients implement -// their own Notifiee interfaces to ensure type-safety -// of notifications: -// -// type RocketNotifiee interface{ -// Countdown(r Rocket, countdown time.Duration) -// LiftedOff(Rocket) -// ReachedOrbit(Rocket) -// Detached(Rocket, Capsule) -// Landed(Rocket) -// } -type Notifiee interface{} - -// Notifier is a notification dispatcher. It's meant -// to be composed, and its zero-value is ready to be used. 
-// -// type Rocket struct { -// notifier notifier.Notifier -// } -type Notifier struct { - mu sync.RWMutex // guards notifiees - nots map[Notifiee]struct{} - lim *ratelimit.RateLimiter -} - -// RateLimited returns a rate limited Notifier. only limit goroutines -// will be spawned. If limit is zero, no rate limiting happens. This -// is the same as `Notifier{}`. -func RateLimited(limit int) *Notifier { - n := &Notifier{} - if limit > 0 { - n.lim = ratelimit.NewRateLimiter(process.Background(), limit) - } - return n -} - -// Notify signs up Notifiee e for notifications. This function -// is meant to be called behind your own type-safe function(s): -// -// // generic function for pattern-following -// func (r *Rocket) Notify(n Notifiee) { -// r.notifier.Notify(n) -// } -// -// // or as part of other functions -// func (r *Rocket) Onboard(a Astronaut) { -// r.astronauts = append(r.austronauts, a) -// r.notifier.Notify(a) -// } -func (n *Notifier) Notify(e Notifiee) { - n.mu.Lock() - if n.nots == nil { // so that zero-value is ready to be used. - n.nots = make(map[Notifiee]struct{}) - } - n.nots[e] = struct{}{} - n.mu.Unlock() -} - -// StopNotify stops notifying Notifiee e. This function -// is meant to be called behind your own type-safe function(s): -// -// // generic function for pattern-following -// func (r *Rocket) StopNotify(n Notifiee) { -// r.notifier.StopNotify(n) -// } -// -// // or as part of other functions -// func (r *Rocket) Detach(c Capsule) { -// r.notifier.StopNotify(c) -// r.capsule = nil -// } -func (n *Notifier) StopNotify(e Notifiee) { - n.mu.Lock() - if n.nots != nil { // so that zero-value is ready to be used. - delete(n.nots, e) - } - n.mu.Unlock() -} - -// NotifyAll messages the notifier's notifiees with a given notification. -// This is done by calling the given function with each notifiee. 
It is -// meant to be called with your own type-safe notification functions: -// -// func (r *Rocket) Launch() { -// r.notifyAll(func(n Notifiee) { -// n.Launched(r) -// }) -// } -// -// // make it private so only you can use it. This function is necessary -// // to make sure you only up-cast in one place. You control who you added -// // to be a notifiee. If Go adds generics, maybe we can get rid of this -// // method but for now it is like wrapping a type-less container with -// // a type safe interface. -// func (r *Rocket) notifyAll(notify func(Notifiee)) { -// r.notifier.NotifyAll(func(n notifier.Notifiee) { -// notify(n.(Notifiee)) -// }) -// } -// -// Note well: each notification is launched in its own goroutine, so they -// can be processed concurrently, and so that whatever the notification does -// it _never_ blocks out the client. This is so that consumers _cannot_ add -// hooks into your object that block you accidentally. -func (n *Notifier) NotifyAll(notify func(Notifiee)) { - n.mu.Lock() - defer n.mu.Unlock() - - if n.nots == nil { // so that zero-value is ready to be used. - return - } - - // no rate limiting. - if n.lim == nil { - for notifiee := range n.nots { - go notify(notifiee) - } - return - } - - // with rate limiting. - n.lim.Go(func(worker process.Process) { - for notifiee := range n.nots { - notifiee := notifiee // rebind for loop data races - n.lim.LimitedGo(func(worker process.Process) { - notify(notifiee) - }) - } - }) -} diff --git a/thirdparty/notifier/notifier_test.go b/thirdparty/notifier/notifier_test.go deleted file mode 100644 index 401b3b02a..000000000 --- a/thirdparty/notifier/notifier_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package notifier - -import ( - "fmt" - "sync" - "testing" - "time" -) - -// test data structures. 
-type Router struct { - queue chan Packet - notifier Notifier -} - -type Packet struct{} - -type RouterNotifiee interface { - Enqueued(*Router, Packet) - Forwarded(*Router, Packet) - Dropped(*Router, Packet) -} - -func (r *Router) Notify(n RouterNotifiee) { - r.notifier.Notify(n) -} - -func (r *Router) StopNotify(n RouterNotifiee) { - r.notifier.StopNotify(n) -} - -func (r *Router) notifyAll(notify func(n RouterNotifiee)) { - r.notifier.NotifyAll(func(n Notifiee) { - notify(n.(RouterNotifiee)) - }) -} - -func (r *Router) Receive(p Packet) { - select { - case r.queue <- p: // enqueued - r.notifyAll(func(n RouterNotifiee) { - n.Enqueued(r, p) - }) - - default: // drop - r.notifyAll(func(n RouterNotifiee) { - n.Dropped(r, p) - }) - } -} - -func (r *Router) Forward() { - p := <-r.queue - r.notifyAll(func(n RouterNotifiee) { - n.Forwarded(r, p) - }) -} - -type Metrics struct { - enqueued int - forwarded int - dropped int - received chan struct{} - sync.Mutex -} - -func (m *Metrics) Enqueued(*Router, Packet) { - m.Lock() - m.enqueued++ - m.Unlock() - if m.received != nil { - m.received <- struct{}{} - } -} - -func (m *Metrics) Forwarded(*Router, Packet) { - m.Lock() - m.forwarded++ - m.Unlock() - if m.received != nil { - m.received <- struct{}{} - } -} - -func (m *Metrics) Dropped(*Router, Packet) { - m.Lock() - m.dropped++ - m.Unlock() - if m.received != nil { - m.received <- struct{}{} - } -} - -func (m *Metrics) String() string { - m.Lock() - defer m.Unlock() - return fmt.Sprintf("%d enqueued, %d forwarded, %d in queue, %d dropped", - m.enqueued, m.forwarded, m.enqueued-m.forwarded, m.dropped) -} - -func TestNotifies(t *testing.T) { - m := Metrics{received: make(chan struct{})} - r := Router{queue: make(chan Packet, 10)} - r.Notify(&m) - - for i := 0; i < 10; i++ { - r.Receive(Packet{}) - <-m.received - if m.enqueued != (1 + i) { - t.Error("not notifying correctly", m.enqueued, 1+i) - } - - } - - for i := 0; i < 10; i++ { - r.Receive(Packet{}) - <-m.received - if 
m.enqueued != 10 { - t.Error("not notifying correctly", m.enqueued, 10) - } - if m.dropped != (1 + i) { - t.Error("not notifying correctly", m.dropped, 1+i) - } - } -} - -func TestStopsNotifying(t *testing.T) { - m := Metrics{received: make(chan struct{})} - r := Router{queue: make(chan Packet, 10)} - r.Notify(&m) - - for i := 0; i < 5; i++ { - r.Receive(Packet{}) - <-m.received - if m.enqueued != (1 + i) { - t.Error("not notifying correctly") - } - } - - r.StopNotify(&m) - - for i := 0; i < 5; i++ { - r.Receive(Packet{}) - select { - case <-m.received: - t.Error("did not stop notifying") - default: - } - if m.enqueued != 5 { - t.Error("did not stop notifying") - } - } -} - -func TestThreadsafe(t *testing.T) { - N := 1000 - r := Router{queue: make(chan Packet, 10)} - m1 := Metrics{received: make(chan struct{})} - m2 := Metrics{received: make(chan struct{})} - m3 := Metrics{received: make(chan struct{})} - r.Notify(&m1) - r.Notify(&m2) - r.Notify(&m3) - - var n int - var wg sync.WaitGroup - for i := 0; i < N; i++ { - n++ - wg.Add(1) - go func() { - defer wg.Done() - r.Receive(Packet{}) - }() - - if i%3 == 0 { - n++ - wg.Add(1) - go func() { - defer wg.Done() - r.Forward() - }() - } - } - - // drain queues - for i := 0; i < (n * 3); i++ { - select { - case <-m1.received: - case <-m2.received: - case <-m3.received: - } - } - - wg.Wait() - - // counts should be correct and all agree. 
and this should - // run fine under `go test -race -cpu=5` - - t.Log("m1", m1.String()) - t.Log("m2", m2.String()) - t.Log("m3", m3.String()) - - if m1.String() != m2.String() || m2.String() != m3.String() { - t.Error("counts disagree") - } -} - -type highwatermark struct { - mu sync.Mutex - mark int - limit int - errs chan error -} - -func (m *highwatermark) incr() { - m.mu.Lock() - m.mark++ - // fmt.Println("incr", m.mark) - if m.mark > m.limit { - m.errs <- fmt.Errorf("went over limit: %d/%d", m.mark, m.limit) - } - m.mu.Unlock() -} - -func (m *highwatermark) decr() { - m.mu.Lock() - m.mark-- - // fmt.Println("decr", m.mark) - if m.mark < 0 { - m.errs <- fmt.Errorf("went under zero: %d/%d", m.mark, m.limit) - } - m.mu.Unlock() -} - -func TestLimited(t *testing.T) { - timeout := 10 * time.Second // huge timeout. - limit := 9 - - hwm := highwatermark{limit: limit, errs: make(chan error, 100)} - n := RateLimited(limit) // will stop after 3 rounds - n.Notify(1) - n.Notify(2) - n.Notify(3) - - entr := make(chan struct{}) - exit := make(chan struct{}) - done := make(chan struct{}) - go func() { - for i := 0; i < 10; i++ { - // fmt.Printf("round: %d\n", i) - n.NotifyAll(func(e Notifiee) { - hwm.incr() - entr <- struct{}{} - <-exit // wait - hwm.decr() - }) - } - done <- struct{}{} - }() - - for i := 0; i < 30; { - select { - case <-entr: - continue // let as many enter as possible - case <-time.After(1 * time.Millisecond): - } - - // let one exit - select { - case <-entr: - continue // in case of timing issues. 
- case exit <- struct{}{}: - case <-time.After(timeout): - t.Error("got stuck") - } - i++ - } - - select { - case <-done: // two parts done - case <-time.After(timeout): - t.Error("did not finish") - } - - close(hwm.errs) - for err := range hwm.errs { - t.Error(err) - } -} diff --git a/tracing/doc.go b/tracing/doc.go index d442ea2db..2c9711a63 100644 --- a/tracing/doc.go +++ b/tracing/doc.go @@ -6,7 +6,7 @@ // // Tracing is configured through environment variables, as consistent with the OpenTelemetry spec as possible: // -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md // // OTEL_TRACES_EXPORTER: a comma-separated list of exporters: // - otlp diff --git a/version.go b/version.go index c8b24bd3c..89faef6bf 100644 --- a/version.go +++ b/version.go @@ -4,17 +4,20 @@ import ( "fmt" "runtime" - "github.com/ipfs/kubo/repo/fsrepo" + "github.com/ipfs/kubo/core/commands/cmdutils" ) // CurrentCommit is the current git commit, this is set as a ldflag in the Makefile. var CurrentCommit string // CurrentVersionNumber is the current application's version literal. -const CurrentVersionNumber = "0.31.0-dev" +const CurrentVersionNumber = "0.40.0-dev" const ApiVersion = "/kubo/" + CurrentVersionNumber + "/" //nolint +// RepoVersion is the version number that we are currently expecting to see. +const RepoVersion = 18 + // GetUserAgentVersion is the libp2p user agent used by go-ipfs. // // Note: This will end in `/` when no commit is available. This is expected. 
@@ -26,13 +29,13 @@ func GetUserAgentVersion() string { } userAgent += userAgentSuffix } - return userAgent + return cmdutils.CleanAndTrim(userAgent) } var userAgentSuffix string func SetUserAgentSuffix(suffix string) { - userAgentSuffix = suffix + userAgentSuffix = cmdutils.CleanAndTrim(suffix) } type VersionInfo struct { @@ -47,7 +50,7 @@ func GetVersionInfo() *VersionInfo { return &VersionInfo{ Version: CurrentVersionNumber, Commit: CurrentCommit, - Repo: fmt.Sprint(fsrepo.RepoVersion), + Repo: fmt.Sprint(RepoVersion), System: runtime.GOARCH + "/" + runtime.GOOS, // TODO: Precise version here Golang: runtime.Version(), }