diff --git a/.codespell-ignore b/.codespell-ignore deleted file mode 100644 index 4b7efa7c0..000000000 --- a/.codespell-ignore +++ /dev/null @@ -1,21 +0,0 @@ -Adin -nd -Nd -afile -thirdparty -receivedFrom -origN -hel -TotalIn -childs -userA -AssignT -OT -AssignT -fo -recusive -raison -Boddy -ressource -achin -re-using diff --git a/.cspell.yml b/.cspell.yml new file mode 100644 index 000000000..f56756a87 --- /dev/null +++ b/.cspell.yml @@ -0,0 +1,6 @@ +ignoreWords: + - childs # This spelling is used in the files command + - NodeCreater # This spelling is used in the fuse dependency + - Boddy # One of the contributors to the project - Chris Boddy + - Botto # One of the contributors to the project - Santiago Botto + - cose # dag-cose \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 831606f19..280c95af2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -15,3 +15,23 @@ LICENSE text eol=auto # Binary assets assets/init-doc/* binary core/coreunix/test_data/** binary +test/cli/migrations/testdata/** binary + +# Generated test data +test/cli/migrations/testdata/** linguist-generated=true +test/cli/autoconf/testdata/** linguist-generated=true +test/cli/fixtures/** linguist-generated=true +test/sharness/t0054-dag-car-import-export-data/** linguist-generated=true +test/sharness/t0109-gateway-web-_redirects-data/** linguist-generated=true +test/sharness/t0114-gateway-subdomains/** linguist-generated=true +test/sharness/t0115-gateway-dir-listing/** linguist-generated=true +test/sharness/t0116-gateway-cache/** linguist-generated=true +test/sharness/t0119-prometheus-data/** linguist-generated=true +test/sharness/t0165-keystore-data/** linguist-generated=true +test/sharness/t0275-cid-security-data/** linguist-generated=true +test/sharness/t0280-plugin-dag-jose-data/** linguist-generated=true +test/sharness/t0280-plugin-data/** linguist-generated=true +test/sharness/t0280-plugin-git-data/** linguist-generated=true +test/sharness/t0400-api-no-gateway/** linguist-generated=true +test/sharness/t0701-delegated-routing-reframe/** linguist-generated=true +test/sharness/t0702-delegated-routing-http/** linguist-generated=true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4bc3665b6..904b7815a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -29,12 +29,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.24.x + go-version: 1.25.x # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml deleted file mode 100644 index 180f37963..000000000 --- a/.github/workflows/docker-build.yml +++ /dev/null @@ -1,34 +0,0 @@ -# If we decide to run build-image.yml on every PR, we could deprecate this workflow. -name: Docker Build - -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/*.md' - push: - branches: - - 'master' - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} - cancel-in-progress: true - -jobs: - docker-build: - if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' - runs-on: ubuntu-latest - timeout-minutes: 10 - env: - IMAGE_NAME: ipfs/kubo - WIP_IMAGE_TAG: wip - defaults: - run: - shell: bash - steps: - - uses: actions/setup-go@v5 - with: - go-version: 1.24.x - - uses: actions/checkout@v4 - - run: docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG . - - run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version diff --git a/.github/workflows/docker-check.yml b/.github/workflows/docker-check.yml new file mode 100644 index 000000000..e11f9830d --- /dev/null +++ b/.github/workflows/docker-check.yml @@ -0,0 +1,62 @@ +# This workflow performs a quick Docker build check on PRs and pushes to master. +# It builds the Docker image and runs a basic smoke test to ensure the image works. +# This is a lightweight check - for full multi-platform builds and publishing, see docker-image.yml +name: Docker Check + +on: + workflow_dispatch: + pull_request: + paths-ignore: + - '**/*.md' + push: + branches: + - 'master' + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + +jobs: + lint: + if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v5 + - uses: hadolint/hadolint-action@v3.1.0 + with: + dockerfile: Dockerfile + failure-threshold: warning + verbose: true + format: tty + + build: + if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + timeout-minutes: 10 + env: + IMAGE_NAME: ipfs/kubo + WIP_IMAGE_TAG: wip + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v5 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Docker image with BuildKit + uses: docker/build-push-action@v6 + with: + context: . + push: false + load: true + tags: ${{ env.IMAGE_NAME }}:${{ env.WIP_IMAGE_TAG }} + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max + + - name: Test Docker image + run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index f83809240..4564c060e 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -1,3 +1,7 @@ +# This workflow builds and publishes official Docker images to Docker Hub. +# It handles multi-platform builds (amd64, arm/v7, arm64/v8) and pushes tagged releases. +# This workflow is triggered on tags, specific branches, and can be manually dispatched. +# For quick build checks during development, see docker-check.yml name: Docker Push on: @@ -38,7 +42,7 @@ jobs: LEGACY_IMAGE_NAME: ipfs/go-ipfs steps: - name: Check out the repo - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -46,13 +50,11 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Cache Docker layers - uses: actions/cache@v4 + - name: Log in to Docker Hub + uses: docker/login-action@v3 with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- + username: ${{ vars.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} - name: Get tags id: tags @@ -63,12 +65,6 @@ jobs: echo "EOF" >> $GITHUB_OUTPUT shell: bash - - name: Log in to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - # We have to build each platform separately because when using multi-arch # builds, only one platform is being loaded into the cache. This would # prevent us from testing the other platforms. @@ -81,8 +77,10 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-amd64 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max - name: Build Docker image (linux/arm/v7) uses: docker/build-push-action@v6 @@ -93,8 +91,10 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-arm-v7 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max - name: Build Docker image (linux/arm64/v8) uses: docker/build-push-action@v6 @@ -105,8 +105,10 @@ jobs: load: true file: ./Dockerfile tags: ${{ env.IMAGE_NAME }}:linux-arm64-v8 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: type=gha,mode=max # We test all the images on amd64 host here. This uses QEMU to emulate # the other platforms. @@ -132,12 +134,9 @@ jobs: push: true file: ./Dockerfile tags: "${{ github.event.inputs.tags || steps.tags.outputs.value }}" - cache-from: type=local,src=/tmp/.buildx-cache-new - cache-to: type=local,dest=/tmp/.buildx-cache-new - - # https://github.com/docker/build-push-action/issues/252 - # https://github.com/moby/buildkit/issues/1896 - - name: Move cache to limit growth - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache + cache-from: | + type=gha + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache + cache-to: | + type=gha,mode=max + type=registry,ref=${{ env.IMAGE_NAME }}:buildcache,mode=max diff --git a/.github/workflows/gateway-conformance.yml b/.github/workflows/gateway-conformance.yml index b305a8fc7..18d1959e6 100644 --- a/.github/workflows/gateway-conformance.yml +++ b/.github/workflows/gateway-conformance.yml @@ -49,12 +49,12 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.24.x + go-version: 1.25.x - uses: protocol/cache-go-action@v1 with: name: ${{ github.job }} - name: Checkout kubo-gateway - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: path: kubo-gateway - name: Build kubo-gateway @@ -136,12 +136,12 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.24.x + go-version: 1.25.x - uses: protocol/cache-go-action@v1 with: name: ${{ github.job }} - name: Checkout kubo-gateway - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: path: kubo-gateway - name: Build kubo-gateway diff --git a/.github/workflows/gobuild.yml b/.github/workflows/gobuild.yml index 32cefb4c1..48665074f 100644 --- a/.github/workflows/gobuild.yml +++ b/.github/workflows/gobuild.yml @@ -30,8 +30,8 @@ jobs: steps: - uses: actions/setup-go@v5 with: - go-version: 1.24.x - - uses: actions/checkout@v4 + go-version: 1.25.x + - uses: actions/checkout@v5 - run: make cmd/ipfs-try-build env: TEST_FUSE: 1 diff --git a/.github/workflows/golang-analysis.yml b/.github/workflows/golang-analysis.yml index aef635845..bb1a49570 100644 --- a/.github/workflows/golang-analysis.yml +++ b/.github/workflows/golang-analysis.yml @@ -22,12 +22,12 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 10 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: submodules: recursive - uses: actions/setup-go@v5 with: - go-version: "1.24.x" + go-version: "1.25.x" - name: Check that go.mod is tidy uses: protocol/multiple-go-modules@v1.4 with: diff --git a/.github/workflows/golint.yml b/.github/workflows/golint.yml index 5f6e0bb20..898e3e936 100644 --- a/.github/workflows/golint.yml +++ b/.github/workflows/golint.yml @@ -31,6 +31,6 @@ jobs: steps: - uses: actions/setup-go@v5 with: - go-version: 1.24.x - - uses: actions/checkout@v4 + go-version: 1.25.x + - uses: actions/checkout@v5 - run: make -O test_go_lint diff --git a/.github/workflows/gotest.yml b/.github/workflows/gotest.yml index a5eb4ac8d..34d86352b 100644 --- a/.github/workflows/gotest.yml +++ b/.github/workflows/gotest.yml @@ -32,9 +32,9 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: 1.24.x + go-version: 1.25.x - name: Check out Kubo - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Install missing tools run: sudo apt update && sudo apt install -y zsh - name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table diff --git a/.github/workflows/interop.yml b/.github/workflows/interop.yml index e85e1f2fe..2f7c7a78c 100644 --- a/.github/workflows/interop.yml +++ b/.github/workflows/interop.yml @@ -10,7 +10,7 @@ on: - 'master' env: - GO_VERSION: 1.24.x + GO_VERSION: 1.25.x concurrency: group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} @@ -39,7 +39,7 @@ jobs: - uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - run: make build - uses: actions/upload-artifact@v4 with: @@ -56,7 +56,7 @@ jobs: - uses: actions/setup-node@v4 with: node-version: lts/* - - uses: actions/download-artifact@v4 + - uses: actions/download-artifact@v5 with: name: kubo path: cmd/ipfs @@ -91,13 +91,13 @@ jobs: steps: - uses: actions/setup-node@v4 with: - node-version: 18.14.0 - - uses: actions/download-artifact@v4 + node-version: 20.x + - uses: actions/download-artifact@v5 with: name: kubo path: cmd/ipfs - run: chmod +x cmd/ipfs/ipfs - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: repository: ipfs/ipfs-webui path: ipfs-webui diff --git a/.github/workflows/sharness.yml b/.github/workflows/sharness.yml index 62725e9b4..9295bc1c1 100644 --- a/.github/workflows/sharness.yml +++ b/.github/workflows/sharness.yml @@ -25,9 +25,9 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.24.x + go-version: 1.25.x - name: Checkout Kubo - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: path: kubo - name: Install missing tools diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index a34cc0798..4eda8b222 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -1,21 +1,18 @@ name: Spell Check -on: [push, pull_request] +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true jobs: spellcheck: - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Install Codespell - run: pip install codespell==2.4.0 - - - name: Run Codespell - uses: codespell-project/actions-codespell@v2 - with: - only_warn: 1 - ignore_words_file: .codespell-ignore - skip: "*.mod,*.sum,*.pdf,./docs/AUTHORS,./test/sharness/t0275-cid-security-data,./test/sharness/t0280-plugin-dag-jose-data,./bin" + uses: ipdxco/unified-github-workflows/.github/workflows/reusable-spellcheck.yml@v1 diff --git a/.hadolint.yaml b/.hadolint.yaml new file mode 100644 index 000000000..78b3d23bf --- /dev/null +++ b/.hadolint.yaml @@ -0,0 +1,13 @@ +# Hadolint configuration for Kubo Docker image +# https://github.com/hadolint/hadolint + +# Ignore specific rules +ignored: + # DL3008: Pin versions in apt-get install + # We use stable base images and prefer smaller layers over version pinning + - DL3008 + +# Trust base images from these registries +trustedRegistries: + - docker.io + - gcr.io \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 67a776ee1..0dec582e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Kubo Changelogs +- [v0.37](docs/changelogs/v0.37.md) - [v0.36](docs/changelogs/v0.36.md) - [v0.35](docs/changelogs/v0.35.md) - [v0.34](docs/changelogs/v0.34.md) diff --git a/Dockerfile b/Dockerfile index 98e44a1b6..6d43beefa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,13 +1,16 @@ -FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.24 AS builder +# syntax=docker/dockerfile:1 +# Enables BuildKit with cache mounts for faster builds +FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.25 AS builder ARG TARGETOS TARGETARCH -ENV SRC_DIR /kubo +ENV SRC_DIR=/kubo -# Download packages first so they can be cached. +# Cache go module downloads between builds for faster rebuilds COPY go.mod go.sum $SRC_DIR/ -RUN cd $SRC_DIR \ - && go mod download +WORKDIR $SRC_DIR +RUN --mount=type=cache,target=/go/pkg/mod \ + go mod download COPY . $SRC_DIR @@ -18,92 +21,78 @@ ARG IPFS_PLUGINS # Allow for other targets to be built, e.g.: docker build --build-arg MAKE_TARGET="nofuse" ARG MAKE_TARGET=build -# Build the thing. -# Also: fix getting HEAD commit hash via git rev-parse. -RUN cd $SRC_DIR \ - && mkdir -p .git/objects \ +# Build ipfs binary with cached go modules and build cache. +# mkdir .git/objects allows git rev-parse to read commit hash for version info +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + mkdir -p .git/objects \ && GOOS=$TARGETOS GOARCH=$TARGETARCH GOFLAGS=-buildvcs=false make ${MAKE_TARGET} IPFS_PLUGINS=$IPFS_PLUGINS -# Using Debian Buster because the version of busybox we're using is based on it -# and we want to make sure the libraries we're using are compatible. That's also -# why we're running this for the target platform. -FROM debian:stable-slim AS utilities +# Extract required runtime tools from Debian. +# We use Debian instead of Alpine because we need glibc compatibility +# for the busybox base image we're using. +FROM debian:bookworm-slim AS utilities RUN set -eux; \ apt-get update; \ - apt-get install -y \ + apt-get install -y --no-install-recommends \ tini \ # Using gosu (~2MB) instead of su-exec (~20KB) because it's easier to # install on Debian. Useful links: # - https://github.com/ncopa/su-exec#why-reinvent-gosu # - https://github.com/tianon/gosu/issues/52#issuecomment-441946745 gosu \ - # This installs fusermount which we later copy over to the target image. + # fusermount enables IPFS mount commands fuse \ ca-certificates \ ; \ - rm -rf /var/lib/apt/lists/* + apt-get clean; \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# Now comes the actual target image, which aims to be as small as possible. +# Final minimal image with shell for debugging (busybox provides sh) FROM busybox:stable-glibc -# Get the ipfs binary, entrypoint script, and TLS CAs from the build container. -ENV SRC_DIR /kubo +# Copy ipfs binary, startup scripts, and runtime dependencies +ENV SRC_DIR=/kubo COPY --from=utilities /usr/sbin/gosu /sbin/gosu COPY --from=utilities /usr/bin/tini /sbin/tini COPY --from=utilities /bin/fusermount /usr/local/bin/fusermount COPY --from=utilities /etc/ssl/certs /etc/ssl/certs COPY --from=builder $SRC_DIR/cmd/ipfs/ipfs /usr/local/bin/ipfs -COPY --from=builder $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs +COPY --from=builder --chmod=755 $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs COPY --from=builder $SRC_DIR/bin/container_init_run /usr/local/bin/container_init_run -# Add suid bit on fusermount so it will run properly +# Set SUID for fusermount to enable FUSE mounting by non-root user RUN chmod 4755 /usr/local/bin/fusermount -# Fix permissions on start_ipfs (ignore the build machine's permissions) -RUN chmod 0755 /usr/local/bin/start_ipfs - -# Swarm TCP; should be exposed to the public -EXPOSE 4001 -# Swarm UDP; should be exposed to the public -EXPOSE 4001/udp -# Daemon API; must not be exposed publicly but to client services under you control +# Swarm P2P port (TCP/UDP) - expose publicly for peer connections +EXPOSE 4001 4001/udp +# API port - keep private, only for trusted clients EXPOSE 5001 -# Web Gateway; can be exposed publicly with a proxy, e.g. as https://ipfs.example.org +# Gateway port - can be exposed publicly via reverse proxy EXPOSE 8080 -# Swarm Websockets; must be exposed publicly when the node is listening using the websocket transport (/ipX/.../tcp/8081/ws). +# Swarm WebSockets - expose publicly for browser-based peers EXPOSE 8081 -# Create the fs-repo directory and switch to a non-privileged user. -ENV IPFS_PATH /data/ipfs -RUN mkdir -p $IPFS_PATH \ +# Create ipfs user (uid 1000) and required directories with proper ownership +ENV IPFS_PATH=/data/ipfs +RUN mkdir -p $IPFS_PATH /ipfs /ipns /mfs /container-init.d \ && adduser -D -h $IPFS_PATH -u 1000 -G users ipfs \ - && chown ipfs:users $IPFS_PATH + && chown ipfs:users $IPFS_PATH /ipfs /ipns /mfs /container-init.d -# Create mount points for `ipfs mount` command -RUN mkdir /ipfs /ipns /mfs \ - && chown ipfs:users /ipfs /ipns /mfs - -# Create the init scripts directory -RUN mkdir /container-init.d \ - && chown ipfs:users /container-init.d - -# Expose the fs-repo as a volume. -# start_ipfs initializes an fs-repo if none is mounted. -# Important this happens after the USER directive so permissions are correct. +# Volume for IPFS repository data persistence VOLUME $IPFS_PATH # The default logging level -ENV GOLOG_LOG_LEVEL "" +ENV GOLOG_LOG_LEVEL="" -# This just makes sure that: -# 1. There's an fs-repo, and initializes one if there isn't. -# 2. The API and Gateway are accessible from outside the container. +# Entrypoint initializes IPFS repo if needed and configures networking. +# tini ensures proper signal handling and zombie process cleanup ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start_ipfs"] -# Healthcheck for the container -# QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn is the CID of empty folder +# Health check verifies IPFS daemon is responsive. +# Uses empty directory CID (QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn) as test HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ CMD ipfs --api=/ip4/127.0.0.1/tcp/5001 dag stat /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn || exit 1 -# Execute the daemon subcommand by default +# Default: run IPFS daemon with auto-migration enabled CMD ["daemon", "--migrate=true", "--agent-version-suffix=docker"] diff --git a/bin/mkreleaselog b/bin/mkreleaselog index 2ff6c0e89..101881e90 100755 --- a/bin/mkreleaselog +++ b/bin/mkreleaselog @@ -79,7 +79,12 @@ msg() { statlog() { local module="$1" - local rpath="$GOPATH/src/$(strip_version "$module")" + local rpath + if [[ "$module" == "github.com/ipfs/kubo" ]]; then + rpath="$ROOT_DIR" + else + rpath="$GOPATH/src/$(strip_version "$module")" + fi local start="${2:-}" local end="${3:-HEAD}" local mailmap_file="$rpath/.mailmap" @@ -166,7 +171,12 @@ release_log() { local start="$2" local end="${3:-HEAD}" local repo="$(strip_version "$1")" - local dir="$GOPATH/src/$repo" + local dir + if [[ "$module" == "github.com/ipfs/kubo" ]]; then + dir="$ROOT_DIR" + else + dir="$GOPATH/src/$repo" + fi local commit pr git -C "$dir" log \ @@ -203,8 +213,13 @@ mod_deps() { ensure() { local repo="$(strip_version "$1")" local commit="$2" - local rpath="$GOPATH/src/$repo" - if [[ ! -d "$rpath" ]]; then + local rpath + if [[ "$1" == "github.com/ipfs/kubo" ]]; then + rpath="$ROOT_DIR" + else + rpath="$GOPATH/src/$repo" + fi + if [[ "$1" != "github.com/ipfs/kubo" ]] && [[ ! -d "$rpath" ]]; then msg "Cloning $repo..." git clone "http://$repo" "$rpath" >&2 fi @@ -237,10 +252,7 @@ recursive_release_log() { local module="$(go list -m)" local dir="$(go list -m -f '{{.Dir}}')" - if [[ "${GOPATH}/${module}" -ef "${dir}" ]]; then - echo "This script requires the target module and all dependencies to live in a GOPATH." - return 1 - fi + # Kubo can be run from any directory, dependencies still use GOPATH ( local result=0 diff --git a/client/rpc/api_test.go b/client/rpc/api_test.go index c0da3d7b0..745f0fe13 100644 --- a/client/rpc/api_test.go +++ b/client/rpc/api_test.go @@ -2,9 +2,9 @@ package rpc import ( "context" + "errors" "net/http" "net/http/httptest" - "runtime" "strconv" "strings" "sync" @@ -12,11 +12,11 @@ import ( "time" "github.com/ipfs/boxo/path" + "github.com/ipfs/kubo/config" iface "github.com/ipfs/kubo/core/coreiface" "github.com/ipfs/kubo/core/coreiface/tests" "github.com/ipfs/kubo/test/cli/harness" ma "github.com/multiformats/go-multiaddr" - "go.uber.org/multierr" ) type NodeProvider struct{} @@ -45,6 +45,9 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent c := n.ReadConfig() c.Experimental.FilestoreEnabled = true + // only provide things we pin. Allows to test + // provide operations. + c.Reprovider.Strategy = config.NewOptionalString("roots") n.WriteConfig(c) n.StartDaemon("--enable-pubsub-experiment", "--offline="+strconv.FormatBool(!online)) @@ -88,16 +91,12 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent wg.Wait() - return apis, multierr.Combine(errs...) + return apis, errors.Join(errs...) } func TestHttpApi(t *testing.T) { t.Parallel() - if runtime.GOOS == "windows" { - t.Skip("skipping due to #9905") - } - tests.TestApi(NodeProvider{})(t) } diff --git a/cmd/ipfs/kubo/add_migrations.go b/cmd/ipfs/kubo/add_migrations.go index 557a8de84..d77d0afdf 100644 --- a/cmd/ipfs/kubo/add_migrations.go +++ b/cmd/ipfs/kubo/add_migrations.go @@ -86,7 +86,7 @@ func addMigrationFiles(ctx context.Context, node *core.IpfsNode, paths []string, return err } - ipfsPath, err := ufs.Add(ctx, files.NewReaderStatFile(f, fi), options.Unixfs.Pin(pin)) + ipfsPath, err := ufs.Add(ctx, files.NewReaderStatFile(f, fi), options.Unixfs.Pin(pin, "")) if err != nil { return err } diff --git a/cmd/ipfs/kubo/daemon.go b/cmd/ipfs/kubo/daemon.go index 94b633f79..6c594912d 100644 --- a/cmd/ipfs/kubo/daemon.go +++ b/cmd/ipfs/kubo/daemon.go @@ -34,8 +34,6 @@ import ( nodeMount "github.com/ipfs/kubo/fuse/node" fsrepo "github.com/ipfs/kubo/repo/fsrepo" "github.com/ipfs/kubo/repo/fsrepo/migrations" - "github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher" - goprocess "github.com/jbenet/goprocess" p2pcrypto "github.com/libp2p/go-libp2p/core/crypto" pnet "github.com/libp2p/go-libp2p/core/pnet" "github.com/libp2p/go-libp2p/core/protocol" @@ -45,7 +43,6 @@ import ( manet "github.com/multiformats/go-multiaddr/net" prometheus "github.com/prometheus/client_golang/prometheus" promauto "github.com/prometheus/client_golang/prometheus/promauto" - "go.uber.org/multierr" ) const ( @@ -67,6 +64,7 @@ const ( routingOptionDHTServerKwd = "dhtserver" routingOptionNoneKwd = "none" routingOptionCustomKwd = "custom" + routingOptionDelegatedKwd = "delegated" routingOptionDefaultKwd = "default" routingOptionAutoKwd = "auto" routingOptionAutoClientKwd = "autoclient" @@ -277,7 +275,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment } var cacheMigrations, pinMigrations bool - var fetcher migrations.Fetcher + var externalMigrationFetcher migrations.Fetcher // acquire the repo lock _before_ constructing a node. we need to make // sure we are permitted to access the resources (datastore, etc.) @@ -287,74 +285,39 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment return err case fsrepo.ErrNeedMigration: domigrate, found := req.Options[migrateKwd].(bool) - fmt.Println("Found outdated fs-repo, migrations need to be run.") + + // Get current repo version for more informative message + currentVersion, verErr := migrations.RepoVersion(cctx.ConfigRoot) + if verErr != nil { + // Fallback to generic message if we can't read version + fmt.Printf("Kubo repository at %s requires migration.\n", cctx.ConfigRoot) + } else { + fmt.Printf("Kubo repository at %s has version %d and needs to be migrated to version %d.\n", + cctx.ConfigRoot, currentVersion, version.RepoVersion) + } if !found { domigrate = YesNoPrompt("Run migrations now? [y/N]") } if !domigrate { - fmt.Println("Not running migrations of fs-repo now.") - fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.tech") + fmt.Printf("Not running migrations on repository at %s. Re-run daemon with --migrate or see 'ipfs repo migrate --help'\n", cctx.ConfigRoot) return errors.New("fs-repo requires migration") } - // Read Migration section of IPFS config - configFileOpt, _ := req.Options[commands.ConfigFileOption].(string) - migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot, configFileOpt) + // Use hybrid migration strategy that intelligently combines external and embedded migrations + err = migrations.RunHybridMigrations(cctx.Context(), version.RepoVersion, cctx.ConfigRoot, false) if err != nil { - return err - } - - // Define function to create IPFS fetcher. Do not supply an - // already-constructed IPFS fetcher, because this may be expensive and - // not needed according to migration config. Instead, supply a function - // to construct the particular IPFS fetcher implementation used here, - // which is called only if an IPFS fetcher is needed. - newIpfsFetcher := func(distPath string) migrations.Fetcher { - return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot, configFileOpt) - } - - // Fetch migrations from current distribution, or location from environ - fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist) - - // Create fetchers according to migrationCfg.DownloadSources - fetcher, err = migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher) - if err != nil { - return err - } - defer fetcher.Close() - - if migrationCfg.Keep == "cache" { - cacheMigrations = true - } else if migrationCfg.Keep == "pin" { - pinMigrations = true - } - - if cacheMigrations || pinMigrations { - // Create temp directory to store downloaded migration archives - migrations.DownloadDirectory, err = os.MkdirTemp("", "migrations") - if err != nil { - return err - } - // Defer cleanup of download directory so that it gets cleaned up - // if daemon returns early due to error - defer func() { - if migrations.DownloadDirectory != "" { - os.RemoveAll(migrations.DownloadDirectory) - } - }() - } - - err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", false) - if err != nil { - fmt.Println("The migrations of fs-repo failed:") + fmt.Println("Repository migration failed:") fmt.Printf(" %s\n", err) fmt.Println("If you think this is a bug, please file an issue and include this whole log output.") - fmt.Println(" https://github.com/ipfs/fs-repo-migrations") + fmt.Println(" https://github.com/ipfs/kubo") return err } + // Note: Migration caching/pinning functionality has been deprecated + // The hybrid migration system handles legacy migrations more efficiently + repo, err = fsrepo.Open(cctx.ConfigRoot) if err != nil { return err @@ -381,6 +344,27 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment return err } + // Validate autoconf setup - check for private network conflict + swarmKey, _ := repo.SwarmKey() + isPrivateNetwork := swarmKey != nil || pnet.ForcePrivateNetwork + if err := config.ValidateAutoConfWithRepo(cfg, isPrivateNetwork); err != nil { + return err + } + + // Start background AutoConf updater if enabled + if cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) { + // Start autoconf client for background updates + client, err := config.GetAutoConfClient(cfg) + if err != nil { + log.Errorf("failed to create autoconf client: %v", err) + } else { + // Start primes cache and starts background updater + if _, err := client.Start(cctx.Context()); err != nil { + log.Errorf("failed to start autoconf updater: %v", err) + } + } + } + fmt.Printf("PeerID: %s\n", cfg.Identity.PeerID) if !psSet { @@ -404,8 +388,8 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment } routingOption, _ := req.Options[routingOptionKwd].(string) - if routingOption == routingOptionDefaultKwd { - routingOption = cfg.Routing.Type.WithDefault(routingOptionAutoKwd) + if routingOption == routingOptionDefaultKwd || routingOption == "" { + routingOption = cfg.Routing.Type.WithDefault(config.DefaultRoutingType) if routingOption == "" { routingOption = routingOptionAutoKwd } @@ -435,6 +419,8 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment } } + // Use config for routing construction + switch routingOption { case routingOptionSupernodeKwd: return errors.New("supernode routing was never fully implemented and has been removed") @@ -450,6 +436,8 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment ncfg.Routing = libp2p.DHTServerOption case routingOptionNoneKwd: ncfg.Routing = libp2p.NilRouterOption + case routingOptionDelegatedKwd: + ncfg.Routing = libp2p.ConstructDelegatedOnlyRouting(cfg) case routingOptionCustomKwd: if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) { return errors.New("Routing.AcceleratedDHTClient option is set even tho Routing.Type is custom, using custom .AcceleratedDHTClient needs to be set on DHT routers individually") @@ -491,11 +479,24 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment if cfg.Provider.Strategy.WithDefault("") != "" && cfg.Reprovider.Strategy.IsDefault() { log.Fatal("Invalid config. Remove unused Provider.Strategy and set Reprovider.Strategy instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy") } + // Check for deprecated "flat" strategy + if cfg.Reprovider.Strategy.WithDefault("") == "flat" { + log.Error("Reprovider.Strategy='flat' is deprecated and will be removed in the next release. Please update your config to use 'all' instead.") + } if cfg.Experimental.StrategicProviding { log.Error("Experimental.StrategicProviding was removed. Remove it from your config and set Provider.Enabled=false to remove this message. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing") cfg.Experimental.StrategicProviding = false cfg.Provider.Enabled = config.False } + if routingOption == routingOptionDelegatedKwd { + // Delegated routing is read-only mode - content providing must be disabled + if cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) { + log.Fatal("Routing.Type=delegated does not support content providing. Set Provider.Enabled=false in your config.") + } + if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0 { + log.Fatal("Routing.Type=delegated does not support content providing. Set Reprovider.Interval='0' in your config.") + } + } printLibp2pPorts(node) @@ -527,6 +528,9 @@ take effect. } }() + // Clear any cached offline node and set the online daemon node + // This ensures HTTP RPC server uses the online node, not any cached offline node + cctx.ClearCachedNode() cctx.ConstructNode = func() (*core.IpfsNode, error) { return node, nil } @@ -537,10 +541,19 @@ take effect. if err != nil { return err } + + pluginErrc := make(chan error, 1) select { - case <-node.Process.Closing(): + case <-node.Context().Done(): + close(pluginErrc) default: - node.Process.AddChild(goprocess.WithTeardown(cctx.Plugins.Close)) + context.AfterFunc(node.Context(), func() { + err := cctx.Plugins.Close() + if err != nil { + pluginErrc <- fmt.Errorf("closing plugins: %w", err) + } + close(pluginErrc) + }) } // construct api endpoint - every time @@ -558,6 +571,11 @@ take effect. if err := mountFuse(req, cctx); err != nil { return err } + defer func() { + if _err != nil { + nodeMount.Unmount(node) + } + }() } // repo blockstore GC - if --enable-gc flag is present @@ -566,9 +584,9 @@ take effect. return err } - // Add any files downloaded by migration. - if cacheMigrations || pinMigrations { - err = addMigrations(cctx.Context(), node, fetcher, pinMigrations) + // Add any files downloaded by external migrations (embedded migrations don't download files) + if externalMigrationFetcher != nil && (cacheMigrations || pinMigrations) { + err = addMigrations(cctx.Context(), node, externalMigrationFetcher, pinMigrations) if err != nil { fmt.Fprintln(os.Stderr, "Could not add migration to IPFS:", err) } @@ -577,10 +595,10 @@ take effect. os.RemoveAll(migrations.DownloadDirectory) migrations.DownloadDirectory = "" } - if fetcher != nil { + if externalMigrationFetcher != nil { // If there is an error closing the IpfsFetcher, then print error, but // do not fail because of it. - err = fetcher.Close() + err = externalMigrationFetcher.Close() if err != nil { log.Errorf("error closing IPFS fetcher: %s", err) } @@ -646,6 +664,17 @@ take effect. ⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering ⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h' +`) + } + + // Inform user about Routing.AcceleratedDHTClient when enabled + if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) { + fmt.Print(` + +ℹ️ Routing.AcceleratedDHTClient is enabled for faster content discovery +ℹ️ and DHT provides. Routing table is initializing. IPFS is ready to use, +ℹ️ but performance will improve over time as more peers are discovered + `) } @@ -692,16 +721,26 @@ take effect. log.Fatal("Support for IPFS_REUSEPORT was removed. Use LIBP2P_TCP_REUSEPORT instead.") } + unmountErrc := make(chan error) + context.AfterFunc(node.Context(), func() { + <-node.Context().Done() + nodeMount.Unmount(node) + close(unmountErrc) + }) + // collect long-running errors and block for shutdown // TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown - var errs error - for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc) { + var errs []error + for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc, pluginErrc, unmountErrc) { if err != nil { - errs = multierr.Append(errs, err) + errs = append(errs, err) } } + if len(errs) != 0 { + return errors.Join(errs...) + } - return errs + return nil } // serveHTTPApi collects options, creates listener, prints status message and starts serving requests. @@ -851,6 +890,12 @@ func printLibp2pPorts(node *core.IpfsNode) { return } + if node.PeerHost == nil { + log.Error("PeerHost is nil - this should not happen and likely indicates an FX dependency injection issue or race condition") + fmt.Println("Swarm not properly initialized - node PeerHost is nil.") + return + } + ifaceAddrs, err := node.PeerHost.Network().InterfaceListenAddresses() if err != nil { log.Errorf("failed to read listening addresses: %s", err) @@ -1032,6 +1077,10 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error return nil, err } + if node.PeerHost == nil { + return nil, fmt.Errorf("cannot create libp2p gateway: node PeerHost is nil (this should not happen and likely indicates an FX dependency injection issue or race condition)") + } + h := p2phttp.Host{ StreamHost: node.PeerHost, } @@ -1042,14 +1091,13 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error errc := make(chan error, 1) go func() { - defer close(errc) errc <- h.Serve() + close(errc) }() - go func() { - <-node.Process.Closing() + context.AfterFunc(node.Context(), func() { h.Close() - }() + }) return errc, nil } @@ -1134,14 +1182,14 @@ func maybeRunGC(req *cmds.Request, node *core.IpfsNode) (<-chan error, error) { return errc, nil } -// merge does fan-in of multiple read-only error channels -// taken from http://blog.golang.org/pipelines +// merge does fan-in of multiple read-only error channels. func merge(cs ...<-chan error) <-chan error { var wg sync.WaitGroup out := make(chan error) - // Start an output goroutine for each input channel in cs. output - // copies values from c to out until c is closed, then calls wg.Done. + // Start a goroutine for each input channel in cs, that copies values from + // the input channel to the output channel until the input channel is + // closed. output := func(c <-chan error) { for n := range c { out <- n @@ -1155,8 +1203,8 @@ func merge(cs ...<-chan error) <-chan error { } } - // Start a goroutine to close out once all the output goroutines are - // done. This must start after the wg.Add call. + // Start a goroutine to close out once all the output goroutines, and other + // things to wait on, are done. go func() { wg.Wait() close(out) @@ -1227,8 +1275,6 @@ Visit https://github.com/ipfs/kubo/releases or https://dist.ipfs.tech/#kubo and select { case <-ctx.Done(): return - case <-nd.Process.Closing(): - return case <-ticker.C: continue } diff --git a/cmd/ipfs/kubo/start.go b/cmd/ipfs/kubo/start.go index 19a88f37c..4a8709cc6 100644 --- a/cmd/ipfs/kubo/start.go +++ b/cmd/ipfs/kubo/start.go @@ -214,8 +214,8 @@ func insideGUI() bool { func checkDebug(req *cmds.Request) { // check if user wants to debug. option OR env var. debug, _ := req.Options["debug"].(bool) - ipfsLogLevel, _ := logging.LevelFromString(os.Getenv("IPFS_LOGGING")) // IPFS_LOGGING is deprecated - goLogLevel, _ := logging.LevelFromString(os.Getenv("GOLOG_LOG_LEVEL")) + ipfsLogLevel, _ := logging.Parse(os.Getenv("IPFS_LOGGING")) // IPFS_LOGGING is deprecated + goLogLevel, _ := logging.Parse(os.Getenv("GOLOG_LOG_LEVEL")) if debug || goLogLevel == logging.LevelDebug || ipfsLogLevel == logging.LevelDebug { u.Debug = true diff --git a/cmd/ipfswatch/ipfswatch_test.go b/cmd/ipfswatch/ipfswatch_test.go index 20397afef..75d007521 100644 --- a/cmd/ipfswatch/ipfswatch_test.go +++ b/cmd/ipfswatch/ipfswatch_test.go @@ -6,11 +6,11 @@ package main import ( "testing" - "github.com/ipfs/kubo/thirdparty/assert" + "github.com/stretchr/testify/require" ) func TestIsHidden(t *testing.T) { - assert.True(IsHidden("bar/.git"), t, "dirs beginning with . should be recognized as hidden") - assert.False(IsHidden("."), t, ". for current dir should not be considered hidden") - assert.False(IsHidden("bar/baz"), t, "normal dirs should not be hidden") + require.True(t, IsHidden("bar/.git"), "dirs beginning with . should be recognized as hidden") + require.False(t, IsHidden("."), ". for current dir should not be considered hidden") + require.False(t, IsHidden("bar/baz"), "normal dirs should not be hidden") } diff --git a/cmd/ipfswatch/main.go b/cmd/ipfswatch/main.go index 6850f6423..3178cf564 100644 --- a/cmd/ipfswatch/main.go +++ b/cmd/ipfswatch/main.go @@ -21,7 +21,6 @@ import ( fsnotify "github.com/fsnotify/fsnotify" "github.com/ipfs/boxo/files" - process "github.com/jbenet/goprocess" ) var ( @@ -54,7 +53,6 @@ func main() { } func run(ipfsPath, watchPath string) error { - proc := process.WithParent(process.Background()) log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath) ipfsPath, err := fsutil.ExpandHome(ipfsPath) @@ -99,11 +97,11 @@ func run(ipfsPath, watchPath string) error { corehttp.WebUIOption, corehttp.CommandsOption(cmdCtx(node, ipfsPath)), } - proc.Go(func(p process.Process) { + go func() { if err := corehttp.ListenAndServe(node, addr, opts...); err != nil { return } - }) + }() } interrupts := make(chan os.Signal, 1) @@ -137,7 +135,7 @@ func run(ipfsPath, watchPath string) error { } } } - proc.Go(func(p process.Process) { + go func() { file, err := os.Open(e.Name) if err != nil { log.Println(err) @@ -162,7 +160,7 @@ func run(ipfsPath, watchPath string) error { log.Println(err) } log.Printf("added %s... key: %s", e.Name, k) - }) + }() } case err := <-watcher.Errors: log.Println(err) diff --git a/commands/context.go b/commands/context.go index ca425acbf..c8893ae17 100644 --- a/commands/context.go +++ b/commands/context.go @@ -53,6 +53,23 @@ func (c *Context) GetNode() (*core.IpfsNode, error) { return c.node, err } +// ClearCachedNode clears any cached node, forcing GetNode to construct a new one. +// +// This method is critical for mitigating racy FX dependency injection behavior +// that can occur during daemon startup. The daemon may create multiple IpfsNode +// instances during initialization - first an offline node during early init, then +// the proper online daemon node. Without clearing the cache, HTTP RPC handlers may +// end up using the first (offline) cached node instead of the intended online daemon node. +// +// This behavior was likely present forever in go-ipfs, but recent changes made it more +// prominent and forced us to proactively mitigate FX shortcomings. The daemon calls +// this method immediately before setting its ConstructNode function to ensure that +// subsequent GetNode() calls use the correct online daemon node rather than any +// stale cached offline node from initialization. +func (c *Context) ClearCachedNode() { + c.node = nil +} + // GetAPI returns CoreAPI instance backed by ipfs node. // It may construct the node with the provided function. func (c *Context) GetAPI() (coreiface.CoreAPI, error) { diff --git a/config/autoconf.go b/config/autoconf.go new file mode 100644 index 000000000..2f1d41b26 --- /dev/null +++ b/config/autoconf.go @@ -0,0 +1,319 @@ +package config + +import ( + "maps" + "math/rand" + "strings" + + "github.com/ipfs/boxo/autoconf" + logging "github.com/ipfs/go-log/v2" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +var log = logging.Logger("config") + +// AutoConf contains the configuration for the autoconf subsystem +type AutoConf struct { + // URL is the HTTP(S) URL to fetch the autoconf.json from + // Default: see boxo/autoconf.MainnetAutoConfURL + URL *OptionalString `json:",omitempty"` + + // Enabled determines whether to use autoconf + // Default: true + Enabled Flag `json:",omitempty"` + + // RefreshInterval is how often to refresh autoconf data + // Default: 24h + RefreshInterval *OptionalDuration `json:",omitempty"` + + // TLSInsecureSkipVerify allows skipping TLS verification (for testing only) + // Default: false + TLSInsecureSkipVerify Flag `json:",omitempty"` +} + +const ( + // AutoPlaceholder is the string used as a placeholder for autoconf values + AutoPlaceholder = "auto" + + // DefaultAutoConfEnabled is the default value for AutoConf.Enabled + DefaultAutoConfEnabled = true + + // DefaultAutoConfURL is the default URL for fetching autoconf + DefaultAutoConfURL = autoconf.MainnetAutoConfURL + + // DefaultAutoConfRefreshInterval is the default interval for refreshing autoconf data + DefaultAutoConfRefreshInterval = autoconf.DefaultRefreshInterval + + // AutoConf client configuration constants + DefaultAutoConfCacheSize = autoconf.DefaultCacheSize + DefaultAutoConfTimeout = autoconf.DefaultTimeout +) + +// getNativeSystems returns the list of systems that should be used natively based on routing type +func getNativeSystems(routingType string) []string { + switch routingType { + case "dht", "dhtclient", "dhtserver": + return []string{autoconf.SystemAminoDHT} // Only native DHT + case "auto", "autoclient": + return []string{autoconf.SystemAminoDHT} // Native DHT, delegated others + case "delegated": + return []string{} // Everything delegated + case "none": + return []string{} // No native systems + default: + return []string{} // Custom mode + } +} + +// selectRandomResolver picks a random resolver from a list for load balancing +func selectRandomResolver(resolvers []string) string { + if len(resolvers) == 0 { + return "" + } + return resolvers[rand.Intn(len(resolvers))] +} + +// DNSResolversWithAutoConf returns DNS resolvers with "auto" values replaced by autoconf values +func (c *Config) DNSResolversWithAutoConf() map[string]string { + if c.DNS.Resolvers == nil { + return nil + } + + resolved := make(map[string]string) + autoConf := c.getAutoConf() + autoExpanded := 0 + + // Process each configured resolver + for domain, resolver := range c.DNS.Resolvers { + if resolver == AutoPlaceholder { + // Try to resolve from autoconf + if autoConf != nil && autoConf.DNSResolvers != nil { + if resolvers, exists := autoConf.DNSResolvers[domain]; exists && len(resolvers) > 0 { + resolved[domain] = selectRandomResolver(resolvers) + autoExpanded++ + } + } + // If autoConf is disabled or domain not found, skip this "auto" resolver + } else { + // Keep custom resolver as-is + resolved[domain] = resolver + } + } + + // Add default resolvers from autoconf that aren't already configured + if autoConf != nil && autoConf.DNSResolvers != nil { + for domain, resolvers := range autoConf.DNSResolvers { + if _, exists := resolved[domain]; !exists && len(resolvers) > 0 { + resolved[domain] = selectRandomResolver(resolvers) + } + } + } + + // Log expansion statistics + if autoExpanded > 0 { + log.Debugf("expanded %d 'auto' DNS.Resolvers from autoconf", autoExpanded) + } + + return resolved +} + +// expandAutoConfSlice is a generic helper for expanding "auto" placeholders in string slices +// It handles the common pattern of: iterate through slice, expand "auto" once, keep custom values +func expandAutoConfSlice(sourceSlice []string, autoConfData []string) []string { + var resolved []string + autoExpanded := false + + for _, item := range sourceSlice { + if item == AutoPlaceholder { + // Replace with autoconf data (only once) + if autoConfData != nil && !autoExpanded { + resolved = append(resolved, autoConfData...) + autoExpanded = true + } + // If autoConfData is nil or already expanded, skip redundant "auto" entries silently + } else { + // Keep custom item + resolved = append(resolved, item) + } + } + + return resolved +} + +// BootstrapWithAutoConf returns bootstrap config with "auto" values replaced by autoconf values +func (c *Config) BootstrapWithAutoConf() []string { + autoConf := c.getAutoConf() + var autoConfData []string + + if autoConf != nil { + routingType := c.Routing.Type.WithDefault(DefaultRoutingType) + nativeSystems := getNativeSystems(routingType) + autoConfData = autoConf.GetBootstrapPeers(nativeSystems...) + log.Debugf("BootstrapWithAutoConf: processing with routing type: %s", routingType) + } else { + log.Debugf("BootstrapWithAutoConf: autoConf disabled, using original config") + } + + result := expandAutoConfSlice(c.Bootstrap, autoConfData) + log.Debugf("BootstrapWithAutoConf: final result contains %d peers", len(result)) + return result +} + +// getAutoConf is a helper to get autoconf data with fallbacks +func (c *Config) getAutoConf() *autoconf.Config { + if !c.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) { + log.Debugf("getAutoConf: AutoConf disabled, returning nil") + return nil + } + + // Create or get cached client with config + client, err := GetAutoConfClient(c) + if err != nil { + log.Debugf("getAutoConf: client creation failed - %v", err) + return nil + } + + // Use GetCached to avoid network I/O during config operations + // This ensures config retrieval doesn't block on network operations + result := client.GetCached() + + log.Debugf("getAutoConf: returning autoconf data") + return result +} + +// BootstrapPeersWithAutoConf returns bootstrap peers with "auto" values replaced by autoconf values +// and parsed into peer.AddrInfo structures +func (c *Config) BootstrapPeersWithAutoConf() ([]peer.AddrInfo, error) { + bootstrapStrings := c.BootstrapWithAutoConf() + return ParseBootstrapPeers(bootstrapStrings) +} + +// DelegatedRoutersWithAutoConf returns delegated router URLs without trailing slashes +func (c *Config) DelegatedRoutersWithAutoConf() []string { + autoConf := c.getAutoConf() + + // Use autoconf to expand the endpoints with supported paths for read operations + routingType := c.Routing.Type.WithDefault(DefaultRoutingType) + nativeSystems := getNativeSystems(routingType) + return autoconf.ExpandDelegatedEndpoints( + c.Routing.DelegatedRouters, + autoConf, + nativeSystems, + // Kubo supports all read paths + autoconf.RoutingV1ProvidersPath, + autoconf.RoutingV1PeersPath, + autoconf.RoutingV1IPNSPath, + ) +} + +// DelegatedPublishersWithAutoConf returns delegated publisher URLs without trailing slashes +func (c *Config) DelegatedPublishersWithAutoConf() []string { + autoConf := c.getAutoConf() + + // Use autoconf to expand the endpoints with IPNS write path + routingType := c.Routing.Type.WithDefault(DefaultRoutingType) + nativeSystems := getNativeSystems(routingType) + return autoconf.ExpandDelegatedEndpoints( + c.Ipns.DelegatedPublishers, + autoConf, + nativeSystems, + autoconf.RoutingV1IPNSPath, // Only IPNS operations (for write) + ) +} + +// expandConfigField expands a specific config field with autoconf values +// Handles both top-level fields ("Bootstrap") and nested fields ("DNS.Resolvers") +func (c *Config) expandConfigField(expandedCfg map[string]any, fieldPath string) { + // Check if this field supports autoconf expansion + expandFunc, supported := supportedAutoConfFields[fieldPath] + if !supported { + return + } + + // Handle top-level fields (no dot in path) + if !strings.Contains(fieldPath, ".") { + if _, exists := expandedCfg[fieldPath]; exists { + expandedCfg[fieldPath] = expandFunc(c) + } + return + } + + // Handle nested fields (section.field format) + parts := strings.SplitN(fieldPath, ".", 2) + if len(parts) != 2 { + return + } + + sectionName, fieldName := parts[0], parts[1] + if section, exists := expandedCfg[sectionName]; exists { + if sectionMap, ok := section.(map[string]any); ok { + if _, exists := sectionMap[fieldName]; exists { + sectionMap[fieldName] = expandFunc(c) + expandedCfg[sectionName] = sectionMap + } + } + } +} + +// ExpandAutoConfValues expands "auto" placeholders in config with their actual values using the same methods as the daemon +func (c *Config) ExpandAutoConfValues(cfg map[string]any) (map[string]any, error) { + // Create a deep copy of the config map to avoid modifying the original + expandedCfg := maps.Clone(cfg) + + // Use the same expansion methods that the daemon uses - ensures runtime consistency + // Unified expansion for all supported autoconf fields + c.expandConfigField(expandedCfg, "Bootstrap") + c.expandConfigField(expandedCfg, "DNS.Resolvers") + c.expandConfigField(expandedCfg, "Routing.DelegatedRouters") + c.expandConfigField(expandedCfg, "Ipns.DelegatedPublishers") + + return expandedCfg, nil +} + +// supportedAutoConfFields maps field keys to their expansion functions +var supportedAutoConfFields = map[string]func(*Config) any{ + "Bootstrap": func(c *Config) any { + expanded := c.BootstrapWithAutoConf() + return stringSliceToInterfaceSlice(expanded) + }, + "DNS.Resolvers": func(c *Config) any { + expanded := c.DNSResolversWithAutoConf() + return stringMapToInterfaceMap(expanded) + }, + "Routing.DelegatedRouters": func(c *Config) any { + expanded := c.DelegatedRoutersWithAutoConf() + return stringSliceToInterfaceSlice(expanded) + }, + "Ipns.DelegatedPublishers": func(c *Config) any { + expanded := c.DelegatedPublishersWithAutoConf() + return stringSliceToInterfaceSlice(expanded) + }, +} + +// ExpandConfigField expands auto values for a specific config field using the same methods as the daemon +func (c *Config) ExpandConfigField(key string, value any) any { + if expandFunc, supported := supportedAutoConfFields[key]; supported { + return expandFunc(c) + } + + // Return original value if no expansion needed (not a field that supports auto values) + return value +} + +// Helper functions for type conversion between string types and any types for JSON compatibility + +func stringSliceToInterfaceSlice(slice []string) []any { + result := make([]any, len(slice)) + for i, v := range slice { + result[i] = v + } + return result +} + +func stringMapToInterfaceMap(m map[string]string) map[string]any { + result := make(map[string]any) + for k, v := range m { + result[k] = v + } + return result +} diff --git a/config/autoconf_client.go b/config/autoconf_client.go new file mode 100644 index 000000000..1775fc445 --- /dev/null +++ b/config/autoconf_client.go @@ -0,0 +1,136 @@ +package config + +import ( + "fmt" + "path/filepath" + "sync" + + "github.com/ipfs/boxo/autoconf" + logging "github.com/ipfs/go-log/v2" + version "github.com/ipfs/kubo" +) + +var autoconfLog = logging.Logger("autoconf") + +// Singleton state for autoconf client +var ( + clientOnce sync.Once + clientCache *autoconf.Client + clientErr error +) + +// GetAutoConfClient returns a cached autoconf client or creates a new one. +// This is thread-safe and uses a singleton pattern. +func GetAutoConfClient(cfg *Config) (*autoconf.Client, error) { + clientOnce.Do(func() { + clientCache, clientErr = newAutoConfClient(cfg) + }) + return clientCache, clientErr +} + +// newAutoConfClient creates a new autoconf client with the given config +func newAutoConfClient(cfg *Config) (*autoconf.Client, error) { + // Get repo path for cache directory + repoPath, err := PathRoot() + if err != nil { + return nil, fmt.Errorf("failed to get repo path: %w", err) + } + + // Prepare refresh interval with nil check + refreshInterval := cfg.AutoConf.RefreshInterval + if refreshInterval == nil { + refreshInterval = &OptionalDuration{} + } + + // Use default URL if not specified + url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL) + + // Build client options + options := []autoconf.Option{ + autoconf.WithCacheDir(filepath.Join(repoPath, "autoconf")), + autoconf.WithUserAgent(version.GetUserAgentVersion()), + autoconf.WithCacheSize(DefaultAutoConfCacheSize), + autoconf.WithTimeout(DefaultAutoConfTimeout), + autoconf.WithRefreshInterval(refreshInterval.WithDefault(DefaultAutoConfRefreshInterval)), + autoconf.WithFallback(autoconf.GetMainnetFallbackConfig), + autoconf.WithURL(url), + } + + return autoconf.NewClient(options...) +} + +// ValidateAutoConfWithRepo validates that autoconf setup is correct at daemon startup with repo access +func ValidateAutoConfWithRepo(cfg *Config, swarmKeyExists bool) error { + if !cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) { + // AutoConf is disabled, check for "auto" values and warn + return validateAutoConfDisabled(cfg) + } + + // Check for private network with default mainnet URL + url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL) + if swarmKeyExists && url == DefaultAutoConfURL { + return fmt.Errorf("AutoConf cannot use the default mainnet URL (%s) on a private network (swarm.key or LIBP2P_FORCE_PNET detected). Either disable AutoConf by setting AutoConf.Enabled=false, or configure AutoConf.URL to point to a configuration service specific to your private swarm", DefaultAutoConfURL) + } + + // Further validation will happen lazily when config is accessed + return nil +} + +// validateAutoConfDisabled checks for "auto" values when AutoConf is disabled and logs errors +func validateAutoConfDisabled(cfg *Config) error { + hasAutoValues := false + var errors []string + + // Check Bootstrap + for _, peer := range cfg.Bootstrap { + if peer == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false") + break + } + } + + // Check DNS.Resolvers + if cfg.DNS.Resolvers != nil { + for _, resolver := range cfg.DNS.Resolvers { + if resolver == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "DNS.Resolvers contains 'auto' but AutoConf.Enabled=false") + break + } + } + } + + // Check Routing.DelegatedRouters + for _, router := range cfg.Routing.DelegatedRouters { + if router == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false") + break + } + } + + // Check Ipns.DelegatedPublishers + for _, publisher := range cfg.Ipns.DelegatedPublishers { + if publisher == AutoPlaceholder { + hasAutoValues = true + errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false") + break + } + } + + // Log all errors + for _, errMsg := range errors { + autoconfLog.Error(errMsg) + } + + // If only auto values exist and no static ones, fail to start + if hasAutoValues { + if len(cfg.Bootstrap) == 1 && cfg.Bootstrap[0] == AutoPlaceholder { + autoconfLog.Error("Kubo cannot start with only 'auto' Bootstrap values when AutoConf.Enabled=false") + return fmt.Errorf("no usable bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false) but 'auto' placeholder is used in Bootstrap config. Either set AutoConf.Enabled=true to enable automatic configuration, or replace 'auto' with specific Bootstrap peer addresses") + } + } + + return nil +} diff --git a/config/autoconf_test.go b/config/autoconf_test.go new file mode 100644 index 000000000..f4d447dc5 --- /dev/null +++ b/config/autoconf_test.go @@ -0,0 +1,92 @@ +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfDefaults(t *testing.T) { + // Test that AutoConf has the correct default values + cfg := &Config{ + AutoConf: AutoConf{ + URL: NewOptionalString(DefaultAutoConfURL), + Enabled: True, + }, + } + + assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)) + assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled)) + + // Test default refresh interval + if cfg.AutoConf.RefreshInterval == nil { + // This is expected - nil means use default + duration := (*OptionalDuration)(nil).WithDefault(DefaultAutoConfRefreshInterval) + assert.Equal(t, DefaultAutoConfRefreshInterval, duration) + } +} + +func TestAutoConfProfile(t *testing.T) { + cfg := &Config{ + Bootstrap: []string{"some", "existing", "peers"}, + DNS: DNS{ + Resolvers: map[string]string{ + "eth.": "https://example.com", + }, + }, + Routing: Routing{ + DelegatedRouters: []string{"https://existing.router"}, + }, + Ipns: Ipns{ + DelegatedPublishers: []string{"https://existing.publisher"}, + }, + AutoConf: AutoConf{ + Enabled: False, + }, + } + + // Apply autoconf profile + profile, ok := Profiles["autoconf-on"] + require.True(t, ok, "autoconf-on profile not found") + + err := profile.Transform(cfg) + require.NoError(t, err) + + // Check that values were set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap) + assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."]) + assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters) + assert.Equal(t, []string{AutoPlaceholder}, cfg.Ipns.DelegatedPublishers) + + // Check that AutoConf was enabled + assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled)) + + // Check that URL was set + assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)) +} + +func TestInitWithAutoValues(t *testing.T) { + identity := Identity{ + PeerID: "QmTest", + } + + cfg, err := InitWithIdentity(identity) + require.NoError(t, err) + + // Check that Bootstrap is set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap) + + // Check that DNS resolver is set to "auto" + assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."]) + + // Check that DelegatedRouters is set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters) + + // Check that DelegatedPublishers is set to "auto" + assert.Equal(t, []string{AutoPlaceholder}, cfg.Ipns.DelegatedPublishers) + + // Check that AutoConf is enabled with correct URL + assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled)) + assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)) +} diff --git a/config/bootstrap_peers.go b/config/bootstrap_peers.go index 55fe66a98..54670b4c9 100644 --- a/config/bootstrap_peers.go +++ b/config/bootstrap_peers.go @@ -2,28 +2,11 @@ package config import ( "errors" - "fmt" peer "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" ) -// DefaultBootstrapAddresses are the hardcoded bootstrap addresses -// for IPFS. they are nodes run by the IPFS team. docs on these later. -// As with all p2p networks, bootstrap is an important security concern. -// -// NOTE: This is here -- and not inside cmd/ipfs/init.go -- because of an -// import dependency issue. TODO: move this into a config/default/ package. -var DefaultBootstrapAddresses = []string{ - "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", - "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", // rust-libp2p-server - "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", - "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", - "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", // js-libp2p-amino-dht-bootstrapper - "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io - "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io -} - // ErrInvalidPeerAddr signals an address is not a valid peer address. var ErrInvalidPeerAddr = errors.New("invalid peer address") @@ -31,18 +14,6 @@ func (c *Config) BootstrapPeers() ([]peer.AddrInfo, error) { return ParseBootstrapPeers(c.Bootstrap) } -// DefaultBootstrapPeers returns the (parsed) set of default bootstrap peers. -// if it fails, it returns a meaningful error for the user. -// This is here (and not inside cmd/ipfs/init) because of module dependency problems. -func DefaultBootstrapPeers() ([]peer.AddrInfo, error) { - ps, err := ParseBootstrapPeers(DefaultBootstrapAddresses) - if err != nil { - return nil, fmt.Errorf(`failed to parse hardcoded bootstrap peers: %w -This is a problem with the ipfs codebase. Please report it to the dev team`, err) - } - return ps, nil -} - func (c *Config) SetBootstrapPeers(bps []peer.AddrInfo) { c.Bootstrap = BootstrapPeerStrings(bps) } diff --git a/config/bootstrap_peers_test.go b/config/bootstrap_peers_test.go index eeea9b5fd..f07f2f24a 100644 --- a/config/bootstrap_peers_test.go +++ b/config/bootstrap_peers_test.go @@ -1,24 +1,28 @@ package config import ( - "sort" "testing" + + "github.com/ipfs/boxo/autoconf" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestBoostrapPeerStrings(t *testing.T) { - parsed, err := ParseBootstrapPeers(DefaultBootstrapAddresses) - if err != nil { - t.Fatal(err) - } +func TestBootstrapPeerStrings(t *testing.T) { + // Test round-trip: string -> parse -> format -> string + // This ensures that parsing and formatting are inverse operations - formatted := BootstrapPeerStrings(parsed) - sort.Strings(formatted) - expected := append([]string{}, DefaultBootstrapAddresses...) - sort.Strings(expected) + // Start with the default bootstrap peer multiaddr strings + originalStrings := autoconf.FallbackBootstrapPeers - for i, s := range formatted { - if expected[i] != s { - t.Fatalf("expected %s, %s", expected[i], s) - } - } + // Parse multiaddr strings into structured peer data + parsed, err := ParseBootstrapPeers(originalStrings) + require.NoError(t, err, "parsing bootstrap peers should succeed") + + // Format the parsed data back into multiaddr strings + formattedStrings := BootstrapPeerStrings(parsed) + + // Verify round-trip: we should get back exactly what we started with + assert.ElementsMatch(t, originalStrings, formattedStrings, + "round-trip through parse/format should preserve all bootstrap peers") } diff --git a/config/config.go b/config/config.go index eee7e768b..3236ad003 100644 --- a/config/config.go +++ b/config/config.go @@ -31,7 +31,9 @@ type Config struct { Pubsub PubsubConfig Peering Peering DNS DNS + Migration Migration + AutoConf AutoConf Provider Provider Reprovider Reprovider diff --git a/config/dns.go b/config/dns.go index 8e1fc85a5..0b269675f 100644 --- a/config/dns.go +++ b/config/dns.go @@ -10,7 +10,7 @@ type DNS struct { // // Example: // - Custom resolver for ENS: `eth.` → `https://dns.eth.limo/dns-query` - // - Override the default OS resolver: `.` → `https://doh.applied-privacy.net/query` + // - Override the default OS resolver: `.` → `https://1.1.1.1/dns-query` Resolvers map[string]string // MaxCacheTTL is the maximum duration DNS entries are valid in the cache. MaxCacheTTL *OptionalDuration `json:",omitempty"` diff --git a/config/gateway.go b/config/gateway.go index 35af598b4..56eb0c395 100644 --- a/config/gateway.go +++ b/config/gateway.go @@ -1,10 +1,18 @@ package config +import ( + "github.com/ipfs/boxo/gateway" +) + const ( DefaultInlineDNSLink = false DefaultDeserializedResponses = true DefaultDisableHTMLErrors = false DefaultExposeRoutingAPI = false + + // Gateway limit defaults from boxo + DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout + DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests ) type GatewaySpec struct { @@ -73,4 +81,21 @@ type Gateway struct { // ExposeRoutingAPI configures the gateway port to expose // routing system as HTTP API at /routing/v1 (https://specs.ipfs.tech/routing/http-routing-v1/). ExposeRoutingAPI Flag + + // RetrievalTimeout enforces a maximum duration for content retrieval: + // - Time to first byte: If the gateway cannot start writing the response within + // this duration (e.g., stuck searching for providers), a 504 Gateway Timeout + // is returned. + // - Time between writes: After the first byte, the timeout resets each time new + // bytes are written to the client. If the gateway cannot write additional data + // within this duration after the last successful write, the response is terminated. + // This helps free resources when the gateway gets stuck looking for providers + // or cannot retrieve the requested content. + // A value of 0 disables this timeout. + RetrievalTimeout *OptionalDuration `json:",omitempty"` + + // MaxConcurrentRequests limits concurrent HTTP requests handled by the gateway. + // Requests beyond this limit receive 429 Too Many Requests with Retry-After header. + // A value of 0 disables the limit. + MaxConcurrentRequests *OptionalInteger `json:",omitempty"` } diff --git a/config/import.go b/config/import.go index 21bf232c1..c51917286 100644 --- a/config/import.go +++ b/config/import.go @@ -21,7 +21,6 @@ const ( // write-batch. The total size of the batch is limited by // BatchMaxnodes and BatchMaxSize. DefaultBatchMaxSize = 100 << 20 // 20MiB - ) var ( diff --git a/config/init.go b/config/init.go index 373d744d4..cc7b22ca8 100644 --- a/config/init.go +++ b/config/init.go @@ -23,11 +23,6 @@ func Init(out io.Writer, nBitsForKeypair int) (*Config, error) { } func InitWithIdentity(identity Identity) (*Config, error) { - bootstrapPeers, err := DefaultBootstrapPeers() - if err != nil { - return nil, err - } - datastore := DefaultDatastoreConfig() conf := &Config{ @@ -40,7 +35,7 @@ func InitWithIdentity(identity Identity) (*Config, error) { Addresses: addressesConfig(), Datastore: datastore, - Bootstrap: BootstrapPeerStrings(bootstrapPeers), + Bootstrap: []string{AutoPlaceholder}, Identity: identity, Discovery: Discovery{ MDNS: MDNS{ @@ -56,7 +51,8 @@ func InitWithIdentity(identity Identity) (*Config, error) { }, Ipns: Ipns{ - ResolveCacheSize: 128, + ResolveCacheSize: 128, + DelegatedPublishers: []string{AutoPlaceholder}, }, Gateway: Gateway{ @@ -72,11 +68,12 @@ func InitWithIdentity(identity Identity) (*Config, error) { RemoteServices: map[string]RemotePinningService{}, }, DNS: DNS{ - Resolvers: map[string]string{}, + Resolvers: map[string]string{ + ".": AutoPlaceholder, + }, }, - Migration: Migration{ - DownloadSources: []string{}, - Keep: "", + Routing: Routing{ + DelegatedRouters: []string{AutoPlaceholder}, }, } diff --git a/config/ipns.go b/config/ipns.go index 288421973..6ffe981bc 100644 --- a/config/ipns.go +++ b/config/ipns.go @@ -20,4 +20,7 @@ type Ipns struct { // Enable namesys pubsub (--enable-namesys-pubsub) UsePubsub Flag `json:",omitempty"` + + // Simplified configuration for delegated IPNS publishers + DelegatedPublishers []string } diff --git a/config/migration.go b/config/migration.go index e172988a9..d2626800c 100644 --- a/config/migration.go +++ b/config/migration.go @@ -2,16 +2,18 @@ package config const DefaultMigrationKeep = "cache" -var DefaultMigrationDownloadSources = []string{"HTTPS", "IPFS"} +// DefaultMigrationDownloadSources defines the default download sources for legacy migrations (repo versions <16). +// Only HTTPS is supported for legacy migrations. IPFS downloads are not supported. +var DefaultMigrationDownloadSources = []string{"HTTPS"} -// Migration configures how migrations are downloaded and if the downloads are -// added to IPFS locally. +// Migration configures how legacy migrations are downloaded (repo versions <16). +// +// DEPRECATED: This configuration only applies to legacy external migrations for repository +// versions below 16. Modern repositories (v16+) use embedded migrations that do not require +// external downloads. These settings will be ignored for modern repository versions. type Migration struct { - // Sources in order of preference, where "IPFS" means use IPFS and "HTTPS" - // means use default gateways. Any other values are interpreted as - // hostnames for custom gateways. Empty list means "use default sources". - DownloadSources []string - // Whether or not to keep the migration after downloading it. - // Options are "discard", "cache", "pin". Empty string for default. - Keep string + // DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16). + DownloadSources []string `json:",omitempty"` + // DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16). + Keep string `json:",omitempty"` } diff --git a/config/plugins.go b/config/plugins.go index 08a1acb34..0c438cbd7 100644 --- a/config/plugins.go +++ b/config/plugins.go @@ -7,5 +7,5 @@ type Plugins struct { type Plugin struct { Disabled bool - Config interface{} + Config interface{} `json:",omitempty"` } diff --git a/config/profile.go b/config/profile.go index ec2e5a0b7..1479bfc13 100644 --- a/config/profile.go +++ b/config/profile.go @@ -87,6 +87,12 @@ is useful when using the daemon in test environments.`, c.Bootstrap = []string{} c.Discovery.MDNS.Enabled = false c.AutoTLS.Enabled = False + c.AutoConf.Enabled = False + + // Explicitly set autoconf-controlled fields to empty when autoconf is disabled + c.DNS.Resolvers = map[string]string{} + c.Routing.DelegatedRouters = []string{} + c.Ipns.DelegatedPublishers = []string{} return nil }, }, @@ -97,11 +103,10 @@ Inverse profile of the test profile.`, Transform: func(c *Config) error { c.Addresses = addressesConfig() - bootstrapPeers, err := DefaultBootstrapPeers() - if err != nil { - return err - } - c.Bootstrap = appendSingle(c.Bootstrap, BootstrapPeerStrings(bootstrapPeers)) + // Use AutoConf system for bootstrap peers + c.Bootstrap = []string{AutoPlaceholder} + c.AutoConf.Enabled = Default + c.AutoConf.URL = nil // Clear URL to use implicit default c.Swarm.DisableNatPortMap = false c.Discovery.MDNS.Enabled = true @@ -349,6 +354,39 @@ fetching may be degraded. return nil }, }, + "autoconf-on": { + Description: `Sets configuration to use implicit defaults from remote autoconf service. +Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to "auto". +This profile requires AutoConf to be enabled and configured.`, + + Transform: func(c *Config) error { + c.Bootstrap = []string{AutoPlaceholder} + c.DNS.Resolvers = map[string]string{ + ".": AutoPlaceholder, + } + c.Routing.DelegatedRouters = []string{AutoPlaceholder} + c.Ipns.DelegatedPublishers = []string{AutoPlaceholder} + c.AutoConf.Enabled = True + if c.AutoConf.URL == nil { + c.AutoConf.URL = NewOptionalString(DefaultAutoConfURL) + } + return nil + }, + }, + "autoconf-off": { + Description: `Disables AutoConf and sets networking fields to empty for manual configuration. +Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to empty. +Use this when you want normal networking but prefer manual control over all endpoints.`, + + Transform: func(c *Config) error { + c.Bootstrap = nil + c.DNS.Resolvers = nil + c.Routing.DelegatedRouters = nil + c.Ipns.DelegatedPublishers = nil + c.AutoConf.Enabled = False + return nil + }, + }, } func getAvailablePort() (port int, err error) { diff --git a/config/reprovider.go b/config/reprovider.go index 3e8a5b476..e7d687360 100644 --- a/config/reprovider.go +++ b/config/reprovider.go @@ -1,15 +1,44 @@ package config -import "time" +import ( + "strings" + "time" +) const ( DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326 DefaultReproviderStrategy = "all" ) +type ReproviderStrategy int + +const ( + ReproviderStrategyAll ReproviderStrategy = 1 << iota + ReproviderStrategyPinned + ReproviderStrategyRoots + ReproviderStrategyMFS +) + // Reprovider configuration describes how CID from local datastore are periodically re-announced to routing systems. // For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provider.* type Reprovider struct { Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network Strategy *OptionalString `json:",omitempty"` // Which keys to announce } + +func ParseReproviderStrategy(s string) ReproviderStrategy { + var strategy ReproviderStrategy + for _, part := range strings.Split(s, "+") { + switch part { + case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all") + return ReproviderStrategyAll + case "pinned": + strategy |= ReproviderStrategyPinned + case "roots": + strategy |= ReproviderStrategyRoots + case "mfs": + strategy |= ReproviderStrategyMFS + } + } + return strategy +} diff --git a/config/reprovider_test.go b/config/reprovider_test.go new file mode 100644 index 000000000..20b338eb0 --- /dev/null +++ b/config/reprovider_test.go @@ -0,0 +1,27 @@ +package config + +import "testing" + +func TestParseReproviderStrategy(t *testing.T) { + tests := []struct { + input string + expect ReproviderStrategy + }{ + {"all", ReproviderStrategyAll}, + {"pinned", ReproviderStrategyPinned}, + {"mfs", ReproviderStrategyMFS}, + {"pinned+mfs", ReproviderStrategyPinned | ReproviderStrategyMFS}, + {"invalid", 0}, + {"all+invalid", ReproviderStrategyAll}, + {"", ReproviderStrategyAll}, + {"flat", ReproviderStrategyAll}, // deprecated, maps to "all" + {"flat+all", ReproviderStrategyAll}, + } + + for _, tt := range tests { + result := ParseReproviderStrategy(tt.input) + if result != tt.expect { + t.Errorf("ParseReproviderStrategy(%q) = %d, want %d", tt.input, result, tt.expect) + } + } +} diff --git a/config/routing.go b/config/routing.go index aea60c3bd..bd234e8a3 100644 --- a/config/routing.go +++ b/config/routing.go @@ -11,6 +11,7 @@ import ( const ( DefaultAcceleratedDHTClient = false DefaultLoopbackAddressesOnLanDHT = false + DefaultRoutingType = "auto" CidContactRoutingURL = "https://cid.contact" PublicGoodDelegatedRoutingURL = "https://delegated-ipfs.dev" // cid.contact + amino dht (incl. IPNS PUTs) EnvHTTPRouters = "IPFS_HTTP_ROUTERS" @@ -18,11 +19,6 @@ const ( ) var ( - // Default HTTP routers used in parallel to DHT when Routing.Type = "auto" - DefaultHTTPRouters = getEnvOrDefault(EnvHTTPRouters, []string{ - CidContactRoutingURL, // https://github.com/ipfs/kubo/issues/9422#issuecomment-1338142084 - }) - // Default filter-protocols to pass along with delegated routing requests (as defined in IPIP-484) // and also filter out locally DefaultHTTPRoutersFilterProtocols = getEnvOrDefault(EnvHTTPRoutersFilterProtocols, []string{ @@ -37,8 +33,9 @@ var ( type Routing struct { // Type sets default daemon routing mode. // - // Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", or "custom". + // Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", "delegated", or "custom". // When unset or set to "auto", DHT and implicit routers are used. + // When "delegated" is set, only HTTP delegated routers and IPNS publishers are used (no DHT). // When "custom" is set, user-provided Routing.Routers is used. Type *OptionalString `json:",omitempty"` @@ -49,7 +46,7 @@ type Routing struct { IgnoreProviders []string `json:",omitempty"` // Simplified configuration used by default when Routing.Type=auto|autoclient - DelegatedRouters []string `json:",omitempty"` + DelegatedRouters []string // Advanced configuration used when Routing.Type=custom Routers Routers `json:",omitempty"` diff --git a/core/commands/add.go b/core/commands/add.go index f800e4f42..b24eab083 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -37,6 +37,7 @@ type AddEvent struct { } const ( + pinNameOptionName = "pin-name" quietOptionName = "quiet" quieterOptionName = "quieter" silentOptionName = "silent" @@ -75,13 +76,15 @@ Adds the content of to IPFS. Use -r to add directories (recursively). `, LongDescription: ` Adds the content of to IPFS. Use -r to add directories. -Note that directories are added recursively, to form the IPFS -MerkleDAG. +Note that directories are added recursively, and big files are chunked, +to form the IPFS MerkleDAG. Learn more: https://docs.ipfs.tech/concepts/merkle-dag/ -If the daemon is not running, it will just add locally. +If the daemon is not running, it will just add locally to the repo at $IPFS_PATH. If the daemon is started later, it will be advertised after a few seconds when the reprovider runs. +BASIC EXAMPLES: + The wrap option, '-w', wraps the file (or files, if using the recursive option) in a directory. This directory contains only the files which have been added, and means that the file retains @@ -100,6 +103,12 @@ You can now refer to the added file in a gateway, like so: Files imported with 'ipfs add' are protected from GC (implicit '--pin=true'), but it is up to you to remember the returned CID to get the data back later. +If you need to back up or transport content-addressed data using a non-IPFS +medium, CID can be preserved with CAR files. +See 'dag export' and 'dag import' for more information. + +MFS INTEGRATION: + Passing '--to-files' creates a reference in Files API (MFS), making it easier to find it in the future: @@ -111,6 +120,8 @@ to find it in the future: See 'ipfs files --help' to learn more about using MFS for keeping track of added files and directories. +CHUNKING EXAMPLES: + The chunker option, '-s', specifies the chunking strategy that dictates how to break files into blocks. Blocks with same content can be deduplicated. Different chunking strategies will produce different @@ -131,14 +142,16 @@ want to use a 1024 times larger chunk sizes for most files. You can now check what blocks have been created by: - > ipfs object links QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 + > ipfs ls QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059 Qmf7ZQeSxq2fJVJbCmgTrLLVN9tDR9Wy5k75DxQKuz5Gyt 1195 - > ipfs object links Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn + > ipfs ls Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059 QmerURi9k4XzKCaaPbsK6BL5pMEjF7PGphjDvkkjDtsVf3 868 QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338 +ADVANCED CONFIGURATION: + Finally, a note on hash (CID) determinism and 'ipfs add' command. Almost all the flags provided by this command will change the final CID, and @@ -146,12 +159,11 @@ new flags may be added in the future. It is not guaranteed for the implicit defaults of 'ipfs add' to remain the same in future Kubo releases, or for other IPFS software to use the same import parameters as Kubo. +Note: CIDv1 is automatically used when using non-default options like custom +hash functions or when raw-leaves is explicitly enabled. + Use Import.* configuration options to override global implicit defaults: https://github.com/ipfs/kubo/blob/master/docs/config.md#import - -If you need to back up or transport content-addressed data using a non-IPFS -medium, CID can be preserved with CAR files. -See 'dag export' and 'dag import' for more information. `, }, @@ -159,36 +171,45 @@ See 'dag export' and 'dag import' for more information. cmds.FileArg("path", true, true, "The path to a file to be added to IPFS.").EnableRecursive().EnableStdin(), }, Options: []cmds.Option{ + // Input Processing cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive) cmds.OptionDerefArgs, // a builtin option that resolves passed in filesystem links (--dereference-args) cmds.OptionStdinName, // a builtin option that optionally allows wrapping stdin into a named file cmds.OptionHidden, cmds.OptionIgnore, cmds.OptionIgnoreRules, + // Output Control cmds.BoolOption(quietOptionName, "q", "Write minimal output."), cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."), cmds.BoolOption(silentOptionName, "Write no output."), cmds.BoolOption(progressOptionName, "p", "Stream progress data."), - cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."), + // Basic Add Behavior cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."), cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."), - cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Default: Import.UnixFSChunker"), - cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. Default: Import.UnixFSRawLeaves"), - cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. (experimental) Default: Import.UnixFSFileMaxLinks"), - cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. Default: Import.UnixFSDirectoryMaxLinks. WARNING: experimental, Import.UnixFSHAMTThreshold is a safer alternative."), - cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). Default: Import.UnixFSHAMTDirectoryMaxFanout WARNING: experimental, see Import.UnixFSHAMTDirectorySizeThreshold as well."), - cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. (experimental)"), - cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"), - cmds.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true. Default: Import.CidVersion"), - cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. Default: Import.HashFunction"), - cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. (experimental)"), - cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. (experimental)").WithDefault(32), cmds.BoolOption(pinOptionName, "Pin locally to protect added files from garbage collection.").WithDefault(true), + cmds.StringOption(pinNameOptionName, "Name to use for the pin. Requires explicit value (e.g., --pin-name=myname)."), + // MFS Integration cmds.StringOption(toFilesOptionName, "Add reference to Files API (MFS) at the provided path."), - cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. Disables raw-leaves. (experimental)"), - cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. Disables raw-leaves. (experimental)"), - cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. Disables raw-leaves. (experimental)"), - cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). Disables raw-leaves. (experimental)"), + // CID & Hashing + cmds.IntOption(cidVersionOptionName, "CID version (0 or 1). CIDv1 automatically enables raw-leaves and is required for non-sha2-256 hashes. Default: Import.CidVersion"), + cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. Default: Import.HashFunction"), + cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. Note: CIDv1 automatically enables raw-leaves. Default: false for CIDv0, true for CIDv1 (Import.UnixFSRawLeaves)"), + // Chunking & DAG Structure + cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Files larger than chunk size are split into multiple blocks. Default: Import.UnixFSChunker"), + cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."), + // Advanced UnixFS Limits + cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. WARNING: experimental. Default: Import.UnixFSFileMaxLinks"), + cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSDirectoryMaxLinks"), + cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"), + // Experimental Features + cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. WARNING: experimental"), + cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. WARNING: experimental").WithDefault(32), + cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. WARNING: experimental"), + cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. WARNING: experimental"), + cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), + cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), + cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), + cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). WARNING: experimental, forces dag-pb for root block, disables raw-leaves"), cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"), }, PreRun: func(req *cmds.Request, env cmds.Environment) error { @@ -230,6 +251,7 @@ See 'dag export' and 'dag import' for more information. silent, _ := req.Options[silentOptionName].(bool) chunker, _ := req.Options[chunkerOptionName].(string) dopin, _ := req.Options[pinOptionName].(bool) + pinName, pinNameSet := req.Options[pinNameOptionName].(string) rawblks, rbset := req.Options[rawLeavesOptionName].(bool) maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int) maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int) @@ -260,6 +282,8 @@ See 'dag export' and 'dag import' for more information. cidVer = int(cfg.Import.CidVersion.WithDefault(config.DefaultCidVersion)) } + // Pin names are only used when explicitly provided via --pin-name=value + if !rbset && cfg.Import.UnixFSRawLeaves != config.Default { rbset = true rawblks = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves) @@ -296,7 +320,9 @@ See 'dag export' and 'dag import' for more information. if onlyHash && toFilesSet { return fmt.Errorf("%s and %s options are not compatible", onlyHashOptionName, toFilesOptionName) } - + if !dopin && pinNameSet { + return fmt.Errorf("%s option requires %s to be set", pinNameOptionName, pinOptionName) + } if wrap && toFilesSet { return fmt.Errorf("%s and %s options are not compatible", wrapOptionName, toFilesOptionName) } @@ -326,7 +352,7 @@ See 'dag export' and 'dag import' for more information. options.Unixfs.Chunker(chunker), - options.Unixfs.Pin(dopin), + options.Unixfs.Pin(dopin, pinName), options.Unixfs.HashOnly(onlyHash), options.Unixfs.FsCache(fscache), options.Unixfs.Nocopy(nocopy), diff --git a/core/commands/bootstrap.go b/core/commands/bootstrap.go index 6d760f47f..e5a55dfab 100644 --- a/core/commands/bootstrap.go +++ b/core/commands/bootstrap.go @@ -41,15 +41,15 @@ Running 'ipfs bootstrap' with no arguments will run 'ipfs bootstrap list'. }, } -const ( - defaultOptionName = "default" -) - var bootstrapAddCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Add peers to the bootstrap list.", ShortDescription: `Outputs a list of peers that were added (that weren't already in the bootstrap list). + +The special values 'default' and 'auto' can be used to add the default +bootstrap peers. Both are equivalent and will add the 'auto' placeholder to +the bootstrap list, which gets resolved using the AutoConf system. ` + bootstrapSecurityWarning, }, @@ -57,29 +57,23 @@ in the bootstrap list). cmds.StringArg("peer", false, true, peerOptionDesc).EnableStdin(), }, - Options: []cmds.Option{ - cmds.BoolOption(defaultOptionName, "Add default bootstrap nodes. (Deprecated, use 'default' subcommand instead)"), - }, - Subcommands: map[string]*cmds.Command{ - "default": bootstrapAddDefaultCmd, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - deflt, _ := req.Options[defaultOptionName].(bool) - - inputPeers := config.DefaultBootstrapAddresses - if !deflt { - if err := req.ParseBodyArgs(); err != nil { - return err - } - - inputPeers = req.Arguments + if err := req.ParseBodyArgs(); err != nil { + return err } + inputPeers := req.Arguments if len(inputPeers) == 0 { return errors.New("no bootstrap peers to add") } + // Convert "default" to "auto" for backward compatibility + for i, peer := range inputPeers { + if peer == "default" { + inputPeers[i] = "auto" + } + } + cfgRoot, err := cmdenv.GetConfigRoot(env) if err != nil { return err @@ -95,6 +89,13 @@ in the bootstrap list). return err } + // Check if trying to add "auto" when AutoConf is disabled + for _, peer := range inputPeers { + if peer == config.AutoPlaceholder && !cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) { + return errors.New("cannot add default bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false). Enable AutoConf by setting AutoConf.Enabled=true in your config, or add specific peer addresses instead") + } + } + added, err := bootstrapAdd(r, cfg, inputPeers) if err != nil { return err @@ -110,44 +111,6 @@ in the bootstrap list). }, } -var bootstrapAddDefaultCmd = &cmds.Command{ - Helptext: cmds.HelpText{ - Tagline: "Add default peers to the bootstrap list.", - ShortDescription: `Outputs a list of peers that were added (that weren't already -in the bootstrap list).`, - }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - cfgRoot, err := cmdenv.GetConfigRoot(env) - if err != nil { - return err - } - - r, err := fsrepo.Open(cfgRoot) - if err != nil { - return err - } - - defer r.Close() - cfg, err := r.Config() - if err != nil { - return err - } - - added, err := bootstrapAdd(r, cfg, config.DefaultBootstrapAddresses) - if err != nil { - return err - } - - return cmds.EmitOnce(res, &BootstrapOutput{added}) - }, - Type: BootstrapOutput{}, - Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *BootstrapOutput) error { - return bootstrapWritePeers(w, "added ", out.Peers) - }), - }, -} - const ( bootstrapAllOptionName = "all" ) @@ -251,6 +214,9 @@ var bootstrapListCmd = &cmds.Command{ Tagline: "Show peers in the bootstrap list.", ShortDescription: "Peers are output in the format '/'.", }, + Options: []cmds.Option{ + cmds.BoolOption(configExpandAutoName, "Expand 'auto' placeholders from AutoConf service."), + }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { cfgRoot, err := cmdenv.GetConfigRoot(env) @@ -268,12 +234,16 @@ var bootstrapListCmd = &cmds.Command{ return err } - peers, err := cfg.BootstrapPeers() - if err != nil { - return err + // Check if user wants to expand auto values + expandAuto, _ := req.Options[configExpandAutoName].(bool) + if expandAuto { + // Use the same expansion method as the daemon + expandedBootstrap := cfg.BootstrapWithAutoConf() + return cmds.EmitOnce(res, &BootstrapOutput{expandedBootstrap}) } - return cmds.EmitOnce(res, &BootstrapOutput{config.BootstrapPeerStrings(peers)}) + // Simply return the bootstrap config as-is, including any "auto" values + return cmds.EmitOnce(res, &BootstrapOutput{cfg.Bootstrap}) }, Type: BootstrapOutput{}, Encoders: cmds.EncoderMap{ @@ -297,7 +267,11 @@ func bootstrapWritePeers(w io.Writer, prefix string, peers []string) error { } func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, error) { + // Validate peers - skip validation for "auto" placeholder for _, p := range peers { + if p == config.AutoPlaceholder { + continue // Skip validation for "auto" placeholder + } m, err := ma.NewMultiaddr(p) if err != nil { return nil, err @@ -347,6 +321,16 @@ func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, er } func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]string, error) { + // Check if bootstrap contains "auto" + hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder) + + if hasAuto && cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) { + // Cannot selectively remove peers when using "auto" bootstrap + // Users should either disable AutoConf or replace "auto" with specific peers + return nil, fmt.Errorf("cannot remove individual bootstrap peers when using 'auto' placeholder: the 'auto' value is managed by AutoConf. Either disable AutoConf by setting AutoConf.Enabled=false and replace 'auto' with specific peer addresses, or use 'ipfs bootstrap rm --all' to remove all peers") + } + + // Original logic for non-auto bootstrap removed := make([]peer.AddrInfo, 0, len(toRemove)) keep := make([]peer.AddrInfo, 0, len(cfg.Bootstrap)) @@ -406,16 +390,28 @@ func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]stri } func bootstrapRemoveAll(r repo.Repo, cfg *config.Config) ([]string, error) { - removed, err := cfg.BootstrapPeers() - if err != nil { - return nil, err + // Check if bootstrap contains "auto" - if so, we need special handling + hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder) + + var removed []string + if hasAuto { + // When "auto" is present, we can't parse it as peer.AddrInfo + // Just return the raw bootstrap list as strings for display + removed = slices.Clone(cfg.Bootstrap) + } else { + // Original logic for configs without "auto" + removedPeers, err := cfg.BootstrapPeers() + if err != nil { + return nil, err + } + removed = config.BootstrapPeerStrings(removedPeers) } cfg.Bootstrap = nil if err := r.SetConfig(cfg); err != nil { return nil, err } - return config.BootstrapPeerStrings(removed), nil + return removed, nil } const bootstrapSecurityWarning = ` diff --git a/core/commands/cid.go b/core/commands/cid.go index 8491715be..0be9f6cc1 100644 --- a/core/commands/cid.go +++ b/core/commands/cid.go @@ -121,7 +121,8 @@ The optional format string is a printf style format string: return "" }), }, - Type: CidFormatRes{}, + Type: CidFormatRes{}, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } type CidFormatRes struct { @@ -151,6 +152,7 @@ Useful when processing third-party CIDs which could come with arbitrary formats. }, PostRun: cidFmtCmd.PostRun, Type: cidFmtCmd.Type, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } type cidFormatOpts struct { @@ -291,7 +293,7 @@ var basesCmd = &cmds.Command{ multibaseSorter{val}.Sort() for _, v := range val { code := v.Code - if code < 32 || code >= 127 { + if !unicode.IsPrint(rune(code)) { // don't display non-printable prefixes code = ' ' } @@ -309,7 +311,8 @@ var basesCmd = &cmds.Command{ return nil }), }, - Type: []CodeAndName{}, + Type: []CodeAndName{}, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } const ( @@ -369,7 +372,8 @@ var codecsCmd = &cmds.Command{ return nil }), }, - Type: []CodeAndName{}, + Type: []CodeAndName{}, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } var hashesCmd = &cmds.Command{ @@ -393,6 +397,7 @@ var hashesCmd = &cmds.Command{ }, Encoders: codecsCmd.Encoders, Type: codecsCmd.Type, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), } type multibaseSorter struct { @@ -404,7 +409,7 @@ func (s multibaseSorter) Sort() { if n := cmp.Compare(unicode.ToLower(rune(a.Code)), unicode.ToLower(rune(b.Code))); n != 0 { return n } - // lowecase letters should come before uppercase + // lowercase letters should come before uppercase return cmp.Compare(b.Code, a.Code) }) } diff --git a/core/commands/commands.go b/core/commands/commands.go index 9e2b60dc8..b1cd6c45d 100644 --- a/core/commands/commands.go +++ b/core/commands/commands.go @@ -233,12 +233,11 @@ type nonFatalError string // contain non-fatal errors. The helper function is allowed to panic // on internal errors. func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error { - return func(res cmds.Response, re cmds.ResponseEmitter) (err error) { + return func(res cmds.Response, re cmds.ResponseEmitter) (rerr error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("internal error: %v", r) + rerr = fmt.Errorf("internal error: %v", r) } - re.Close() }() var errors bool @@ -248,7 +247,8 @@ func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds. if err == io.EOF { break } - return err + rerr = err + return } errorMsg := procVal(v, os.Stdout) @@ -260,8 +260,8 @@ func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds. } if errors { - return fmt.Errorf("errors while displaying some entries") + rerr = fmt.Errorf("errors while displaying some entries") } - return nil + return } } diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index d8b4c4083..23782f209 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -30,7 +30,6 @@ func TestCommands(t *testing.T) { "/block/stat", "/bootstrap", "/bootstrap/add", - "/bootstrap/add/default", "/bootstrap/list", "/bootstrap/rm", "/bootstrap/rm/all", @@ -163,6 +162,9 @@ func TestCommands(t *testing.T) { "/pin/update", "/pin/verify", "/ping", + "/provide", + "/provide/clear", + "/provide/stat", "/pubsub", "/pubsub/ls", "/pubsub/peers", diff --git a/core/commands/config.go b/core/commands/config.go index 8329e972f..c28466a98 100644 --- a/core/commands/config.go +++ b/core/commands/config.go @@ -5,8 +5,10 @@ import ( "errors" "fmt" "io" + "maps" "os" "os/exec" + "slices" "strings" "github.com/anmitsu/go-shlex" @@ -33,6 +35,7 @@ const ( configBoolOptionName = "bool" configJSONOptionName = "json" configDryRunOptionName = "dry-run" + configExpandAutoName = "expand-auto" ) var ConfigCmd = &cmds.Command{ @@ -75,6 +78,7 @@ Set multiple values in the 'Addresses.AppendAnnounce' array: Options: []cmds.Option{ cmds.BoolOption(configBoolOptionName, "Set a boolean value."), cmds.BoolOption(configJSONOptionName, "Parse stringified JSON."), + cmds.BoolOption(configExpandAutoName, "Expand 'auto' placeholders to their expanded values from AutoConf service."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { args := req.Arguments @@ -105,6 +109,11 @@ Set multiple values in the 'Addresses.AppendAnnounce' array: } defer r.Close() if len(args) == 2 { + // Check if user is trying to write config with expand flag + if expandAuto, _ := req.Options[configExpandAutoName].(bool); expandAuto { + return fmt.Errorf("--expand-auto can only be used for reading config values, not for setting them") + } + value := args[1] if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON { @@ -121,7 +130,13 @@ Set multiple values in the 'Addresses.AppendAnnounce' array: output, err = setConfig(r, key, value) } } else { - output, err = getConfig(r, key) + // Check if user wants to expand auto values for getter + expandAuto, _ := req.Options[configExpandAutoName].(bool) + if expandAuto { + output, err = getConfigWithAutoExpand(r, key) + } else { + output, err = getConfig(r, key) + } } if err != nil { @@ -208,6 +223,23 @@ NOTE: For security reasons, this command will omit your private key and remote s return err } + // Check if user wants to expand auto values + expandAuto, _ := req.Options[configExpandAutoName].(bool) + if expandAuto { + // Load full config to use resolution methods + var fullCfg config.Config + err = json.Unmarshal(data, &fullCfg) + if err != nil { + return err + } + + // Expand auto values and update the map + cfg, err = fullCfg.ExpandAutoConfValues(cfg) + if err != nil { + return err + } + } + cfg, err = scrubValue(cfg, []string{config.IdentityTag, config.PrivKeyTag}) if err != nil { return err @@ -417,7 +449,8 @@ var configProfileApplyCmd = &cmds.Command{ func buildProfileHelp() string { var out string - for name, profile := range config.Profiles { + for _, name := range slices.Sorted(maps.Keys(config.Profiles)) { + profile := config.Profiles[name] dlines := strings.Split(profile.Description, "\n") for i := range dlines { dlines[i] = " " + dlines[i] @@ -498,6 +531,28 @@ func getConfig(r repo.Repo, key string) (*ConfigField, error) { }, nil } +func getConfigWithAutoExpand(r repo.Repo, key string) (*ConfigField, error) { + // First get the current value + value, err := r.GetConfigKey(key) + if err != nil { + return nil, fmt.Errorf("failed to get config value: %q", err) + } + + // Load full config for resolution + fullCfg, err := r.Config() + if err != nil { + return nil, fmt.Errorf("failed to load config: %q", err) + } + + // Expand auto values based on the key + expandedValue := fullCfg.ExpandConfigField(key, value) + + return &ConfigField{ + Key: key, + Value: expandedValue, + }, nil +} + func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) { err := r.SetConfigKey(key, value) if err != nil { diff --git a/core/commands/log.go b/core/commands/log.go index 81427aa13..0ebb1ac43 100644 --- a/core/commands/log.go +++ b/core/commands/log.go @@ -3,16 +3,37 @@ package commands import ( "fmt" "io" + "slices" cmds "github.com/ipfs/go-ipfs-cmds" logging "github.com/ipfs/go-log/v2" ) -// Golang os.Args overrides * and replaces the character argument with -// an array which includes every file in the user's CWD. As a -// workaround, we use 'all' instead. The util library still uses * so -// we convert it at this step. -var logAllKeyword = "all" +const ( + // allLogSubsystems is used to specify all log subsystems when setting the + // log level. + allLogSubsystems = "*" + // allLogSubsystemsAlias is a convenience alias for allLogSubsystems that + // doesn't require shell escaping. + allLogSubsystemsAlias = "all" + // defaultLogLevel is used to request and to identify the default log + // level. + defaultLogLevel = "default" + // defaultSubsystemKey is the subsystem name that is used to denote the + // default log level. We use parentheses for UI clarity to distinguish it + // from regular subsystem names. + defaultSubsystemKey = "(default)" + // logLevelOption is an option for the tail subcommand to select the log + // level to output. + logLevelOption = "log-level" + // noSubsystemSpecified is used when no subsystem argument is provided + noSubsystemSpecified = "" +) + +type logLevelOutput struct { + Levels map[string]string `json:",omitempty"` + Message string `json:",omitempty"` +} var LogCmd = &cmds.Command{ Helptext: cmds.HelpText{ @@ -39,46 +60,161 @@ system (not just for the daemon logs, but all commands): var logLevelCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Change the logging level.", + Tagline: "Change or get the logging level.", ShortDescription: ` -Change the verbosity of one or all subsystems log output. This does not affect -the event log. +Get or change the logging level of one or all logging subsystems. + +This command provides a runtime alternative to the GOLOG_LOG_LEVEL +environment variable for debugging and troubleshooting. + +UNDERSTANDING DEFAULT vs '*': + +The "default" level is the fallback used by unconfigured subsystems. +You cannot set the default level directly - it only changes when you use '*'. + +The '*' wildcard represents ALL subsystems including the default level. +Setting '*' changes everything at once, including the default. + +EXAMPLES - Getting levels: + + ipfs log level # Show only the default fallback level + ipfs log level all # Show all subsystem levels (100+ lines) + ipfs log level core # Show level for 'core' subsystem only + +EXAMPLES - Setting levels: + + ipfs log level core debug # Set 'core' to 'debug' (default unchanged) + ipfs log level all info # Set ALL to 'info' (including default) + ipfs log level core default # Reset 'core' to use current default level + +WILDCARD OPTIONS: + +Use 'all' (convenient) or '*' (requires escaping) to affect all subsystems: + ipfs log level all debug # Convenient - no shell escaping needed + ipfs log level '*' debug # Equivalent but needs quotes: '*' or "*" or \* + +BEHAVIOR EXAMPLES: + +Initial state (all using default 'error'): + $ ipfs log level => error + $ ipfs log level core => error + +After setting one subsystem: + $ ipfs log level core debug + $ ipfs log level => error (default unchanged!) + $ ipfs log level core => debug (explicitly set) + $ ipfs log level dht => error (still uses default) + +After setting everything with 'all': + $ ipfs log level all info + $ ipfs log level => info (default changed!) + $ ipfs log level core => info (all changed) + $ ipfs log level dht => info (all changed) + +The 'default' keyword always refers to the current default level: + $ ipfs log level => error + $ ipfs log level core default # Sets core to 'error' + $ ipfs log level all info # Changes default to 'info' + $ ipfs log level core default # Now sets core to 'info' `, }, Arguments: []cmds.Argument{ - // TODO use a different keyword for 'all' because all can theoretically - // clash with a subsystem name - cmds.StringArg("subsystem", true, false, fmt.Sprintf("The subsystem logging identifier. Use '%s' for all subsystems.", logAllKeyword)), - cmds.StringArg("level", true, false, `The log level, with 'debug' the most verbose and 'fatal' the least verbose. - One of: debug, info, warn, error, dpanic, panic, fatal. - `), + cmds.StringArg("subsystem", false, false, fmt.Sprintf("The subsystem logging identifier. Use '%s' or '%s' to get or set the log level of all subsystems including the default. If not specified, only show the default log level.", allLogSubsystemsAlias, allLogSubsystems)), + cmds.StringArg("level", false, false, fmt.Sprintf("The log level, with 'debug' as the most verbose and 'fatal' the least verbose. Use '%s' to set to the current default level. One of: debug, info, warn, error, dpanic, panic, fatal, %s", defaultLogLevel, defaultLogLevel)), }, NoLocal: true, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - args := req.Arguments - subsystem, level := args[0], args[1] + var level, subsystem string - if subsystem == logAllKeyword { - subsystem = "*" + if len(req.Arguments) > 0 { + subsystem = req.Arguments[0] + if len(req.Arguments) > 1 { + level = req.Arguments[1] + } + + // Normalize aliases to the canonical "*" form + if subsystem == allLogSubsystems || subsystem == allLogSubsystemsAlias { + subsystem = "*" + } } - if err := logging.SetLogLevel(subsystem, level); err != nil { - return err + // If a level is specified, then set the log level. + if level != "" { + if level == defaultLogLevel { + level = logging.DefaultLevel().String() + } + + if err := logging.SetLogLevel(subsystem, level); err != nil { + return err + } + + s := fmt.Sprintf("Changed log level of '%s' to '%s'\n", subsystem, level) + log.Info(s) + + return cmds.EmitOnce(res, &logLevelOutput{Message: s}) } - s := fmt.Sprintf("Changed log level of '%s' to '%s'\n", subsystem, level) - log.Info(s) + // Get the level for the requested subsystem. + switch subsystem { + case noSubsystemSpecified: + // Return the default log level + levelMap := map[string]string{logging.DefaultName: logging.DefaultLevel().String()} + return cmds.EmitOnce(res, &logLevelOutput{Levels: levelMap}) + case allLogSubsystems, allLogSubsystemsAlias: + // Return levels for all subsystems (default behavior) + levels := logging.SubsystemLevelNames() + + // Replace default subsystem key with defaultSubsystemKey. + levels[defaultSubsystemKey] = levels[logging.DefaultName] + delete(levels, logging.DefaultName) + return cmds.EmitOnce(res, &logLevelOutput{Levels: levels}) + default: + // Return level for a specific subsystem. + level, err := logging.SubsystemLevelName(subsystem) + if err != nil { + return err + } + levelMap := map[string]string{subsystem: level} + return cmds.EmitOnce(res, &logLevelOutput{Levels: levelMap}) + } - return cmds.EmitOnce(res, &MessageOutput{s}) }, Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *MessageOutput) error { - fmt.Fprint(w, out.Message) + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *logLevelOutput) error { + if out.Message != "" { + fmt.Fprint(w, out.Message) + return nil + } + + // Check if this is an RPC call by looking for the encoding option + encoding, _ := req.Options["encoding"].(string) + isRPC := encoding == "json" + + // Determine whether to show subsystem names in output. + // Show subsystem names when: + // 1. It's an RPC call (needs JSON structure with named fields) + // 2. Multiple subsystems are displayed (for clarity when showing many levels) + showNames := isRPC || len(out.Levels) > 1 + + levelNames := make([]string, 0, len(out.Levels)) + for subsystem, level := range out.Levels { + if showNames { + // Show subsystem name when it's RPC or when showing multiple subsystems + levelNames = append(levelNames, fmt.Sprintf("%s: %s", subsystem, level)) + } else { + // For CLI calls with single subsystem, only show the level + levelNames = append(levelNames, level) + } + } + slices.Sort(levelNames) + for _, ln := range levelNames { + fmt.Fprintln(w, ln) + } return nil }), }, - Type: MessageOutput{}, + Type: logLevelOutput{}, } var logLsCmd = &cmds.Command{ @@ -103,12 +239,10 @@ subsystems of a running daemon. Type: stringList{}, } -const logLevelOption = "log-level" - var logTailCmd = &cmds.Command{ Status: cmds.Experimental, Helptext: cmds.HelpText{ - Tagline: "Read and outpt log messages.", + Tagline: "Read and output log messages.", ShortDescription: ` Outputs log messages as they are generated. @@ -130,7 +264,7 @@ This will only return 'info' logs from bitswap and skip 'debug'. var pipeReader *logging.PipeReader logLevelString, _ := req.Options[logLevelOption].(string) if logLevelString != "" { - logLevel, err := logging.LevelFromString(logLevelString) + logLevel, err := logging.Parse(logLevelString) if err != nil { return fmt.Errorf("setting log level %s: %w", logLevelString, err) } diff --git a/core/commands/name/publish.go b/core/commands/name/publish.go index 168d7fb44..918606d63 100644 --- a/core/commands/name/publish.go +++ b/core/commands/name/publish.go @@ -16,17 +16,19 @@ import ( options "github.com/ipfs/kubo/core/coreiface/options" ) -var errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override") +var errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override or `--allow-delegated` if Ipns.DelegatedPublishers are set up") const ( - ipfsPathOptionName = "ipfs-path" - resolveOptionName = "resolve" - allowOfflineOptionName = "allow-offline" - lifeTimeOptionName = "lifetime" - ttlOptionName = "ttl" - keyOptionName = "key" - quieterOptionName = "quieter" - v1compatOptionName = "v1compat" + ipfsPathOptionName = "ipfs-path" + resolveOptionName = "resolve" + allowOfflineOptionName = "allow-offline" + allowDelegatedOptionName = "allow-delegated" + lifeTimeOptionName = "lifetime" + ttlOptionName = "ttl" + keyOptionName = "key" + quieterOptionName = "quieter" + v1compatOptionName = "v1compat" + sequenceOptionName = "sequence" ) var PublishCmd = &cmds.Command{ @@ -47,6 +49,14 @@ which is the hash of its public key. You can use the 'ipfs key' commands to list and generate more names and their respective keys. +Publishing Modes: + +By default, IPNS records are published to both the DHT and any configured +HTTP delegated publishers. You can control this behavior with the following flags: + + --allow-offline Allow publishing when offline (publishes to local datastore, network operations are optional) + --allow-delegated Allow publishing without DHT connectivity (local + HTTP delegated publishers only) + Examples: Publish an with your default name: @@ -54,18 +64,33 @@ Publish an with your default name: > ipfs name publish /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy -Publish an with another name, added by an 'ipfs key' command: +Publish without DHT (HTTP delegated publishers only): - > ipfs key gen --type=rsa --size=2048 mykey - > ipfs name publish --key=mykey /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy - Published to QmSrPmbaUKA3ZodhzPWZnpFgcPMFWF4QsxXbkWfEptTBJd: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy - -Alternatively, publish an using a valid PeerID (as listed by -'ipfs key list -l'): - - > ipfs name publish --key=QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + > ipfs name publish --allow-delegated /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy +Publish when offline (local publish, network optional): + + > ipfs name publish --allow-offline /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +Notes: + +The --ttl option specifies the time duration for caching IPNS records. +Lower values like '1m' enable faster updates but increase network load, +while the default of 1 hour reduces traffic but may delay propagation. +Gateway operators may override this with Ipns.MaxCacheTTL configuration. + +The --sequence option sets a custom sequence number for the IPNS record. +The sequence number must be monotonically increasing (greater than the +current record's sequence). This is useful for manually coordinating +updates across multiple writers. If not specified, the sequence number +increments automatically. + +For faster IPNS updates, consider: +- Using a lower --ttl value (e.g., '1m' for quick updates) +- Enabling PubSub via Ipns.UsePubsub in the config + `, }, @@ -79,7 +104,9 @@ Alternatively, publish an using a valid PeerID (as listed by cmds.StringOption(ttlOptionName, "Time duration hint, akin to --lifetime, indicating how long to cache this record before checking for updates.").WithDefault(ipns.DefaultRecordTTL.String()), cmds.BoolOption(quieterOptionName, "Q", "Write only final IPNS Name encoded as CIDv1 (for use in /ipns content paths)."), cmds.BoolOption(v1compatOptionName, "Produce a backward-compatible IPNS Record by including fields for both V1 and V2 signatures.").WithDefault(true), - cmds.BoolOption(allowOfflineOptionName, "When --offline, save the IPNS record to the local datastore without broadcasting to the network (instead of failing)."), + cmds.BoolOption(allowOfflineOptionName, "Allow publishing when offline - publishes to local datastore without requiring network connectivity."), + cmds.BoolOption(allowDelegatedOptionName, "Allow publishing without DHT connectivity - uses local datastore and HTTP delegated publishers only."), + cmds.Uint64Option(sequenceOptionName, "Set a custom sequence number for the IPNS record (must be higher than current)."), ke.OptionIPNSBase, }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { @@ -89,9 +116,15 @@ Alternatively, publish an using a valid PeerID (as listed by } allowOffline, _ := req.Options[allowOfflineOptionName].(bool) + allowDelegated, _ := req.Options[allowDelegatedOptionName].(bool) compatibleWithV1, _ := req.Options[v1compatOptionName].(bool) kname, _ := req.Options[keyOptionName].(string) + // Validate flag combinations + if allowOffline && allowDelegated { + return errors.New("cannot use both --allow-offline and --allow-delegated flags") + } + validTimeOpt, _ := req.Options[lifeTimeOptionName].(string) validTime, err := time.ParseDuration(validTimeOpt) if err != nil { @@ -100,6 +133,7 @@ Alternatively, publish an using a valid PeerID (as listed by opts := []options.NamePublishOption{ options.Name.AllowOffline(allowOffline), + options.Name.AllowDelegated(allowDelegated), options.Name.Key(kname), options.Name.ValidTime(validTime), options.Name.CompatibleWithV1(compatibleWithV1), @@ -114,6 +148,10 @@ Alternatively, publish an using a valid PeerID (as listed by opts = append(opts, options.Name.TTL(d)) } + if sequence, found := req.Options[sequenceOptionName].(uint64); found { + opts = append(opts, options.Name.Sequence(sequence)) + } + p, err := cmdutils.PathOrCidPath(req.Arguments[0]) if err != nil { return err diff --git a/core/commands/provide.go b/core/commands/provide.go new file mode 100644 index 000000000..ba52ca50b --- /dev/null +++ b/core/commands/provide.go @@ -0,0 +1,178 @@ +package commands + +import ( + "fmt" + "io" + "text/tabwriter" + "time" + + humanize "github.com/dustin/go-humanize" + "github.com/ipfs/boxo/provider" + cmds "github.com/ipfs/go-ipfs-cmds" + "github.com/ipfs/kubo/core/commands/cmdenv" + "github.com/libp2p/go-libp2p-kad-dht/fullrt" + "golang.org/x/exp/constraints" +) + +const ( + provideQuietOptionName = "quiet" +) + +var ProvideCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Control providing operations", + ShortDescription: ` +Control providing operations. + +NOTE: This command is experimental and not all provide-related commands have +been migrated to this namespace yet. For example, 'ipfs routing +provide|reprovide' are still under the routing namespace, 'ipfs stats +reprovide' provides statistics. Additionally, 'ipfs bitswap reprovide' and +'ipfs stats provide' are deprecated. +`, + }, + + Subcommands: map[string]*cmds.Command{ + "clear": provideClearCmd, + "stat": provideStatCmd, + }, +} + +var provideClearCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Clear all CIDs from the provide queue.", + ShortDescription: ` +Clear all CIDs from the reprovide queue. + +Note: Kubo will automatically clear the queue when it detects a change of +Reprovider.Strategy upon a restart. For more information about reprovider +strategies, see: +https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy +`, + }, + Options: []cmds.Option{ + cmds.BoolOption(provideQuietOptionName, "q", "Do not write output."), + }, + Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { + n, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + quiet, _ := req.Options[provideQuietOptionName].(bool) + if n.Provider == nil { + return nil + } + + cleared := n.Provider.Clear() + if quiet { + return nil + } + _ = re.Emit(cleared) + + return nil + }, + Type: int(0), + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, cleared int) error { + quiet, _ := req.Options[provideQuietOptionName].(bool) + if quiet { + return nil + } + + _, err := fmt.Fprintf(w, "removed %d items from provide queue\n", cleared) + return err + }), + }, +} + +type provideStats struct { + provider.ReproviderStats + fullRT bool +} + +var provideStatCmd = &cmds.Command{ + Status: cmds.Experimental, + Helptext: cmds.HelpText{ + Tagline: "Returns statistics about the node's provider system.", + ShortDescription: ` +Returns statistics about the content the node is reproviding every +Reprovider.Interval according to Reprovider.Strategy: +https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider + +This interface is not stable and may change from release to release. + +`, + }, + Arguments: []cmds.Argument{}, + Options: []cmds.Option{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + if !nd.IsOnline { + return ErrNotOnline + } + + stats, err := nd.Provider.Stat() + if err != nil { + return err + } + _, fullRT := nd.DHTClient.(*fullrt.FullRT) + + if err := res.Emit(provideStats{stats, fullRT}); err != nil { + return err + } + + return nil + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s provideStats) error { + wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0) + defer wtr.Flush() + + fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.TotalReprovides)) + fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration)) + fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration)) + if !s.LastRun.IsZero() { + fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.LastRun)) + if s.fullRT { + fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval))) + } + } + return nil + }), + }, + Type: provideStats{}, +} + +func humanDuration(val time.Duration) string { + return val.Truncate(time.Microsecond).String() +} + +func humanTime(val time.Time) string { + return val.Format("2006-01-02 15:04:05") +} + +func humanNumber[T constraints.Float | constraints.Integer](n T) string { + nf := float64(n) + str := humanSI(nf, 0) + fullStr := humanFull(nf, 0) + if str != fullStr { + return fmt.Sprintf("%s\t(%s)", str, fullStr) + } + return str +} + +func humanSI(val float64, decimals int) string { + v, unit := humanize.ComputeSI(val) + return fmt.Sprintf("%s%s", humanFull(v, decimals), unit) +} + +func humanFull(val float64, decimals int) string { + return humanize.CommafWithDigits(val, decimals) +} diff --git a/core/commands/repo.go b/core/commands/repo.go index 77ce68590..017143127 100644 --- a/core/commands/repo.go +++ b/core/commands/repo.go @@ -16,7 +16,6 @@ import ( corerepo "github.com/ipfs/kubo/core/corerepo" fsrepo "github.com/ipfs/kubo/repo/fsrepo" "github.com/ipfs/kubo/repo/fsrepo/migrations" - "github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher" humanize "github.com/dustin/go-humanize" bstore "github.com/ipfs/boxo/blockstore" @@ -57,6 +56,7 @@ const ( repoQuietOptionName = "quiet" repoSilentOptionName = "silent" repoAllowDowngradeOptionName = "allow-downgrade" + repoToVersionOptionName = "to" ) var repoGcCmd = &cmds.Command{ @@ -283,8 +283,7 @@ var repoVerifyCmd = &cmds.Command{ return err } - bs := bstore.NewBlockstore(nd.Repo.Datastore()) - bs.HashOnRead(true) + bs := &bstore.ValidatingBlockstore{Blockstore: bstore.NewBlockstore(nd.Repo.Datastore())} keys, err := bs.AllKeysChan(req.Context) if err != nil { @@ -374,63 +373,81 @@ var repoVersionCmd = &cmds.Command{ var repoMigrateCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Apply any outstanding migrations to the repo.", + Tagline: "Apply repository migrations to a specific version.", + ShortDescription: ` +'ipfs repo migrate' applies repository migrations to bring the repository +to a specific version. By default, migrates to the latest version supported +by this IPFS binary. + +Examples: + ipfs repo migrate # Migrate to latest version + ipfs repo migrate --to=17 # Migrate to version 17 + ipfs repo migrate --to=16 --allow-downgrade # Downgrade to version 16 + +WARNING: Downgrading a repository may cause data loss and requires using +an older IPFS binary that supports the target version. After downgrading, +you must use an IPFS implementation compatible with that repository version. + +Repository versions 16+ use embedded migrations for faster, more reliable +migration. Versions below 16 require external migration tools. +`, }, Options: []cmds.Option{ + cmds.IntOption(repoToVersionOptionName, "Target repository version").WithDefault(fsrepo.RepoVersion), cmds.BoolOption(repoAllowDowngradeOptionName, "Allow downgrading to a lower repo version"), }, NoRemote: true, + // SetDoesNotUseRepo(true) might seem counter-intuitive since migrations + // do access the repo, but it's correct - we need direct filesystem access + // without going through the daemon. Migrations handle their own locking. + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { cctx := env.(*oldcmds.Context) allowDowngrade, _ := req.Options[repoAllowDowngradeOptionName].(bool) + targetVersion, _ := req.Options[repoToVersionOptionName].(int) - _, err := fsrepo.Open(cctx.ConfigRoot) + // Get current repo version + currentVersion, err := migrations.RepoVersion(cctx.ConfigRoot) + if err != nil { + return fmt.Errorf("could not get current repo version: %w", err) + } - if err == nil { - fmt.Println("Repo does not require migration.") + // Check if migration is needed + if currentVersion == targetVersion { + fmt.Printf("Repository is already at version %d.\n", targetVersion) return nil - } else if err != fsrepo.ErrNeedMigration { - return err } - fmt.Println("Found outdated fs-repo, starting migration.") + // Validate downgrade request + if targetVersion < currentVersion && !allowDowngrade { + return fmt.Errorf("downgrade from version %d to %d requires --allow-downgrade flag", currentVersion, targetVersion) + } - // Read Migration section of IPFS config - configFileOpt, _ := req.Options[ConfigFileOption].(string) - migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot, configFileOpt) + // Check if repo is locked by daemon before running migration + locked, err := fsrepo.LockedByOtherProcess(cctx.ConfigRoot) if err != nil { - return err + return fmt.Errorf("could not check repo lock: %w", err) + } + if locked { + return fmt.Errorf("cannot run migration while daemon is running (repo.lock exists)") } - // Define function to create IPFS fetcher. Do not supply an - // already-constructed IPFS fetcher, because this may be expensive and - // not needed according to migration config. Instead, supply a function - // to construct the particular IPFS fetcher implementation used here, - // which is called only if an IPFS fetcher is needed. - newIpfsFetcher := func(distPath string) migrations.Fetcher { - return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot, configFileOpt) - } + fmt.Printf("Migrating repository from version %d to %d...\n", currentVersion, targetVersion) - // Fetch migrations from current distribution, or location from environ - fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist) - - // Create fetchers according to migrationCfg.DownloadSources - fetcher, err := migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher) + // Use hybrid migration strategy that intelligently combines external and embedded migrations + err = migrations.RunHybridMigrations(cctx.Context(), targetVersion, cctx.ConfigRoot, allowDowngrade) if err != nil { - return err - } - defer fetcher.Close() - - err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", allowDowngrade) - if err != nil { - fmt.Println("The migrations of fs-repo failed:") + fmt.Println("Repository migration failed:") fmt.Printf(" %s\n", err) fmt.Println("If you think this is a bug, please file an issue and include this whole log output.") - fmt.Println(" https://github.com/ipfs/fs-repo-migrations") + fmt.Println(" https://github.com/ipfs/kubo") return err } - fmt.Printf("Success: fs-repo has been migrated to version %d.\n", fsrepo.RepoVersion) + fmt.Printf("Repository successfully migrated to version %d.\n", targetVersion) + if targetVersion < fsrepo.RepoVersion { + fmt.Println("WARNING: After downgrading, you must use an IPFS binary compatible with this repository version.") + } return nil }, } diff --git a/core/commands/root.go b/core/commands/root.go index 80c2309df..d70a49376 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -65,6 +65,7 @@ ADVANCED COMMANDS p2p Libp2p stream mounting (experimental) filestore Manage the filestore (experimental) mount Mount an IPFS read-only mount point (experimental) + provide Control providing operations NETWORK COMMANDS id Show info about IPFS peers @@ -133,6 +134,7 @@ var rootSubcommands = map[string]*cmds.Command{ "files": FilesCmd, "filestore": FileStoreCmd, "get": GetCmd, + "provide": ProvideCmd, "pubsub": PubsubCmd, "repo": RepoCmd, "stats": StatsCmd, diff --git a/core/commands/stat_provide.go b/core/commands/stat_provide.go index ef06d8e28..56a0f3dc4 100644 --- a/core/commands/stat_provide.go +++ b/core/commands/stat_provide.go @@ -1,65 +1,22 @@ package commands import ( - "fmt" - "io" - "text/tabwriter" - cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/ipfs/kubo/core/commands/cmdenv" - "github.com/libp2p/go-libp2p-kad-dht/fullrt" ) var statProvideCmd = &cmds.Command{ Status: cmds.Deprecated, Helptext: cmds.HelpText{ - Tagline: "Deprecated command, use 'ipfs stats reprovide' instead.", + Tagline: "Deprecated command, use 'ipfs provide stat' instead.", ShortDescription: ` 'ipfs stats provide' is deprecated because provide and reprovide operations are now distinct. This command may be replaced by provide only stats in the future. `, }, - Arguments: []cmds.Argument{}, - Options: []cmds.Option{}, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - nd, err := cmdenv.GetNode(env) - if err != nil { - return err - } - - if !nd.IsOnline { - return ErrNotOnline - } - - stats, err := nd.Provider.Stat() - if err != nil { - return err - } - _, fullRT := nd.DHTClient.(*fullrt.FullRT) - - if err := res.Emit(reprovideStats{stats, fullRT}); err != nil { - return err - } - - return nil - }, - Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s reprovideStats) error { - wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0) - defer wtr.Flush() - - fmt.Fprintf(wtr, "TotalProvides:\t%s\n", humanNumber(s.TotalReprovides)) - fmt.Fprintf(wtr, "AvgProvideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration)) - fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration)) - if !s.LastRun.IsZero() { - fmt.Fprintf(wtr, "LastRun:\t%s\n", humanTime(s.LastRun)) - if s.fullRT { - fmt.Fprintf(wtr, "NextRun:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval))) - } - } - return nil - }), - }, - Type: reprovideStats{}, + Arguments: provideStatCmd.Arguments, + Options: provideStatCmd.Options, + Run: provideStatCmd.Run, + Encoders: provideStatCmd.Encoders, + Type: provideStatCmd.Type, } diff --git a/core/commands/stat_reprovide.go b/core/commands/stat_reprovide.go index 10dbc727d..87893d1b5 100644 --- a/core/commands/stat_reprovide.go +++ b/core/commands/stat_reprovide.go @@ -1,104 +1,21 @@ package commands import ( - "fmt" - "io" - "text/tabwriter" - "time" - - humanize "github.com/dustin/go-humanize" - "github.com/ipfs/boxo/provider" cmds "github.com/ipfs/go-ipfs-cmds" - "github.com/ipfs/kubo/core/commands/cmdenv" - "github.com/libp2p/go-libp2p-kad-dht/fullrt" - "golang.org/x/exp/constraints" ) -type reprovideStats struct { - provider.ReproviderStats - fullRT bool -} - var statReprovideCmd = &cmds.Command{ - Status: cmds.Experimental, + Status: cmds.Deprecated, Helptext: cmds.HelpText{ - Tagline: "Returns statistics about the node's reprovider system.", + Tagline: "Deprecated command, use 'ipfs provide stat' instead.", ShortDescription: ` -Returns statistics about the content the node is reproviding every -Reprovider.Interval according to Reprovider.Strategy: -https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider - -This interface is not stable and may change from release to release. - +'ipfs stats reprovide' is deprecated because provider stats are now +available from 'ipfs provide stat'. `, }, - Arguments: []cmds.Argument{}, - Options: []cmds.Option{}, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - nd, err := cmdenv.GetNode(env) - if err != nil { - return err - } - - if !nd.IsOnline { - return ErrNotOnline - } - - stats, err := nd.Provider.Stat() - if err != nil { - return err - } - _, fullRT := nd.DHTClient.(*fullrt.FullRT) - - if err := res.Emit(reprovideStats{stats, fullRT}); err != nil { - return err - } - - return nil - }, - Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s reprovideStats) error { - wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0) - defer wtr.Flush() - - fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.TotalReprovides)) - fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration)) - fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration)) - if !s.LastRun.IsZero() { - fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.LastRun)) - if s.fullRT { - fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval))) - } - } - return nil - }), - }, - Type: reprovideStats{}, -} - -func humanDuration(val time.Duration) string { - return val.Truncate(time.Microsecond).String() -} - -func humanTime(val time.Time) string { - return val.Format("2006-01-02 15:04:05") -} - -func humanNumber[T constraints.Float | constraints.Integer](n T) string { - nf := float64(n) - str := humanSI(nf, 0) - fullStr := humanFull(nf, 0) - if str != fullStr { - return fmt.Sprintf("%s\t(%s)", str, fullStr) - } - return str -} - -func humanSI(val float64, decimals int) string { - v, unit := humanize.ComputeSI(val) - return fmt.Sprintf("%s%s", humanFull(v, decimals), unit) -} - -func humanFull(val float64, decimals int) string { - return humanize.CommafWithDigits(val, decimals) + Arguments: provideStatCmd.Arguments, + Options: provideStatCmd.Options, + Run: provideStatCmd.Run, + Encoders: provideStatCmd.Encoders, + Type: provideStatCmd.Type, } diff --git a/core/core.go b/core/core.go index 186da1f09..f8a6a258f 100644 --- a/core/core.go +++ b/core/core.go @@ -29,7 +29,6 @@ import ( provider "github.com/ipfs/boxo/provider" ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" - goprocess "github.com/jbenet/goprocess" ddht "github.com/libp2p/go-libp2p-kad-dht/dual" pubsub "github.com/libp2p/go-libp2p-pubsub" psrouter "github.com/libp2p/go-libp2p-pubsub-router" @@ -98,6 +97,7 @@ type IpfsNode struct { Filters *ma.Filters `optional:"true"` Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht + ContentDiscovery routing.ContentDiscovery `optional:"true"` // the discovery part of the routing system DNSResolver *madns.Resolver // the DNS resolver IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver @@ -107,6 +107,8 @@ type IpfsNode struct { Bitswap *bitswap.Bitswap `optional:"true"` // The Bitswap instance Namesys namesys.NameSystem // the name system, resolves paths to hashes Provider provider.System // the value provider system + ProvidingStrategy config.ReproviderStrategy `optional:"true"` + ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"` IpnsRepub *ipnsrp.Republisher `optional:"true"` ResourceManager network.ResourceManager `optional:"true"` @@ -118,8 +120,7 @@ type IpfsNode struct { P2P *p2p.P2P `optional:"true"` - Process goprocess.Process - ctx context.Context + ctx context.Context stop func() error @@ -212,7 +213,8 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.AddrInfo, error) { return nil, err } - return cfg.BootstrapPeers() + // Use auto-config resolution for actual bootstrap connectivity + return cfg.BootstrapPeersWithAutoConf() } func (n *IpfsNode) saveTempBootstrapPeers(ctx context.Context, peerList []peer.AddrInfo) error { diff --git a/core/coreapi/coreapi.go b/core/coreapi/coreapi.go index 6e099e5fd..66763e884 100644 --- a/core/coreapi/coreapi.go +++ b/core/coreapi/coreapi.go @@ -26,6 +26,7 @@ import ( provider "github.com/ipfs/boxo/provider" offlineroute "github.com/ipfs/boxo/routing/offline" ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" "github.com/ipfs/kubo/config" coreiface "github.com/ipfs/kubo/core/coreiface" "github.com/ipfs/kubo/core/coreiface/options" @@ -44,6 +45,8 @@ import ( "github.com/ipfs/kubo/repo" ) +var log = logging.Logger("coreapi") + type CoreAPI struct { nctx context.Context @@ -70,7 +73,8 @@ type CoreAPI struct { ipldPathResolver pathresolver.Resolver unixFSPathResolver pathresolver.Resolver - provider provider.System + provider provider.System + providingStrategy config.ReproviderStrategy pubSub *pubsub.PubSub @@ -185,7 +189,8 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e ipldPathResolver: n.IPLDPathResolver, unixFSPathResolver: n.UnixFSPathResolver, - provider: n.Provider, + provider: n.Provider, + providingStrategy: n.ProvidingStrategy, pubSub: n.PubSub, @@ -235,8 +240,6 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e return nil, fmt.Errorf("error constructing namesys: %w", err) } - subAPI.provider = provider.NewNoopProvider() - subAPI.peerstore = nil subAPI.peerHost = nil subAPI.recordValidator = nil diff --git a/core/coreapi/name.go b/core/coreapi/name.go index 305c19e43..5e7971698 100644 --- a/core/coreapi/name.go +++ b/core/coreapi/name.go @@ -45,9 +45,25 @@ func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.Nam span.SetAttributes(attribute.Float64("ttl", options.TTL.Seconds())) } - err = api.checkOnline(options.AllowOffline) - if err != nil { - return ipns.Name{}, err + // Handle different publishing modes + if options.AllowDelegated { + // AllowDelegated mode: check if delegated publishers are configured + cfg, err := api.repo.Config() + if err != nil { + return ipns.Name{}, fmt.Errorf("failed to read config: %w", err) + } + delegatedPublishers := cfg.DelegatedPublishersWithAutoConf() + if len(delegatedPublishers) == 0 { + return ipns.Name{}, errors.New("no delegated publishers configured: add Ipns.DelegatedPublishers or use --allow-offline for local-only publishing") + } + // For allow-delegated mode, we only require that we have delegated publishers configured + // The node doesn't need P2P connectivity since we're using HTTP publishing + } else { + // Normal mode: check online status with allow-offline flag + err = api.checkOnline(options.AllowOffline) + if err != nil { + return ipns.Name{}, err + } } k, err := keylookup(api.privateKey, api.repo.Keystore(), options.Key) @@ -66,6 +82,10 @@ func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.Nam publishOptions = append(publishOptions, namesys.PublishWithTTL(*options.TTL)) } + if options.Sequence != nil { + publishOptions = append(publishOptions, namesys.PublishWithSequence(*options.Sequence)) + } + err = api.namesys.Publish(ctx, k, p, publishOptions...) if err != nil { return ipns.Name{}, err diff --git a/core/coreapi/pin.go b/core/coreapi/pin.go index 878b4c28d..9bb44bac5 100644 --- a/core/coreapi/pin.go +++ b/core/coreapi/pin.go @@ -44,10 +44,6 @@ func (api *PinAPI) Add(ctx context.Context, p path.Path, opts ...caopts.PinAddOp return fmt.Errorf("pin: %s", err) } - if err := api.provider.Provide(ctx, dagNode.Cid(), true); err != nil { - return err - } - return api.pinning.Flush(ctx) } diff --git a/core/coreapi/test/api_test.go b/core/coreapi/test/api_test.go index dfd8cf685..7867e1f1c 100644 --- a/core/coreapi/test/api_test.go +++ b/core/coreapi/test/api_test.go @@ -70,6 +70,9 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity c.Identity = ident c.Experimental.FilestoreEnabled = true c.AutoTLS.Enabled = config.False // disable so no /ws listener is added + // For provider tests, avoid that content gets + // auto-provided without calling "provide" (unless pinned). + c.Reprovider.Strategy = config.NewOptionalString("roots") ds := syncds.MutexWrap(datastore.NewMapDatastore()) r := &repo.Mock{ diff --git a/core/coreapi/test/path_test.go b/core/coreapi/test/path_test.go index 692853a9a..f1337e809 100644 --- a/core/coreapi/test/path_test.go +++ b/core/coreapi/test/path_test.go @@ -39,7 +39,7 @@ func TestPathUnixFSHAMTPartial(t *testing.T) { dir[strconv.Itoa(i)] = files.NewBytesFile([]byte(strconv.Itoa(i))) } - r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false)) + r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false, "")) if err != nil { t.Fatal(err) } diff --git a/core/coreapi/unixfs.go b/core/coreapi/unixfs.go index eece797a5..de03b6099 100644 --- a/core/coreapi/unixfs.go +++ b/core/coreapi/unixfs.go @@ -16,6 +16,7 @@ import ( uio "github.com/ipfs/boxo/ipld/unixfs/io" "github.com/ipfs/boxo/mfs" "github.com/ipfs/boxo/path" + provider "github.com/ipfs/boxo/provider" cid "github.com/ipfs/go-cid" cidutil "github.com/ipfs/go-cidutil" ds "github.com/ipfs/go-datastore" @@ -58,6 +59,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options attribute.Bool("maxhamtfanoutset", settings.MaxHAMTFanoutSet), attribute.Int("layout", int(settings.Layout)), attribute.Bool("pin", settings.Pin), + attribute.String("pin-name", settings.PinName), attribute.Bool("onlyhash", settings.OnlyHash), attribute.Bool("fscache", settings.FsCache), attribute.Bool("nocopy", settings.NoCopy), @@ -101,7 +103,22 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options bserv := blockservice.New(addblockstore, exch, blockservice.WriteThrough(cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough)), ) // hash security 001 - dserv := merkledag.NewDAGService(bserv) + + var dserv ipld.DAGService = merkledag.NewDAGService(bserv) + + // wrap the DAGService in a providingDAG service which provides every block written. + // note about strategies: + // - "all" gets handled directly at the blockstore so no need to provide + // - "roots" gets handled in the pinner + // - "mfs" gets handled in mfs + // We need to provide the "pinned" cases only. Added blocks are not + // going to be provided by the blockstore (wrong strategy for that), + // nor by the pinner (the pinner doesn't traverse the pinned DAG itself, it only + // handles roots). This wrapping ensures all blocks of pinned content get provided. + if settings.Pin && !settings.OnlyHash && + (api.providingStrategy&config.ReproviderStrategyPinned) != 0 { + dserv = &providingDagService{dserv, api.provider} + } // add a sync call to the DagService // this ensures that data written to the DagService is persisted to the underlying datastore @@ -125,6 +142,11 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options } } + // Note: the dag service gets wrapped multiple times: + // 1. providingDagService (if pinned strategy) - provides blocks as they're added + // 2. syncDagService - ensures data persistence + // 3. batchingDagService (in coreunix.Adder) - batches operations for efficiency + fileAdder, err := coreunix.NewAdder(ctx, pinning, addblockstore, syncDserv) if err != nil { return path.ImmutablePath{}, err @@ -136,6 +158,9 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options fileAdder.Progress = settings.Progress } fileAdder.Pin = settings.Pin && !settings.OnlyHash + if settings.Pin { + fileAdder.PinName = settings.PinName + } fileAdder.Silent = settings.Silent fileAdder.RawLeaves = settings.RawLeaves if settings.MaxFileLinksSet { @@ -179,7 +204,8 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options if err != nil { return path.ImmutablePath{}, err } - mr, err := mfs.NewRoot(ctx, md, emptyDirNode, nil) + // MFS root for OnlyHash mode: provider is nil since we're not storing/providing anything + mr, err := mfs.NewRoot(ctx, md, emptyDirNode, nil, nil) if err != nil { return path.ImmutablePath{}, err } @@ -192,12 +218,6 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options return path.ImmutablePath{}, err } - if !settings.OnlyHash { - if err := api.provider.Provide(ctx, nd.Cid(), true); err != nil { - return path.ImmutablePath{}, err - } - } - return path.FromCid(nd.Cid()), nil } @@ -363,3 +383,40 @@ type syncDagService struct { func (s *syncDagService) Sync() error { return s.syncFn() } + +type providingDagService struct { + ipld.DAGService + provider provider.System +} + +func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error { + if err := pds.DAGService.Add(ctx, n); err != nil { + return err + } + // Provider errors are logged but not propagated. + // We don't want DAG operations to fail due to providing issues. + // The user's data is still stored successfully even if the + // announcement to the routing system fails temporarily. + if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil { + log.Error(err) + } + return nil +} + +func (pds *providingDagService) AddMany(ctx context.Context, nds []ipld.Node) error { + if err := pds.DAGService.AddMany(ctx, nds); err != nil { + return err + } + // Same error handling philosophy as Add(): log but don't fail. + // Note: Provide calls are intentionally blocking here - the Provider + // implementation should handle concurrency/queuing internally. + for _, n := range nds { + if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil { + log.Error(err) + break + } + } + return nil +} + +var _ ipld.DAGService = (*providingDagService)(nil) diff --git a/core/corehttp/corehttp.go b/core/corehttp/corehttp.go index 595a0aa5f..344991923 100644 --- a/core/corehttp/corehttp.go +++ b/core/corehttp/corehttp.go @@ -13,8 +13,6 @@ import ( logging "github.com/ipfs/go-log/v2" core "github.com/ipfs/kubo/core" - "github.com/jbenet/goprocess" - periodicproc "github.com/jbenet/goprocess/periodic" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) @@ -97,7 +95,7 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error } select { - case <-node.Process.Closing(): + case <-node.Context().Done(): return fmt.Errorf("failed to start server, process closing") default: } @@ -107,20 +105,31 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error } var serverError error - serverProc := node.Process.Go(func(p goprocess.Process) { + serverClosed := make(chan struct{}) + go func() { serverError = server.Serve(lis) - }) + close(serverClosed) + }() // wait for server to exit. select { - case <-serverProc.Closed(): + case <-serverClosed: // if node being closed before server exits, close server - case <-node.Process.Closing(): + case <-node.Context().Done(): log.Infof("server at %s terminating...", addr) - warnProc := periodicproc.Tick(5*time.Second, func(_ goprocess.Process) { - log.Infof("waiting for server at %s to terminate...", addr) - }) + go func() { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + log.Infof("waiting for server at %s to terminate...", addr) + case <-serverClosed: + return + } + } + }() // This timeout shouldn't be necessary if all of our commands // are obeying their contexts but we should have *some* timeout. @@ -130,10 +139,8 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error // Should have already closed but we still need to wait for it // to set the error. - <-serverProc.Closed() + <-serverClosed serverError = err - - warnProc.Close() } log.Infof("server at %s terminated", addr) diff --git a/core/corehttp/gateway.go b/core/corehttp/gateway.go index 6ac381885..340882a7e 100644 --- a/core/corehttp/gateway.go +++ b/core/corehttp/gateway.go @@ -97,11 +97,21 @@ func Libp2pGatewayOption() ServeOption { return nil, err } + // Get gateway configuration from the node's config + cfg, err := n.Repo.Config() + if err != nil { + return nil, err + } + gwConfig := gateway.Config{ - DeserializedResponses: false, - NoDNSLink: true, + // Keep these constraints for security + DeserializedResponses: false, // Trustless-only + NoDNSLink: true, // No DNS resolution PublicGateways: nil, Menu: nil, + // Apply timeout and concurrency limits from user config + RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout), + MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))), } handler := gateway.NewHandler(gwConfig, &offlineGatewayErrWrapper{gwimpl: backend}) @@ -258,6 +268,8 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er DisableHTMLErrors: cfg.Gateway.DisableHTMLErrors.WithDefault(config.DefaultDisableHTMLErrors), NoDNSLink: cfg.Gateway.NoDNSLink, PublicGateways: map[string]*gateway.PublicGateway{}, + RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout), + MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))), } // Add default implicit known gateways, such as subdomain gateway on localhost. diff --git a/core/corehttp/metrics.go b/core/corehttp/metrics.go index f43362ff7..be1031513 100644 --- a/core/corehttp/metrics.go +++ b/core/corehttp/metrics.go @@ -87,6 +87,7 @@ func MetricsCollectionOption(handlerName string) ServeOption { Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, } + // Legacy metric - new metrics are provided by boxo/gateway as gw_http_responses_total reqCnt := prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: opts.Namespace, diff --git a/core/corehttp/p2p_proxy_test.go b/core/corehttp/p2p_proxy_test.go index 969bc31e1..e915c0822 100644 --- a/core/corehttp/p2p_proxy_test.go +++ b/core/corehttp/p2p_proxy_test.go @@ -5,9 +5,8 @@ import ( "strings" "testing" - "github.com/ipfs/kubo/thirdparty/assert" - protocol "github.com/libp2p/go-libp2p/core/protocol" + "github.com/stretchr/testify/require" ) type TestCase struct { @@ -29,12 +28,10 @@ func TestParseRequest(t *testing.T) { req, _ := http.NewRequest(http.MethodGet, url, strings.NewReader("")) parsed, err := parseRequest(req) - if err != nil { - t.Fatal(err) - } - assert.True(parsed.httpPath == tc.path, t, "proxy request path") - assert.True(parsed.name == protocol.ID(tc.name), t, "proxy request name") - assert.True(parsed.target == tc.target, t, "proxy request peer-id") + require.NoError(t, err) + require.Equal(t, tc.path, parsed.httpPath, "proxy request path") + require.Equal(t, protocol.ID(tc.name), parsed.name, "proxy request name") + require.Equal(t, tc.target, parsed.target, "proxy request peer-id") } } @@ -49,8 +46,6 @@ func TestParseRequestInvalidPath(t *testing.T) { req, _ := http.NewRequest(http.MethodGet, url, strings.NewReader("")) _, err := parseRequest(req) - if err == nil { - t.Fail() - } + require.Error(t, err) } } diff --git a/core/corehttp/webui.go b/core/corehttp/webui.go index 387a5b9ca..9c3244ad2 100644 --- a/core/corehttp/webui.go +++ b/core/corehttp/webui.go @@ -1,11 +1,12 @@ package corehttp // WebUI version confirmed to work with this Kubo version -const WebUIPath = "/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y" // v4.7.0 +const WebUIPath = "/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu" // v4.8.0 // WebUIPaths is a list of all past webUI paths. var WebUIPaths = []string{ WebUIPath, + "/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y", // v4.7.0 "/ipfs/bafybeibpaa5kqrj4gkemiswbwndjqiryl65cks64ypwtyerxixu56gnvvm", // v4.6.0 "/ipfs/bafybeiata4qg7xjtwgor6r5dw63jjxyouenyromrrb4lrewxrlvav7gzgi", // v4.5.0 "/ipfs/bafybeigp3zm7cqoiciqk5anlheenqjsgovp7j7zq6hah4nu6iugdgb4nby", // v4.4.2 diff --git a/core/coreiface/options/name.go b/core/coreiface/options/name.go index 7b4b6a8fd..8fc4f552a 100644 --- a/core/coreiface/options/name.go +++ b/core/coreiface/options/name.go @@ -16,6 +16,8 @@ type NamePublishSettings struct { TTL *time.Duration CompatibleWithV1 bool AllowOffline bool + AllowDelegated bool + Sequence *uint64 } type NameResolveSettings struct { @@ -34,7 +36,8 @@ func NamePublishOptions(opts ...NamePublishOption) (*NamePublishSettings, error) ValidTime: DefaultNameValidTime, Key: "self", - AllowOffline: false, + AllowOffline: false, + AllowDelegated: false, } for _, opt := range opts { @@ -96,6 +99,16 @@ func (nameOpts) AllowOffline(allow bool) NamePublishOption { } } +// AllowDelegated is an option for Name.Publish which allows publishing without +// DHT connectivity, using local datastore and HTTP delegated publishers only. +// Default value is false +func (nameOpts) AllowDelegated(allowDelegated bool) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.AllowDelegated = allowDelegated + return nil + } +} + // TTL is an option for Name.Publish which specifies the time duration the // published record should be cached for (caution: experimental). func (nameOpts) TTL(ttl time.Duration) NamePublishOption { @@ -105,6 +118,15 @@ func (nameOpts) TTL(ttl time.Duration) NamePublishOption { } } +// Sequence is an option for Name.Publish which specifies the sequence number of +// a namesys record. +func (nameOpts) Sequence(seq uint64) NamePublishOption { + return func(settings *NamePublishSettings) error { + settings.Sequence = &seq + return nil + } +} + // CompatibleWithV1 is an option for [Name.Publish] which specifies if the // created record should be backwards compatible with V1 IPNS Records. func (nameOpts) CompatibleWithV1(compatible bool) NamePublishOption { diff --git a/core/coreiface/options/unixfs.go b/core/coreiface/options/unixfs.go index 20f18d1e0..45e880ed1 100644 --- a/core/coreiface/options/unixfs.go +++ b/core/coreiface/options/unixfs.go @@ -39,6 +39,7 @@ type UnixfsAddSettings struct { Layout Layout Pin bool + PinName string OnlyHash bool FsCache bool NoCopy bool @@ -83,6 +84,7 @@ func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix, Layout: BalancedLayout, Pin: false, + PinName: "", OnlyHash: false, FsCache: false, NoCopy: false, @@ -280,9 +282,12 @@ func (unixfsOpts) Layout(layout Layout) UnixfsAddOption { } // Pin tells the adder to pin the file root recursively after adding -func (unixfsOpts) Pin(pin bool) UnixfsAddOption { +func (unixfsOpts) Pin(pin bool, pinName string) UnixfsAddOption { return func(settings *UnixfsAddSettings) error { settings.Pin = pin + if pin { + settings.PinName = pinName + } return nil } } diff --git a/core/coreiface/tests/name.go b/core/coreiface/tests/name.go index 1e739fdd0..0e091548a 100644 --- a/core/coreiface/tests/name.go +++ b/core/coreiface/tests/name.go @@ -142,8 +142,6 @@ func (tp *TestSuite) TestBasicPublishResolveKey(t *testing.T) { } func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) { - t.Skip("ValidTime doesn't appear to work at this time resolution") - ctx, cancel := context.WithCancel(context.Background()) defer cancel() apis, err := tp.MakeAPISwarm(t, ctx, 5) @@ -155,14 +153,25 @@ func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) { self, err := api.Key().Self(ctx) require.NoError(t, err) - name, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Millisecond*100)) + name, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Second*1)) require.NoError(t, err) require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String()) - time.Sleep(time.Second) - - _, err = api.Name().Resolve(ctx, name.String()) + // First resolve should succeed (before expiration) + resPath, err := api.Name().Resolve(ctx, name.String()) require.NoError(t, err) + require.Equal(t, p.String(), resPath.String()) + + // Wait for record to expire (1 second ValidTime + buffer) + time.Sleep(time.Second * 2) + + // Second resolve should now fail after ValidTime expiration (cached) + _, err = api.Name().Resolve(ctx, name.String()) + require.Error(t, err, "IPNS resolution should fail after ValidTime expires (cached)") + + // Third resolve should also fail after ValidTime expiration (non-cached) + _, err = api.Name().Resolve(ctx, name.String(), opt.Name.Cache(false)) + require.Error(t, err, "IPNS resolution should fail after ValidTime expires (non-cached)") } // TODO: When swarm api is created, add multinode tests diff --git a/core/coreiface/tests/pin.go b/core/coreiface/tests/pin.go index 4c606323f..18f90c051 100644 --- a/core/coreiface/tests/pin.go +++ b/core/coreiface/tests/pin.go @@ -433,7 +433,7 @@ func getThreeChainedNodes(t *testing.T, ctx context.Context, api iface.CoreAPI, return immutablePathCidContainer{leaf}, parent, grandparent } -func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recusive, direct, indirect []cidContainer) { +func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recursive, direct, indirect []cidContainer) { assertPinLsAllConsistency(t, ctx, api) list, err := accPins(ctx, api, opt.Pin.Ls.Recursive()) @@ -441,7 +441,7 @@ func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recusi t.Fatal(err) } - assertPinCids(t, list, recusive...) + assertPinCids(t, list, recursive...) list, err = accPins(ctx, api, opt.Pin.Ls.Direct()) if err != nil { diff --git a/core/coreiface/tests/routing.go b/core/coreiface/tests/routing.go index 753d49550..147cb9b74 100644 --- a/core/coreiface/tests/routing.go +++ b/core/coreiface/tests/routing.go @@ -171,6 +171,13 @@ func (tp *TestSuite) TestRoutingFindProviders(t *testing.T) { t.Fatal(err) } + // Pin so that it is provided, given that providing strategy is + // "roots" and addTestObject does not pin. + err = apis[0].Pin().Add(ctx, p) + if err != nil { + t.Fatal(err) + } + time.Sleep(3 * time.Second) out, err := apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1)) diff --git a/core/coreiface/tests/unixfs.go b/core/coreiface/tests/unixfs.go index 43447990e..c2717216c 100644 --- a/core/coreiface/tests/unixfs.go +++ b/core/coreiface/tests/unixfs.go @@ -539,7 +539,7 @@ func (tp *TestSuite) TestAddPinned(t *testing.T) { t.Fatal(err) } - _, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true)) + _, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true, "")) if err != nil { t.Fatal(err) } diff --git a/core/coreunix/add.go b/core/coreunix/add.go index eb6f25e0f..55a9d5bec 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -76,6 +76,7 @@ type Adder struct { Out chan<- interface{} Progress bool Pin bool + PinName string Trickle bool RawLeaves bool MaxLinks int @@ -102,7 +103,7 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) { } // Note, this adds it to DAGService already. - mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, mfs.MkdirOpts{ + mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, nil, mfs.MkdirOpts{ CidBuilder: adder.CidBuilder, MaxLinks: adder.MaxDirectoryLinks, MaxHAMTFanout: adder.MaxHAMTFanout, @@ -182,9 +183,10 @@ func (adder *Adder) curRootNode() (ipld.Node, error) { return root, err } -// Recursively pins the root node of Adder and -// writes the pin state to the backing datastore. -func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node) error { +// PinRoot recursively pins the root node of Adder with an optional name and +// writes the pin state to the backing datastore. If name is empty, the pin +// will be created without a name. +func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node, name string) error { ctx, span := tracing.Span(ctx, "CoreUnix.Adder", "PinRoot") defer span.End() @@ -207,7 +209,7 @@ func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node) error { adder.tempRoot = rnk } - err = adder.pinning.PinWithMode(ctx, rnk, pin.Recursive, "") + err = adder.pinning.PinWithMode(ctx, rnk, pin.Recursive, name) if err != nil { return err } @@ -369,7 +371,12 @@ func (adder *Adder) AddAllAndPin(ctx context.Context, file files.Node) (ipld.Nod if !adder.Pin { return nd, nil } - return nd, adder.PinRoot(ctx, nd) + + if err := adder.PinRoot(ctx, nd, adder.PinName); err != nil { + return nil, err + } + + return nd, nil } func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Node, toplevel bool) error { @@ -409,7 +416,7 @@ func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Nod case files.Directory: return adder.addDir(ctx, path, f, toplevel) case *files.Symlink: - return adder.addSymlink(path, f) + return adder.addSymlink(ctx, path, f) case files.File: return adder.addFile(path, f) default: @@ -417,7 +424,7 @@ func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Nod } } -func (adder *Adder) addSymlink(path string, l *files.Symlink) error { +func (adder *Adder) addSymlink(ctx context.Context, path string, l *files.Symlink) error { sdata, err := unixfs.SymlinkData(l.Target) if err != nil { return err @@ -475,7 +482,7 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory // if we need to store mode or modification time then create a new root which includes that data if toplevel && (adder.FileMode != 0 || !adder.FileMtime.IsZero()) { - mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, + mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, nil, mfs.MkdirOpts{ CidBuilder: adder.CidBuilder, MaxLinks: adder.MaxDirectoryLinks, @@ -530,7 +537,7 @@ func (adder *Adder) maybePauseForGC(ctx context.Context) error { return err } - err = adder.PinRoot(ctx, rn) + err = adder.PinRoot(ctx, rn, "") if err != nil { return err } diff --git a/core/coreunix/add_test.go b/core/coreunix/add_test.go index 1eb050ee9..a11dd13e4 100644 --- a/core/coreunix/add_test.go +++ b/core/coreunix/add_test.go @@ -93,8 +93,15 @@ func TestAddMultipleGCLive(t *testing.T) { // finish write and unblock gc pipew1.Close() - // Should have gotten the lock at this point - <-gc1started + // Wait for GC to acquire the lock + // The adder needs to finish processing file 'a' and call maybePauseForGC + // when starting file 'b' before GC can proceed + select { + case <-gc1started: + // GC got the lock as expected + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for GC to start - possible deadlock") + } removedHashes := make(map[string]struct{}) for r := range gc1out { @@ -123,7 +130,15 @@ func TestAddMultipleGCLive(t *testing.T) { pipew2.Close() - <-gc2started + // Wait for second GC to acquire the lock + // The adder needs to finish processing file 'b' and call maybePauseForGC + // when starting file 'c' before GC can proceed + select { + case <-gc2started: + // GC got the lock as expected + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for second GC to start - possible deadlock") + } for r := range gc2out { if r.Error != nil { diff --git a/core/node/bitswap.go b/core/node/bitswap.go index 976d82765..e73145292 100644 --- a/core/node/bitswap.go +++ b/core/node/bitswap.go @@ -14,16 +14,14 @@ import ( "github.com/ipfs/boxo/bitswap/network/httpnet" blockstore "github.com/ipfs/boxo/blockstore" exchange "github.com/ipfs/boxo/exchange" - "github.com/ipfs/boxo/exchange/providing" - provider "github.com/ipfs/boxo/provider" rpqm "github.com/ipfs/boxo/routing/providerquerymanager" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" version "github.com/ipfs/kubo" "github.com/ipfs/kubo/config" - irouting "github.com/ipfs/kubo/routing" "github.com/libp2p/go-libp2p/core/host" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" "go.uber.org/fx" blocks "github.com/ipfs/go-block-format" @@ -75,7 +73,7 @@ type bitswapIn struct { Mctx helpers.MetricsCtx Cfg *config.Config Host host.Host - Rt irouting.ProvideManyRouter + Discovery routing.ContentDiscovery Bs blockstore.GCBlockstore BitswapOpts []bitswap.Option `group:"bitswap-options"` } @@ -88,9 +86,14 @@ func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} { var bitswapNetworks, bitswapLibp2p network.BitSwapNetwork var bitswapBlockstore blockstore.Blockstore = in.Bs + connEvtMgr := network.NewConnectEventManager() + libp2pEnabled := in.Cfg.Bitswap.Libp2pEnabled.WithDefault(config.DefaultBitswapLibp2pEnabled) if libp2pEnabled { - bitswapLibp2p = bsnet.NewFromIpfsHost(in.Host) + bitswapLibp2p = bsnet.NewFromIpfsHost( + in.Host, + bsnet.WithConnectEventManager(connEvtMgr), + ) } if httpEnabled { @@ -112,6 +115,7 @@ func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} { httpnet.WithMaxBlockSize(int64(maxBlockSize)), httpnet.WithUserAgent(version.GetUserAgentVersion()), httpnet.WithMetricsLabelsForEndpoints(httpCfg.Allowlist), + httpnet.WithConnectEventManager(connEvtMgr), ) bitswapNetworks = network.New(in.Host.Peerstore(), bitswapLibp2p, bitswapHTTP) } else if libp2pEnabled { @@ -178,7 +182,7 @@ func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} { ignoredPeerIDs = append(ignoredPeerIDs, pid) } providerQueryMgr, err := rpqm.New(bitswapNetworks, - in.Rt, + in.Discovery, rpqm.WithMaxProviders(maxProviders), rpqm.WithIgnoreProviders(ignoredPeerIDs...), ) @@ -216,32 +220,6 @@ func OnlineExchange(isBitswapActive bool) interface{} { } } -type providingExchangeIn struct { - fx.In - - BaseExch exchange.Interface - Provider provider.System -} - -// ProvidingExchange creates a providing.Exchange with the existing exchange -// and the provider.System. -// We cannot do this in OnlineExchange because it causes cycles so this is for -// a decorator. -func ProvidingExchange(provide bool) interface{} { - return func(in providingExchangeIn, lc fx.Lifecycle) exchange.Interface { - exch := in.BaseExch - if provide { - exch = providing.New(in.BaseExch, in.Provider) - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return exch.Close() - }, - }) - } - return exch - } -} - type noopExchange struct { closer io.Closer } diff --git a/core/node/builder.go b/core/node/builder.go index 411e3228c..4014308f5 100644 --- a/core/node/builder.go +++ b/core/node/builder.go @@ -7,6 +7,7 @@ import ( "go.uber.org/fx" + "github.com/ipfs/boxo/autoconf" "github.com/ipfs/kubo/core/node/helpers" "github.com/ipfs/kubo/core/node/libp2p" "github.com/ipfs/kubo/repo" @@ -125,7 +126,7 @@ func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { return nil, err } - c.Bootstrap = cfg.DefaultBootstrapAddresses + c.Bootstrap = autoconf.FallbackBootstrapPeers c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"} c.Identity.PeerID = pid.String() c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb) diff --git a/core/node/core.go b/core/node/core.go index cb3439939..0a0ded89a 100644 --- a/core/node/core.go +++ b/core/node/core.go @@ -2,6 +2,7 @@ package node import ( "context" + "errors" "fmt" "github.com/ipfs/boxo/blockservice" @@ -17,6 +18,7 @@ import ( pathresolver "github.com/ipfs/boxo/path/resolver" pin "github.com/ipfs/boxo/pinning/pinner" "github.com/ipfs/boxo/pinning/pinner/dspinner" + provider "github.com/ipfs/boxo/provider" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" format "github.com/ipfs/go-ipld-format" @@ -47,25 +49,50 @@ func BlockService(cfg *config.Config) func(lc fx.Lifecycle, bs blockstore.Blocks } // Pinning creates new pinner which tells GC which blocks should be kept -func Pinning(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo) (pin.Pinner, error) { - rootDS := repo.Datastore() +func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov provider.System) (pin.Pinner, error) { + // Parse strategy at function creation time (not inside the returned function) + // This happens before the provider is created, which is why we pass the strategy + // string and parse it here, rather than using fx-provided ProvidingStrategy. + strategyFlag := config.ParseReproviderStrategy(strategy) - syncFn := func(ctx context.Context) error { - if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { - return err + return func(bstore blockstore.Blockstore, + ds format.DAGService, + repo repo.Repo, + prov provider.System) (pin.Pinner, error) { + rootDS := repo.Datastore() + + syncFn := func(ctx context.Context) error { + if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { + return err + } + return rootDS.Sync(ctx, filestore.FilestorePrefix) } - return rootDS.Sync(ctx, filestore.FilestorePrefix) + syncDs := &syncDagService{ds, syncFn} + + ctx := context.TODO() + + var opts []dspinner.Option + roots := (strategyFlag & config.ReproviderStrategyRoots) != 0 + pinned := (strategyFlag & config.ReproviderStrategyPinned) != 0 + + // Important: Only one of WithPinnedProvider or WithRootsProvider should be active. + // Having both would cause duplicate root advertisements since "pinned" includes all + // pinned content (roots + children), while "roots" is just the root CIDs. + // We prioritize "pinned" if both are somehow set (though this shouldn't happen + // with proper strategy parsing). + if pinned { + opts = append(opts, dspinner.WithPinnedProvider(prov)) + } else if roots { + opts = append(opts, dspinner.WithRootsProvider(prov)) + } + + pinning, err := dspinner.New(ctx, rootDS, syncDs, opts...) + if err != nil { + return nil, err + } + + return pinning, nil } - syncDs := &syncDagService{ds, syncFn} - - ctx := context.TODO() - - pinning, err := dspinner.New(ctx, rootDS, syncDs) - if err != nil { - return nil, err - } - - return pinning, nil } var ( @@ -152,63 +179,76 @@ func Dag(bs blockservice.BlockService) format.DAGService { } // Files loads persisted MFS root -func Files(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore) (*mfs.Root, error) { - dsk := datastore.NewKey("/local/filesroot") - pf := func(ctx context.Context, c cid.Cid) error { - rootDS := repo.Datastore() - if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { - return err - } - if err := rootDS.Sync(ctx, filestore.FilestorePrefix); err != nil { - return err +func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) { + dsk := datastore.NewKey("/local/filesroot") + pf := func(ctx context.Context, c cid.Cid) error { + rootDS := repo.Datastore() + if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil { + return err + } + if err := rootDS.Sync(ctx, filestore.FilestorePrefix); err != nil { + return err + } + + if err := rootDS.Put(ctx, dsk, c.Bytes()); err != nil { + return err + } + return rootDS.Sync(ctx, dsk) } - if err := rootDS.Put(ctx, dsk, c.Bytes()); err != nil { - return err - } - return rootDS.Sync(ctx, dsk) - } + var nd *merkledag.ProtoNode + ctx := helpers.LifecycleCtx(mctx, lc) + val, err := repo.Datastore().Get(ctx, dsk) - var nd *merkledag.ProtoNode - ctx := helpers.LifecycleCtx(mctx, lc) - val, err := repo.Datastore().Get(ctx, dsk) + switch { + case errors.Is(err, datastore.ErrNotFound): + nd = unixfs.EmptyDirNode() + err := dag.Add(ctx, nd) + if err != nil { + return nil, fmt.Errorf("failure writing filesroot to dagstore: %s", err) + } + case err == nil: + c, err := cid.Cast(val) + if err != nil { + return nil, err + } - switch { - case err == datastore.ErrNotFound || val == nil: - nd = unixfs.EmptyDirNode() - err := dag.Add(ctx, nd) - if err != nil { - return nil, fmt.Errorf("failure writing filesroot to dagstore: %s", err) - } - case err == nil: - c, err := cid.Cast(val) - if err != nil { + offlineDag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + rnd, err := offlineDag.Get(ctx, c) + if err != nil { + return nil, fmt.Errorf("error loading filesroot from dagservice: %s", err) + } + + pbnd, ok := rnd.(*merkledag.ProtoNode) + if !ok { + return nil, merkledag.ErrNotProtobuf + } + + nd = pbnd + default: return nil, err } - offineDag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - rnd, err := offineDag.Get(ctx, c) - if err != nil { - return nil, fmt.Errorf("error loading filesroot from dagservice: %s", err) + // MFS (Mutable File System) provider integration: + // Only pass the provider to MFS when the strategy includes "mfs". + // MFS will call Provide() on every DAGService.Add() operation, + // which is sufficient for the "mfs" strategy - it ensures all + // MFS content gets announced as it's added or modified. + // For non-mfs strategies, we set provider to nil to avoid unnecessary providing. + strategyFlag := config.ParseReproviderStrategy(strategy) + if strategyFlag&config.ReproviderStrategyMFS == 0 { + prov = nil } - pbnd, ok := rnd.(*merkledag.ProtoNode) - if !ok { - return nil, merkledag.ErrNotProtobuf - } + root, err := mfs.NewRoot(ctx, dag, nd, pf, prov) - nd = pbnd - default: - return nil, err + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return root.Close() + }, + }) + + return root, err } - - root, err := mfs.NewRoot(ctx, dag, nd, pf) - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return root.Close() - }, - }) - - return root, err } diff --git a/core/node/dns.go b/core/node/dns.go index d338e0e8b..3f0875afb 100644 --- a/core/node/dns.go +++ b/core/node/dns.go @@ -16,5 +16,8 @@ func DNSResolver(cfg *config.Config) (*madns.Resolver, error) { dohOpts = append(dohOpts, doh.WithMaxCacheTTL(cfg.DNS.MaxCacheTTL.WithDefault(time.Duration(math.MaxUint32)*time.Second))) } - return gateway.NewDNSResolver(cfg.DNS.Resolvers, dohOpts...) + // Replace "auto" DNS resolver placeholders with autoconf values + resolvers := cfg.DNSResolversWithAutoConf() + + return gateway.NewDNSResolver(resolvers, dohOpts...) } diff --git a/core/node/groups.go b/core/node/groups.go index 9d53aeef5..9904574a8 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -216,6 +216,7 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part fx.Provide(libp2p.Routing), fx.Provide(libp2p.ContentRouting), + fx.Provide(libp2p.ContentDiscovery), fx.Provide(libp2p.BaseRouting(cfg)), maybeProvide(libp2p.PubsubRouter, bcfg.getOpt("ipnsps")), @@ -249,7 +250,12 @@ func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option { return fx.Options( fx.Provide(RepoConfig), fx.Provide(Datastore), - fx.Provide(BaseBlockstoreCtor(cacheOpts, cfg.Datastore.HashOnRead, cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough))), + fx.Provide(BaseBlockstoreCtor( + cacheOpts, + cfg.Datastore.HashOnRead, + cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough), + cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy), + )), finalBstore, ) } @@ -349,8 +355,6 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part fx.Provide(BitswapOptions(cfg)), fx.Provide(Bitswap(isBitswapServerEnabled, isBitswapLibp2pEnabled, isHTTPRetrievalEnabled)), fx.Provide(OnlineExchange(isBitswapLibp2pEnabled)), - // Replace our Exchange with a Providing exchange! - fx.Decorate(ProvidingExchange(isProviderEnabled && isBitswapServerEnabled)), fx.Provide(DNSResolver), fx.Provide(Namesys(ipnsCacheSize, cfg.Ipns.MaxCacheTTL.WithDefault(config.DefaultIpnsMaxCacheTTL))), fx.Provide(Peering), @@ -380,6 +384,7 @@ func Offline(cfg *config.Config) fx.Option { fx.Provide(libp2p.Routing), fx.Provide(libp2p.ContentRouting), fx.Provide(libp2p.OfflineRouting), + fx.Provide(libp2p.ContentDiscovery), OfflineProviders(), ) } @@ -389,8 +394,6 @@ var Core = fx.Options( fx.Provide(Dag), fx.Provide(FetcherConfig), fx.Provide(PathResolverConfig), - fx.Provide(Pinning), - fx.Provide(Files), ) func Networked(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.PartialLimitConfig) fx.Option { @@ -440,16 +443,18 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option { uio.HAMTShardingSize = int(shardSingThresholdInt) uio.DefaultShardWidth = int(shardMaxFanout) + providerStrategy := cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy) + return fx.Options( bcfgOpts, - fx.Provide(baseProcess), - Storage(bcfg, cfg), Identity(cfg), IPNS, Networked(bcfg, cfg, userResourceOverrides), fx.Provide(BlockService(cfg)), + fx.Provide(Pinning(providerStrategy)), + fx.Provide(Files(providerStrategy)), Core, ) } diff --git a/core/node/helpers.go b/core/node/helpers.go index 491d627bf..05cccfd01 100644 --- a/core/node/helpers.go +++ b/core/node/helpers.go @@ -4,7 +4,6 @@ import ( "context" "errors" - "github.com/jbenet/goprocess" "go.uber.org/fx" ) @@ -55,14 +54,3 @@ func maybeInvoke(opt interface{}, enable bool) fx.Option { } return fx.Options() } - -// baseProcess creates a goprocess which is closed when the lifecycle signals it to stop -func baseProcess(lc fx.Lifecycle) goprocess.Process { - p := goprocess.WithParent(goprocess.Background()) - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - return p.Close() - }, - }) - return p -} diff --git a/core/node/libp2p/addrs.go b/core/node/libp2p/addrs.go index 91fae17c5..135b71d91 100644 --- a/core/node/libp2p/addrs.go +++ b/core/node/libp2p/addrs.go @@ -36,7 +36,7 @@ func AddrFilters(filters []string) func() (*ma.Filters, Libp2pOpts, error) { } } -func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string) (p2pbhost.AddrsFactory, error) { +func makeAddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) (p2pbhost.AddrsFactory, error) { var err error // To assign to the slice in the for loop existing := make(map[string]bool) // To avoid duplicates @@ -50,7 +50,7 @@ func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []st } var appendAnnAddrs []ma.Multiaddr - for _, addr := range appendAnnouce { + for _, addr := range appendAnnounce { if existing[addr] { // skip AppendAnnounce that is on the Announce list already continue @@ -99,14 +99,14 @@ func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []st }, nil } -func AddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string) interface{} { +func AddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) interface{} { return func(params struct { fx.In ForgeMgr *p2pforge.P2PForgeCertMgr `optional:"true"` }, ) (opts Libp2pOpts, err error) { var addrsFactory p2pbhost.AddrsFactory - announceAddrsFactory, err := makeAddrsFactory(announce, appendAnnouce, noAnnounce) + announceAddrsFactory, err := makeAddrsFactory(announce, appendAnnounce, noAnnounce) if err != nil { return opts, err } @@ -115,8 +115,8 @@ func AddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string } else { addrsFactory = func(multiaddrs []ma.Multiaddr) []ma.Multiaddr { forgeProcessing := params.ForgeMgr.AddressFactory()(multiaddrs) - annouceProcessing := announceAddrsFactory(forgeProcessing) - return annouceProcessing + announceProcessing := announceAddrsFactory(forgeProcessing) + return announceProcessing } } opts.Opts = append(opts.Opts, libp2p.AddrsFactory(addrsFactory)) diff --git a/core/node/libp2p/host.go b/core/node/libp2p/host.go index 7950f3dc6..9e71d3359 100644 --- a/core/node/libp2p/host.go +++ b/core/node/libp2p/host.go @@ -49,7 +49,8 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHo if err != nil { return out, err } - bootstrappers, err := cfg.BootstrapPeers() + // Use auto-config resolution for actual connectivity + bootstrappers, err := cfg.BootstrapPeersWithAutoConf() if err != nil { return out, err } diff --git a/core/node/libp2p/routing.go b/core/node/libp2p/routing.go index 94c99e5dd..6fafe37a4 100644 --- a/core/node/libp2p/routing.go +++ b/core/node/libp2p/routing.go @@ -95,7 +95,8 @@ func BaseRouting(cfg *config.Config) interface{} { if err != nil { return out, err } - bspeers, err := cfg.BootstrapPeers() + // Use auto-config resolution for actual connectivity + bspeers, err := cfg.BootstrapPeersWithAutoConf() if err != nil { return out, err } @@ -177,6 +178,12 @@ func ContentRouting(in p2pOnlineContentRoutingIn) routing.ContentRouting { } } +// ContentDiscovery narrows down the given content routing facility so that it +// only does discovery. +func ContentDiscovery(in irouting.ProvideManyRouter) routing.ContentDiscovery { + return in +} + type p2pOnlineRoutingIn struct { fx.In @@ -185,7 +192,7 @@ type p2pOnlineRoutingIn struct { } // Routing will get all routers obtained from different methods (delegated -// routers, pub-sub, and so on) and add them all together using a TieredRouter. +// routers, pub-sub, and so on) and add them all together using a ParallelRouter. func Routing(in p2pOnlineRoutingIn) irouting.ProvideManyRouter { routers := in.Routers @@ -291,24 +298,36 @@ func autoRelayFeeder(cfgPeering config.Peering, peerChan chan<- peer.AddrInfo) f } // Additionally, feed closest peers discovered via DHT - if dht == nil { - /* noop due to missing dht.WAN. happens in some unit tests, - not worth fixing as we will refactor this after go-libp2p 0.20 */ - continue + if dht != nil { + closestPeers, err := dht.WAN.GetClosestPeers(ctx, h.ID().String()) + if err == nil { + for _, p := range closestPeers { + addrs := h.Peerstore().Addrs(p) + if len(addrs) == 0 { + continue + } + dhtPeer := peer.AddrInfo{ID: p, Addrs: addrs} + select { + case peerChan <- dhtPeer: + case <-ctx.Done(): + return + } + } + } } - closestPeers, err := dht.WAN.GetClosestPeers(ctx, h.ID().String()) - if err != nil { - // no-op: usually 'failed to find any peer in table' during startup - continue - } - for _, p := range closestPeers { + + // Additionally, feed all connected swarm peers as potential relay candidates. + // This includes peers from HTTP routing, manual swarm connect, mDNS discovery, etc. + // (fixes https://github.com/ipfs/kubo/issues/10899) + connectedPeers := h.Network().Peers() + for _, p := range connectedPeers { addrs := h.Peerstore().Addrs(p) if len(addrs) == 0 { continue } - dhtPeer := peer.AddrInfo{ID: p, Addrs: addrs} + swarmPeer := peer.AddrInfo{ID: p, Addrs: addrs} select { - case peerChan <- dhtPeer: + case peerChan <- swarmPeer: case <-ctx.Done(): return } diff --git a/core/node/libp2p/routingopt.go b/core/node/libp2p/routingopt.go index 43565265b..c8f22af2f 100644 --- a/core/node/libp2p/routingopt.go +++ b/core/node/libp2p/routingopt.go @@ -2,9 +2,12 @@ package libp2p import ( "context" + "fmt" "os" + "strings" "time" + "github.com/ipfs/boxo/autoconf" "github.com/ipfs/go-datastore" "github.com/ipfs/kubo/config" irouting "github.com/ipfs/kubo/routing" @@ -32,46 +35,144 @@ type RoutingOption func(args RoutingOptionArgs) (routing.Routing, error) var noopRouter = routinghelpers.Null{} +// EndpointSource tracks where a URL came from to determine appropriate capabilities +type EndpointSource struct { + URL string + SupportsRead bool // came from DelegatedRoutersWithAutoConf (Read operations) + SupportsWrite bool // came from DelegatedPublishersWithAutoConf (Write operations) +} + +// determineCapabilities determines endpoint capabilities based on URL path and source +func determineCapabilities(endpoint EndpointSource) (string, autoconf.EndpointCapabilities, error) { + parsed, err := autoconf.DetermineKnownCapabilities(endpoint.URL, endpoint.SupportsRead, endpoint.SupportsWrite) + if err != nil { + log.Debugf("Skipping endpoint %q: %v", endpoint.URL, err) + return "", autoconf.EndpointCapabilities{}, nil // Return empty caps, not error + } + + return parsed.BaseURL, parsed.Capabilities, nil +} + +// collectAllEndpoints gathers URLs from both router and publisher sources +func collectAllEndpoints(cfg *config.Config) []EndpointSource { + var endpoints []EndpointSource + + // Get router URLs (Read operations) + var routerURLs []string + if envRouters := os.Getenv(config.EnvHTTPRouters); envRouters != "" { + // Use environment variable override if set (space or comma separated) + splitFunc := func(r rune) bool { return r == ',' || r == ' ' } + routerURLs = strings.FieldsFunc(envRouters, splitFunc) + log.Warnf("Using HTTP routers from %s environment variable instead of config/autoconf: %v", config.EnvHTTPRouters, routerURLs) + } else { + // Use delegated routers from autoconf + routerURLs = cfg.DelegatedRoutersWithAutoConf() + // No fallback - if autoconf doesn't provide endpoints, use empty list + // This exposes any autoconf issues rather than masking them with hardcoded defaults + } + + // Add router URLs to collection + for _, url := range routerURLs { + endpoints = append(endpoints, EndpointSource{ + URL: url, + SupportsRead: true, + SupportsWrite: false, + }) + } + + // Get publisher URLs (Write operations) + publisherURLs := cfg.DelegatedPublishersWithAutoConf() + + // Add publisher URLs, merging with existing router URLs if they match + for _, url := range publisherURLs { + found := false + for i, existing := range endpoints { + if existing.URL == url { + endpoints[i].SupportsWrite = true + found = true + break + } + } + if !found { + endpoints = append(endpoints, EndpointSource{ + URL: url, + SupportsRead: false, + SupportsWrite: true, + }) + } + } + + return endpoints +} + func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.ParallelRouter, error) { var routers []*routinghelpers.ParallelRouter httpRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled) - // Use config.DefaultHTTPRouters if custom override was sent via config.EnvHTTPRouters - // or if user did not set any preference in cfg.Routing.DelegatedRouters - var httpRouterEndpoints []string - if os.Getenv(config.EnvHTTPRouters) != "" || len(cfg.Routing.DelegatedRouters) == 0 { - httpRouterEndpoints = config.DefaultHTTPRouters - } else { - httpRouterEndpoints = cfg.Routing.DelegatedRouters + // Collect URLs from both router and publisher sources + endpoints := collectAllEndpoints(cfg) + + // Group endpoints by origin (base URL) and aggregate capabilities + originCapabilities := make(map[string]autoconf.EndpointCapabilities) + for _, endpoint := range endpoints { + // Parse endpoint and determine capabilities based on source + baseURL, capabilities, err := determineCapabilities(endpoint) + if err != nil { + return nil, fmt.Errorf("failed to parse endpoint %q: %w", endpoint.URL, err) + } + + // Aggregate capabilities for this origin + existing := originCapabilities[baseURL] + existing.Merge(capabilities) + originCapabilities[baseURL] = existing } - // Append HTTP routers for additional speed - for _, endpoint := range httpRouterEndpoints { - httpRouter, err := irouting.ConstructHTTPRouter(endpoint, cfg.Identity.PeerID, httpAddrsFromConfig(cfg.Addresses), cfg.Identity.PrivKey, httpRetrievalEnabled) + // Create single HTTP router and composer per origin + for baseURL, capabilities := range originCapabilities { + // Construct HTTP router using base URL (without path) + httpRouter, err := irouting.ConstructHTTPRouter(baseURL, cfg.Identity.PeerID, httpAddrsFromConfig(cfg.Addresses), cfg.Identity.PrivKey, httpRetrievalEnabled) if err != nil { return nil, err } - // Mapping router to /routing/v1/* endpoints + + // Configure router operations based on aggregated capabilities // https://specs.ipfs.tech/routing/http-routing-v1/ - r := &irouting.Composer{ - GetValueRouter: httpRouter, // GET /routing/v1/ipns - PutValueRouter: httpRouter, // PUT /routing/v1/ipns + composer := &irouting.Composer{ + GetValueRouter: noopRouter, // Default disabled, enabled below based on capabilities + PutValueRouter: noopRouter, // Default disabled, enabled below based on capabilities ProvideRouter: noopRouter, // we don't have spec for sending provides to /routing/v1 (revisit once https://github.com/ipfs/specs/pull/378 or similar is ratified) - FindPeersRouter: httpRouter, // /routing/v1/peers - FindProvidersRouter: httpRouter, // /routing/v1/providers + FindPeersRouter: noopRouter, // Default disabled, enabled below based on capabilities + FindProvidersRouter: noopRouter, // Default disabled, enabled below based on capabilities } - if endpoint == config.CidContactRoutingURL { - // Special-case: cid.contact only supports /routing/v1/providers/cid - // we disable other endpoints to avoid sending requests that always fail - r.GetValueRouter = noopRouter - r.PutValueRouter = noopRouter - r.ProvideRouter = noopRouter - r.FindPeersRouter = noopRouter + // Enable specific capabilities + if capabilities.IPNSGet { + composer.GetValueRouter = httpRouter // GET /routing/v1/ipns for IPNS resolution + } + if capabilities.IPNSPut { + composer.PutValueRouter = httpRouter // PUT /routing/v1/ipns for IPNS publishing + } + if capabilities.Peers { + composer.FindPeersRouter = httpRouter // GET /routing/v1/peers + } + if capabilities.Providers { + composer.FindProvidersRouter = httpRouter // GET /routing/v1/providers + } + + // Handle special cases and backward compatibility + if baseURL == config.CidContactRoutingURL { + // Special-case: cid.contact only supports /routing/v1/providers/cid endpoint + // Override any capabilities detected from URL path to ensure only providers is enabled + // TODO: Consider moving this to configuration or removing once cid.contact adds more capabilities + composer.GetValueRouter = noopRouter + composer.PutValueRouter = noopRouter + composer.ProvideRouter = noopRouter + composer.FindPeersRouter = noopRouter + composer.FindProvidersRouter = httpRouter // Only providers supported } routers = append(routers, &routinghelpers.ParallelRouter{ - Router: r, + Router: composer, IgnoreError: true, // https://github.com/ipfs/kubo/pull/9475#discussion_r1042507387 Timeout: 15 * time.Second, // 5x server value from https://github.com/ipfs/kubo/pull/9475#discussion_r1042428529 DoNotWaitForSearchValue: true, @@ -81,6 +182,31 @@ func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.Parallel return routers, nil } +// ConstructDelegatedOnlyRouting returns routers used when Routing.Type is set to "delegated" +// This provides HTTP-only routing without DHT, using only delegated routers and IPNS publishers. +// Useful for environments where DHT connectivity is not available or desired +func ConstructDelegatedOnlyRouting(cfg *config.Config) RoutingOption { + return func(args RoutingOptionArgs) (routing.Routing, error) { + // Use only HTTP routers (includes both read and write capabilities) - no DHT + var routers []*routinghelpers.ParallelRouter + + // Add HTTP delegated routers (includes both router and publisher capabilities) + httpRouters, err := constructDefaultHTTPRouters(cfg) + if err != nil { + return nil, err + } + routers = append(routers, httpRouters...) + + // Validate that we have at least one router configured + if len(routers) == 0 { + return nil, fmt.Errorf("no delegated routers or publishers configured for 'delegated' routing mode") + } + + routing := routinghelpers.NewComposableParallel(routers) + return routing, nil + } +} + // ConstructDefaultRouting returns routers used when Routing.Type is unset or set to "auto" func ConstructDefaultRouting(cfg *config.Config, routingOpt RoutingOption) RoutingOption { return func(args RoutingOptionArgs) (routing.Routing, error) { diff --git a/core/node/libp2p/routingopt_test.go b/core/node/libp2p/routingopt_test.go index 801fc0344..1a06045d9 100644 --- a/core/node/libp2p/routingopt_test.go +++ b/core/node/libp2p/routingopt_test.go @@ -3,7 +3,9 @@ package libp2p import ( "testing" + "github.com/ipfs/boxo/autoconf" config "github.com/ipfs/kubo/config" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -32,3 +34,191 @@ func TestHttpAddrsFromConfig(t *testing.T) { AppendAnnounce: []string{"/ip4/192.168.0.2/tcp/4001"}, }), "AppendAnnounce addrs should be included if specified") } + +func TestDetermineCapabilities(t *testing.T) { + tests := []struct { + name string + endpoint EndpointSource + expectedBaseURL string + expectedCapabilities autoconf.EndpointCapabilities + expectError bool + }{ + { + name: "URL with no path should have all Read capabilities", + endpoint: EndpointSource{ + URL: "https://example.com", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: true, + Peers: true, + IPNSGet: true, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with trailing slash should have all Read capabilities", + endpoint: EndpointSource{ + URL: "https://example.com/", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: true, + Peers: true, + IPNSGet: true, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with IPNS path should have only IPNS capabilities", + endpoint: EndpointSource{ + URL: "https://example.com/routing/v1/ipns", + SupportsRead: true, + SupportsWrite: true, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: false, + Peers: false, + IPNSGet: true, + IPNSPut: true, + }, + expectError: false, + }, + { + name: "URL with providers path should have only Providers capability", + endpoint: EndpointSource{ + URL: "https://example.com/routing/v1/providers", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: true, + Peers: false, + IPNSGet: false, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with peers path should have only Peers capability", + endpoint: EndpointSource{ + URL: "https://example.com/routing/v1/peers", + SupportsRead: true, + SupportsWrite: false, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: false, + Peers: true, + IPNSGet: false, + IPNSPut: false, + }, + expectError: false, + }, + { + name: "URL with Write support only should enable IPNSPut for no-path endpoint", + endpoint: EndpointSource{ + URL: "https://example.com", + SupportsRead: false, + SupportsWrite: true, + }, + expectedBaseURL: "https://example.com", + expectedCapabilities: autoconf.EndpointCapabilities{ + Providers: false, + Peers: false, + IPNSGet: false, + IPNSPut: true, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + baseURL, capabilities, err := determineCapabilities(tt.endpoint) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expectedBaseURL, baseURL) + assert.Equal(t, tt.expectedCapabilities, capabilities) + }) + } +} + +func TestEndpointCapabilitiesReadWriteLogic(t *testing.T) { + t.Run("Read endpoint with no path should enable read capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com", + SupportsRead: true, + SupportsWrite: false, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Read endpoint with no path should enable all read capabilities + assert.True(t, capabilities.Providers) + assert.True(t, capabilities.Peers) + assert.True(t, capabilities.IPNSGet) + assert.False(t, capabilities.IPNSPut) // Write capability should be false + }) + + t.Run("Write endpoint with no path should enable write capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com", + SupportsRead: false, + SupportsWrite: true, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Write endpoint with no path should only enable IPNS write capability + assert.False(t, capabilities.Providers) + assert.False(t, capabilities.Peers) + assert.False(t, capabilities.IPNSGet) + assert.True(t, capabilities.IPNSPut) // Only write capability should be true + }) + + t.Run("Specific path should only enable matching capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com/routing/v1/ipns", + SupportsRead: true, + SupportsWrite: true, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Specific IPNS path should only enable IPNS capabilities based on source + assert.False(t, capabilities.Providers) + assert.False(t, capabilities.Peers) + assert.True(t, capabilities.IPNSGet) // Read capability enabled + assert.True(t, capabilities.IPNSPut) // Write capability enabled + }) + + t.Run("Unsupported paths should result in empty capabilities", func(t *testing.T) { + endpoint := EndpointSource{ + URL: "https://example.com/routing/v1/unsupported", + SupportsRead: true, + SupportsWrite: false, + } + _, capabilities, err := determineCapabilities(endpoint) + require.NoError(t, err) + + // Unsupported paths should result in no capabilities + assert.False(t, capabilities.Providers) + assert.False(t, capabilities.Peers) + assert.False(t, capabilities.IPNSGet) + assert.False(t, capabilities.IPNSPut) + }) +} diff --git a/core/node/provider.go b/core/node/provider.go index 79fed2a4f..17a312f98 100644 --- a/core/node/provider.go +++ b/core/node/provider.go @@ -2,6 +2,7 @@ package node import ( "context" + "errors" "fmt" "time" @@ -9,8 +10,12 @@ import ( "github.com/ipfs/boxo/fetcher" "github.com/ipfs/boxo/mfs" pin "github.com/ipfs/boxo/pinning/pinner" + "github.com/ipfs/boxo/pinning/pinner/dspinner" provider "github.com/ipfs/boxo/provider" "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/repo" irouting "github.com/ipfs/kubo/routing" "go.uber.org/fx" @@ -21,12 +26,17 @@ import ( // and in 'ipfs stats provide' report. const sampledBatchSize = 1000 +// Datastore key used to store previous reprovide strategy. +const reprovideStrategyKey = "/reprovideStrategy" + func ProviderSys(reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option { - return fx.Provide(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, keyProvider provider.KeyChanFunc, repo repo.Repo, bs blockstore.Blockstore) (provider.System, error) { + return fx.Provide(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, repo repo.Repo) (provider.System, error) { + // Initialize provider.System first, before pinner/blockstore/etc. + // The KeyChanFunc will be set later via SetKeyProvider() once we have + // created the pinner, blockstore and other dependencies. opts := []provider.Option{ provider.Online(cr), provider.ReproviderInterval(reprovideInterval), - provider.KeyProvider(keyProvider), provider.ProvideWorkerCount(provideWorkerCount), } if !acceleratedDHTClient && reprovideInterval > 0 { @@ -45,16 +55,20 @@ func ProviderSys(reprovideInterval time.Duration, acceleratedDHTClient bool, pro defer cancel() // FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup. - ch, err := bs.AllKeysChan(ctx) + // Note: talk to datastore directly, as to not depend on Blockstore here. + qr, err := repo.Datastore().Query(ctx, query.Query{ + Prefix: blockstore.BlockPrefix.String(), + KeysOnly: true}) if err != nil { logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err) return false } + defer qr.Close() count = 0 countLoop: for { select { - case _, ok := <-ch: + case _, ok := <-qr.Next(): if !ok { break countLoop } @@ -114,6 +128,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtcli return false }, sampledBatchSize)) } + sys, err := provider.New(repo.Datastore(), opts...) if err != nil { return nil, err @@ -132,21 +147,18 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtcli // ONLINE/OFFLINE // OnlineProviders groups units managing provider routing records online -func OnlineProviders(provide bool, reprovideStrategy string, reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option { +func OnlineProviders(provide bool, providerStrategy string, reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option { if !provide { return OfflineProviders() } - var keyProvider fx.Option - switch reprovideStrategy { - case "all", "", "roots", "pinned", "mfs", "pinned+mfs", "flat": - keyProvider = fx.Provide(newProvidingStrategy(reprovideStrategy)) - default: - return fx.Error(fmt.Errorf("unknown reprovider strategy %q", reprovideStrategy)) + strategyFlag := config.ParseReproviderStrategy(providerStrategy) + if strategyFlag == 0 { + return fx.Error(fmt.Errorf("unknown reprovider strategy %q", providerStrategy)) } return fx.Options( - keyProvider, + fx.Provide(setReproviderKeyProvider(providerStrategy)), ProviderSys(reprovideInterval, acceleratedDHTClient, provideWorkerCount), ) } @@ -172,51 +184,120 @@ func mfsProvider(mfsRoot *mfs.Root, fetcher fetcher.Factory) provider.KeyChanFun } } -func mfsRootProvider(mfsRoot *mfs.Root) provider.KeyChanFunc { - return func(ctx context.Context) (<-chan cid.Cid, error) { - rootNode, err := mfsRoot.GetDirectory().GetNode() - if err != nil { - return nil, fmt.Errorf("error loading mfs root, cannot provide MFS: %w", err) - } - ch := make(chan cid.Cid, 1) - ch <- rootNode.Cid() - close(ch) - return ch, nil +type provStrategyIn struct { + fx.In + Pinner pin.Pinner + Blockstore blockstore.Blockstore + OfflineIPLDFetcher fetcher.Factory `name:"offlineIpldFetcher"` + OfflineUnixFSFetcher fetcher.Factory `name:"offlineUnixfsFetcher"` + MFSRoot *mfs.Root + Provider provider.System + Repo repo.Repo +} + +type provStrategyOut struct { + fx.Out + ProvidingStrategy config.ReproviderStrategy + ProvidingKeyChanFunc provider.KeyChanFunc +} + +// createKeyProvider creates the appropriate KeyChanFunc based on strategy. +// Each strategy has different behavior: +// - "roots": Only root CIDs of pinned content +// - "pinned": All pinned content (roots + children) +// - "mfs": Only MFS content +// - "all": all blocks +func createKeyProvider(strategyFlag config.ReproviderStrategy, in provStrategyIn) provider.KeyChanFunc { + switch strategyFlag { + case config.ReproviderStrategyRoots: + return provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher)) + case config.ReproviderStrategyPinned: + return provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)) + case config.ReproviderStrategyPinned | config.ReproviderStrategyMFS: + return provider.NewPrioritizedProvider( + provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)), + mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher), + ) + case config.ReproviderStrategyMFS: + return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher) + default: // "all", "", "flat" (compat) + return in.Blockstore.AllKeysChan } } -func newProvidingStrategy(strategy string) interface{} { - type input struct { - fx.In - Pinner pin.Pinner - Blockstore blockstore.Blockstore - OfflineIPLDFetcher fetcher.Factory `name:"offlineIpldFetcher"` - OfflineUnixFSFetcher fetcher.Factory `name:"offlineUnixfsFetcher"` - MFSRoot *mfs.Root +// detectStrategyChange checks if the reproviding strategy has changed from what's persisted. +// Returns: (previousStrategy, hasChanged, error) +func detectStrategyChange(ctx context.Context, strategy string, ds datastore.Datastore) (string, bool, error) { + strategyKey := datastore.NewKey(reprovideStrategyKey) + + prev, err := ds.Get(ctx, strategyKey) + if err != nil { + if errors.Is(err, datastore.ErrNotFound) { + return "", strategy != "", nil + } + return "", false, err } - return func(in input) provider.KeyChanFunc { - switch strategy { - case "roots": - return provider.NewBufferedProvider(provider.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher)) - case "pinned": - return provider.NewBufferedProvider(provider.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)) - case "pinned+mfs": - return provider.NewPrioritizedProvider( - provider.NewBufferedProvider(provider.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)), - mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher), - ) - case "mfs": - return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher) - case "flat": - return provider.NewBlockstoreProvider(in.Blockstore) - default: // "all", "" - return provider.NewPrioritizedProvider( - provider.NewPrioritizedProvider( - provider.NewBufferedProvider(provider.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher)), - mfsRootProvider(in.MFSRoot), - ), - provider.NewBlockstoreProvider(in.Blockstore), - ) + + previousStrategy := string(prev) + return previousStrategy, previousStrategy != strategy, nil +} + +// persistStrategy saves the current reproviding strategy to the datastore. +// Empty string strategies are deleted rather than stored. +func persistStrategy(ctx context.Context, strategy string, ds datastore.Datastore) error { + strategyKey := datastore.NewKey(reprovideStrategyKey) + + if strategy == "" { + return ds.Delete(ctx, strategyKey) + } + return ds.Put(ctx, strategyKey, []byte(strategy)) +} + +// handleStrategyChange manages strategy change detection and queue clearing. +// Strategy change detection: when the reproviding strategy changes, +// we clear the provide queue to avoid unexpected behavior from mixing +// strategies. This ensures a clean transition between different providing modes. +func handleStrategyChange(strategy string, provider provider.System, ds datastore.Datastore) { + ctx := context.Background() + + previous, changed, err := detectStrategyChange(ctx, strategy, ds) + if err != nil { + logger.Error("cannot read previous reprovide strategy", "err", err) + return + } + + if !changed { + return + } + + logger.Infow("Reprovider.Strategy changed, clearing provide queue", "previous", previous, "current", strategy) + provider.Clear() + + if err := persistStrategy(ctx, strategy, ds); err != nil { + logger.Error("cannot update reprovide strategy", "err", err) + } +} + +func setReproviderKeyProvider(strategy string) func(in provStrategyIn) provStrategyOut { + strategyFlag := config.ParseReproviderStrategy(strategy) + + return func(in provStrategyIn) provStrategyOut { + // Create the appropriate key provider based on strategy + kcf := createKeyProvider(strategyFlag, in) + + // SetKeyProvider breaks the circular dependency between provider, blockstore, and pinner. + // We cannot create the blockstore without the provider (it needs to provide blocks), + // and we cannot determine the reproviding strategy without the pinner/blockstore. + // This deferred initialization allows us to create provider.System first, + // then set the actual key provider function after all dependencies are ready. + in.Provider.SetKeyProvider(kcf) + + // Handle strategy changes (detection, queue clearing, persistence) + handleStrategyChange(strategy, in.Provider, in.Repo.Datastore()) + + return provStrategyOut{ + ProvidingStrategy: strategyFlag, + ProvidingKeyChanFunc: kcf, } } } diff --git a/core/node/storage.go b/core/node/storage.go index fd8dfb82e..b4ffb2587 100644 --- a/core/node/storage.go +++ b/core/node/storage.go @@ -2,6 +2,7 @@ package node import ( blockstore "github.com/ipfs/boxo/blockstore" + provider "github.com/ipfs/boxo/provider" "github.com/ipfs/go-datastore" config "github.com/ipfs/kubo/config" "go.uber.org/fx" @@ -27,11 +28,30 @@ func Datastore(repo repo.Repo) datastore.Datastore { type BaseBlocks blockstore.Blockstore // BaseBlockstoreCtor creates cached blockstore backed by the provided datastore -func BaseBlockstoreCtor(cacheOpts blockstore.CacheOpts, hashOnRead bool, writeThrough bool) func(mctx helpers.MetricsCtx, repo repo.Repo, lc fx.Lifecycle) (bs BaseBlocks, err error) { - return func(mctx helpers.MetricsCtx, repo repo.Repo, lc fx.Lifecycle) (bs BaseBlocks, err error) { +func BaseBlockstoreCtor( + cacheOpts blockstore.CacheOpts, + hashOnRead bool, + writeThrough bool, + providingStrategy string, + +) func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) { + return func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) { + opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)} + + // Blockstore providing integration: + // When strategy includes "all" the blockstore directly provides blocks as they're Put. + // Important: Provide calls from blockstore are intentionally BLOCKING. + // The Provider implementation (not the blockstore) should handle concurrency/queuing. + // This avoids spawning unbounded goroutines for concurrent block additions. + strategyFlag := config.ParseReproviderStrategy(providingStrategy) + if strategyFlag&config.ReproviderStrategyAll != 0 { + opts = append(opts, blockstore.Provider(prov)) + } + // hash security - bs = blockstore.NewBlockstore(repo.Datastore(), - blockstore.WriteThrough(writeThrough), + bs = blockstore.NewBlockstore( + repo.Datastore(), + opts..., ) bs = &verifbs.VerifBS{Blockstore: bs} bs, err = blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, cacheOpts) @@ -41,8 +61,8 @@ func BaseBlockstoreCtor(cacheOpts blockstore.CacheOpts, hashOnRead bool, writeTh bs = blockstore.NewIdStore(bs) - if hashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly? - bs.HashOnRead(true) + if hashOnRead { + bs = &blockstore.ValidatingBlockstore{Blockstore: bs} } return diff --git a/docs/RELEASE_CHECKLIST.md b/docs/RELEASE_CHECKLIST.md index 423e96632..0bf36a2de 100644 --- a/docs/RELEASE_CHECKLIST.md +++ b/docs/RELEASE_CHECKLIST.md @@ -1,4 +1,4 @@ - + # ✅ Release Checklist (vX.Y.Z[-rcN]) @@ -30,7 +30,7 @@ This section covers tasks to be done during each release. ### 1. Prepare release branch -- [ ] Prepare the release branch and update version numbers accordingly +- [ ] Prepare the release branch and update version numbers accordingly - [ ] create a new branch `release-vX.Y.Z` - use `master` as base if `Z == 0` - use `release` as base if `Z > 0` @@ -39,16 +39,16 @@ This section covers tasks to be done during each release. - [ ] create a draft PR from `release-vX.Y.Z` to `release` ([example](https://github.com/ipfs/kubo/pull/9306)) - [ ] Cherry-pick commits from `master` to the `release-vX.Y.Z` using `git cherry-pick -x ` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf)) - **NOTE:** cherry-picking with `-x` is important - - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Replace the `Changelog` and `Contributors` sections of the [changelog](docs/changelogs/vX.Y.md) with the stdout (do **NOT** copy the stderr) of `./bin/mkreleaselog`. - - **NOTE:** `mkreleaselog` expects your `$GOPATH/src/github.com/ipfs/kubo` to include latest commits from `release-vX.Y.Z` - [ ] verify all CI checks on the PR from `release-vX.Y.Z` to `release` are passing + - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Replace the `Changelog` and `Contributors` sections of the [changelog](docs/changelogs/vX.Y.md) with the stdout (do **NOT** copy the stderr) of `./bin/mkreleaselog`. + - **NOTE:** `mkreleaselog` expects your `$GOPATH/src/github.com/ipfs/kubo` to include latest commits from `release-vX.Y.Z` - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Merge the PR from `release-vX.Y.Z` to `release` using the `Create a merge commit` - do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit - do **NOT** delete the `release-vX.Y.Z` branch ### 2. Tag release -- [ ] Create the release tag +- [ ] Create the release tag - ⚠️ **NOTE:** This is a dangerous operation! Go and Docker publishing are difficult to reverse! Have the release reviewer verify all the commands marked with ! - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) tag the HEAD commit using `git tag -s vX.Y.Z(-rcN) -m 'Prerelease X.Y.Z(-rcN)'` - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) tag the HEAD commit of the `release` branch using `git tag -s vX.Y.Z -m 'Release X.Y.Z'` @@ -61,31 +61,28 @@ This section covers tasks to be done during each release. - [ ] Publish Docker image to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags) - [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow run initiated by the tag push to finish - [ ] verify the image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags) -- [ ] Publish the release to [dist.ipfs.tech](https://dist.ipfs.tech) +- [ ] Publish the release to [dist.ipfs.tech](https://dist.ipfs.tech) - [ ] check out [ipfs/distributions](https://github.com/ipfs/distributions) - - [ ] create new branch: run `git checkout -b release-kubo-X.Y.Z(-rcN)` - - [ ] run `./dist.sh add-version kubo vX.Y.Z(-rcN)` to add the new version to the `versions` file ([usage](https://github.com/ipfs/distributions#usage)) + - [ ] create new branch: run `git checkout -b release-kubo-X.Y.Z(-rcN)` - [ ] Verify [ipfs/distributions](https://github.com/ipfs/distributions)'s `.tool-versions`'s `golang` entry is set to the [latest go release](https://go.dev/doc/devel/release) on the major go branch [Kubo is being tested on](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) (see `go-version:`). If not, update `.tool-versions` to match the latest golang. - - [ ] create and merge the PR which updates `dists/kubo/versions` and `dists/go-ipfs/versions` (**NOTE:** ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) will also have `dists/kubo/current` and `dists/go-ipfs/current` – [example](https://github.com/ipfs/distributions/pull/1125)) + - [ ] run `./dist.sh add-version kubo vX.Y.Z(-rcN)` to add the new version to the `versions` file ([usage](https://github.com/ipfs/distributions#usage)) + - [ ] create and merge the PR which updates `dists/kubo/versions` (**NOTE:** ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) will also have `dists/kubo/current` – [example](https://github.com/ipfs/distributions/pull/1125)) - [ ] wait for the [CI](https://github.com/ipfs/distributions/actions/workflows/main.yml) workflow run initiated by the merge to master to finish - [ ] verify the release is available on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo) - [ ] Publish the release to [NPM](https://www.npmjs.com/package/kubo?activeTab=versions) - - [ ] manually dispatch the [Release to npm](https://github.com/ipfs/npm-go-ipfs/actions/workflows/main.yml) workflow - - [ ] check [Release to npm](https://github.com/ipfs/npm-go-ipfs/actions/workflows/main.yml) workflow run logs to verify it discovered the new release + - [ ] manually dispatch the [Release to npm](https://github.com/ipfs/npm-kubo/actions/workflows/main.yml) workflow if it was not executed already and verify it discovered the new release - [ ] verify the release is available on [NPM](https://www.npmjs.com/package/kubo?activeTab=versions) - [ ] Publish the release to [GitHub kubo/releases](https://github.com/ipfs/kubo/releases) - - [ ] create a new release on [github.com/ipfs/kubo/releases](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) - - [RC example](https://github.com/ipfs/kubo/releases/tag/v0.17.0-rc1) - - [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.17.0) + - [ ] [create](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) a new release + - [RC example](https://github.com/ipfs/kubo/releases/tag/v0.36.0-rc1) + - [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.35.0) - [ ] use the `vX.Y.Z(-rcN)` tag - [ ] link to the release issue - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) link to the changelog in the description - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) check the `This is a pre-release` checkbox - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) copy the changelog (without the header) in the description - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) do **NOT** check the `This is a pre-release` checkbox - - [ ] run the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow - - [ ] wait for the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow run to finish - - [ ] verify the release assets are present in the [GitHub release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN)) + - [ ] run the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow and verify the release assets are attached to the GitHub release ### 4. After Publishing @@ -95,12 +92,15 @@ This section covers tasks to be done during each release. - [ ] Create and merge a PR from `merge-release-vX.Y.Z` to `master` - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit - ⚠️ **NOTE:** make sure to ignore the changes to [version.go](version.go) (keep the `-dev` in `master`) -- [ ] Update Kubo staging environment, see the [Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8) for details. - - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) Test last release against the current RC - - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Test last release against the current one -- [ ] Promote the release +- [ ] Update [ipshipyard/waterworks-infra](https://github.com/ipshipyard/waterworks-infra) + - [ ] Update Kubo staging environment, see the [Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8) for details. + - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) Test last release against the current RC + - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Test last release against the current one + - [ ] Update collab cluster boxes to the tagged release (final or RC) + - [ ] Update libp2p bootstrappers to the tagged release (final or RC) +- [ ] Promote the release - [ ] create an [IPFS Discourse](https://discuss.ipfs.tech) topic ([prerelease example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248), [release example](https://discuss.ipfs.tech/t/kubo-v0-16-0-release-is-out/15249)) - - [ ] use `Kubo vX.Y.Z(-rcN) is out!` as the title and `kubo` and `go-ipfs` as tags + - [ ] use `Kubo vX.Y.Z(-rcN) is out!` as the title and `kubo` as tags - [ ] repeat the title as a heading (`##`) in the description - [ ] link to the GitHub Release, binaries on IPNS, docker pull command and release notes in the description - [ ] pin the [IPFS Discourse](https://discuss.ipfs.tech) topic globally, you can make the topic a banner if there is no banner already @@ -112,25 +112,24 @@ This section covers tasks to be done during each release. - [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) create an issue comment mentioning early testers on the release issue ([example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478)) - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) create an issue comment linking to the release on the release issue ([example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975)) - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) promote on bsky.app ([example](https://bsky.app/profile/ipshipyard.com/post/3lh2brzrwbs2c)) - - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) promote on x.com ([example](https://x.com/ipshipyard/status/1885346348808929609)) + - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) promote on x.com ([example](https://x.com/ipshipyard/status/1885346348808929609)) - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) post the link to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN)) to [Reddit](https://reddit.com/r/ipfs) ([example](https://www.reddit.com/r/ipfs/comments/9x0q0k/kubo_v0160_release_is_out/)) - [ ] Manually smoke-test the new version with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/) -- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Update Kubo in [ipfs-desktop](https://github.com/ipfs/ipfs-desktop) - - [ ] check out [ipfs/ipfs-desktop](https://github.com/ipfs/ipfs-desktop) - - [ ] run `npm install ` - - [ ] create a PR which updates `package.json` and `package-lock.json` +- [ ] Update Kubo in [ipfs-desktop](https://github.com/ipfs/ipfs-desktop) + - [ ] create a PR which updates `kubo` version to the tagged version in `package.json` and `package-lock.json` + - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) switch to final release and merge - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Update Kubo docs at docs.ipfs.tech: - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) run the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) merge the PR created by the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow run -- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Create a blog entry on [blog.ipfs.tech](https://blog.ipfs.tech) +- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Create a blog entry on [blog.ipfs.tech](https://blog.ipfs.tech) - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) create a PR which adds a release note for the new Kubo version ([example](https://github.com/ipfs/ipfs-blog/pull/529)) - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) merge the PR - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) verify the blog entry was published - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) Create a dependency update PR - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) check out [ipfs/kubo](https://github.com/ipfs/kubo) - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) go over direct dependencies from `go.mod` in the root directory (NOTE: do not run `go get -u` as it will upgrade indirect dependencies which may cause problems) - - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) run `make mod_tidy` + - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) run `make mod_tidy` - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) create a PR which updates `go.mod` and `go.sum` - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) add the PR to the next release milestone - [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) Create the next release issue diff --git a/docs/changelogs/v0.10.md b/docs/changelogs/v0.10.md index 9a1d8b8af..429ff7d37 100644 --- a/docs/changelogs/v0.10.md +++ b/docs/changelogs/v0.10.md @@ -80,7 +80,7 @@ Performance profiles can now be collected using `ipfs diag profile`. If you need #### 🍎 Mac OS notarized binaries -The go-ipfs and related migration binaries (for both Intel and Apple Sillicon) are now signed and notarized to make Mac OS installation easier. +The go-ipfs and related migration binaries (for both Intel and Apple Silicon) are now signed and notarized to make Mac OS installation easier. #### 👨‍👩‍👦 Improved MDNS diff --git a/docs/changelogs/v0.18.md b/docs/changelogs/v0.18.md index 972ecb84e..70ce9ef24 100644 --- a/docs/changelogs/v0.18.md +++ b/docs/changelogs/v0.18.md @@ -629,7 +629,7 @@ and various improvements have been made to improve the UX including: - feat: WithLocalPublication option to enable local only publishing on a topic (#481) ([libp2p/go-libp2p-pubsub#481](https://github.com/libp2p/go-libp2p-pubsub/pull/481)) - update pubsub deps (#491) ([libp2p/go-libp2p-pubsub#491](https://github.com/libp2p/go-libp2p-pubsub/pull/491)) - Gossipsub: Unsubscribe backoff (#488) ([libp2p/go-libp2p-pubsub#488](https://github.com/libp2p/go-libp2p-pubsub/pull/488)) - - Adds exponential backoff to re-spawing new streams for supposedly dead peers (#483) ([libp2p/go-libp2p-pubsub#483](https://github.com/libp2p/go-libp2p-pubsub/pull/483)) + - Adds exponential backoff to re-spawning new streams for supposedly dead peers (#483) ([libp2p/go-libp2p-pubsub#483](https://github.com/libp2p/go-libp2p-pubsub/pull/483)) - Publishing option for signing a message with a custom private key (#486) ([libp2p/go-libp2p-pubsub#486](https://github.com/libp2p/go-libp2p-pubsub/pull/486)) - fix unused GossipSubHistoryGossip, make seenMessages ttl configurable, make score params SeenMsgTTL configurable - Update README.md diff --git a/docs/changelogs/v0.2.md b/docs/changelogs/v0.2.md index 4e60221d5..4d42ea2f5 100644 --- a/docs/changelogs/v0.2.md +++ b/docs/changelogs/v0.2.md @@ -10,7 +10,7 @@ config file Bootstrap field changed accordingly. users can upgrade cleanly with: - ipfs bootstrap >boostrap_peers + ipfs bootstrap >bootstrap_peers ipfs bootstrap rm --all diff --git a/docs/changelogs/v0.20.md b/docs/changelogs/v0.20.md index 3a6ce8f64..e26c0695d 100644 --- a/docs/changelogs/v0.20.md +++ b/docs/changelogs/v0.20.md @@ -471,7 +471,7 @@ You can read more about the rationale behind this decision on the [tracking issu - identify: fix stale comment (#2179) ([libp2p/go-libp2p#2179](https://github.com/libp2p/go-libp2p/pull/2179)) - relay service: add metrics (#2154) ([libp2p/go-libp2p#2154](https://github.com/libp2p/go-libp2p/pull/2154)) - identify: Fix IdentifyWait when Connected events happen out of order (#2173) ([libp2p/go-libp2p#2173](https://github.com/libp2p/go-libp2p/pull/2173)) - - chore: fix ressource manager's README (#2168) ([libp2p/go-libp2p#2168](https://github.com/libp2p/go-libp2p/pull/2168)) + - chore: fix resource manager's README (#2168) ([libp2p/go-libp2p#2168](https://github.com/libp2p/go-libp2p/pull/2168)) - relay: fix deadlock when closing (#2171) ([libp2p/go-libp2p#2171](https://github.com/libp2p/go-libp2p/pull/2171)) - core: remove LocalPrivateKey method from network.Conn interface (#2144) ([libp2p/go-libp2p#2144](https://github.com/libp2p/go-libp2p/pull/2144)) - routed host: return connection error instead of routing error (#2169) ([libp2p/go-libp2p#2169](https://github.com/libp2p/go-libp2p/pull/2169)) diff --git a/docs/changelogs/v0.21.md b/docs/changelogs/v0.21.md index 569ea8f79..e8511d981 100644 --- a/docs/changelogs/v0.21.md +++ b/docs/changelogs/v0.21.md @@ -263,7 +263,7 @@ should be using AcceleratedDHTClient because they are falling behind. - chore: release v0.24.0 - fix: don't add unresponsive DHT servers to the Routing Table (#820) ([libp2p/go-libp2p-kad-dht#820](https://github.com/libp2p/go-libp2p-kad-dht/pull/820)) - github.com/libp2p/go-libp2p-kbucket (v0.5.0 -> v0.6.3): - - fix: fix abba bug in UsefullNewPeer ([libp2p/go-libp2p-kbucket#122](https://github.com/libp2p/go-libp2p-kbucket/pull/122)) + - fix: fix abba bug in UsefulNewPeer ([libp2p/go-libp2p-kbucket#122](https://github.com/libp2p/go-libp2p-kbucket/pull/122)) - chore: release v0.6.2 ([libp2p/go-libp2p-kbucket#121](https://github.com/libp2p/go-libp2p-kbucket/pull/121)) - Replacing UsefulPeer() with UsefulNewPeer() ([libp2p/go-libp2p-kbucket#120](https://github.com/libp2p/go-libp2p-kbucket/pull/120)) - chore: release 0.6.1 ([libp2p/go-libp2p-kbucket#119](https://github.com/libp2p/go-libp2p-kbucket/pull/119)) diff --git a/docs/changelogs/v0.22.md b/docs/changelogs/v0.22.md index 3aa55f30e..503c618fc 100644 --- a/docs/changelogs/v0.22.md +++ b/docs/changelogs/v0.22.md @@ -236,7 +236,7 @@ This includes a breaking change to `ipfs id` and some of the `ipfs swarm` comman - chore: cleanup error handling in compparallel - fix: correctly handle errors in compparallel - fix: make the ProvideMany docs clearer - - perf: remove goroutine that just waits before closing with a synchrous waitgroup + - perf: remove goroutine that just waits before closing with a synchronous waitgroup - github.com/libp2p/go-nat (v0.1.0 -> v0.2.0): - release v0.2.0 (#30) ([libp2p/go-nat#30](https://github.com/libp2p/go-nat/pull/30)) - update deps, use contexts on UPnP functions (#29) ([libp2p/go-nat#29](https://github.com/libp2p/go-nat/pull/29)) diff --git a/docs/changelogs/v0.31.md b/docs/changelogs/v0.31.md index b20b15862..e055cc9f4 100644 --- a/docs/changelogs/v0.31.md +++ b/docs/changelogs/v0.31.md @@ -36,7 +36,7 @@ For a description of the available tuning parameters, see [kubo/docs/datastores. We've notices users were applying `lowpower` profile, and then reporting content routing issues. This was because `lowpower` disabled reprovider system and locally hosted data was no longer announced on Amino DHT. -This release changes [`lowpower` profile](https://github.com/ipfs/kubo/blob/master/docs/config.md#lowpower-profile) to not change reprovider settings, ensuring the new users are not sabotaging themselves. It also adds [`annouce-on`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-on-profile) and [`announce-off`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-off-profile) profiles for controlling announcement settings separately. +This release changes [`lowpower` profile](https://github.com/ipfs/kubo/blob/master/docs/config.md#lowpower-profile) to not change reprovider settings, ensuring the new users are not sabotaging themselves. It also adds [`announce-on`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-on-profile) and [`announce-off`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-off-profile) profiles for controlling announcement settings separately. > [!IMPORTANT] > If you've ever applied the `lowpower` profile before, there is a high chance your node is not announcing to DHT anymore. diff --git a/docs/changelogs/v0.36.md b/docs/changelogs/v0.36.md index 5c078e330..2a5234477 100644 --- a/docs/changelogs/v0.36.md +++ b/docs/changelogs/v0.36.md @@ -40,7 +40,7 @@ See [`HTTPRetrieval`](https://github.com/ipfs/kubo/blob/master/docs/config.md#ht The Bitswap client now supports broadcast reduction logic, which is enabled by default. This feature significantly reduces the number of broadcast messages sent to peers, resulting in lower bandwidth usage during load spikes. -The overall logic works by sending to non-local peers only if those peers have previously replied that they want data blocks. To minimize impact on existing workloads, by default, broadcasts are still always sent to peers on the local network, or the ones defined in `Peering.Peers`. +The overall logic works by sending to non-local peers only if those peers have previously replied that they have data blocks. To minimize impact on existing workloads, by default, broadcasts are still always sent to peers on the local network, or the ones defined in `Peering.Peers`. At Shipyard, we conducted A/B testing on our internal Kubo staging gateway with organic CID requests to `ipfs.io`. While these results may not exactly match your specific workload, the benefits proved significant enough to make this feature default. Here are the key findings: @@ -215,7 +215,7 @@ The `ipfs config edit` command did not correctly handle the `EDITOR` environment - chore: update to boxo merkledag package - feat: car debug handles the zero length block ([ipld/go-car#569](https://github.com/ipld/go-car/pull/569)) - chore(deps): bump github.com/rogpeppe/go-internal from 1.13.1 to 1.14.1 in /cmd ([ipld/go-car#566](https://github.com/ipld/go-car/pull/566)) - - Add a concatination cli utility ([ipld/go-car#565](https://github.com/ipld/go-car/pull/565)) + - Add a concatenation cli utility ([ipld/go-car#565](https://github.com/ipld/go-car/pull/565)) - github.com/ipld/go-codec-dagpb (v1.6.0 -> v1.7.0): - chore: v1.7.0 bump - github.com/libp2p/go-flow-metrics (v0.2.0 -> v0.3.0): diff --git a/docs/changelogs/v0.37.md b/docs/changelogs/v0.37.md new file mode 100644 index 000000000..595076131 --- /dev/null +++ b/docs/changelogs/v0.37.md @@ -0,0 +1,438 @@ +# Kubo changelog v0.37 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. + +- [v0.37.0](#v0370) + +## v0.37.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) + - [🚀 Repository migration from v16 to v17 with embedded tooling](#-repository-migration-from-v16-to-v17-with-embedded-tooling) + - [🚦 Gateway concurrent request limits and retrieval timeouts](#-gateway-concurrent-request-limits-and-retrieval-timeouts) + - [🔧 AutoConf: Complete control over network defaults](#-autoconf-complete-control-over-network-defaults) + - [🗑️ Clear provide queue when reprovide strategy changes](#-clear-provide-queue-when-reprovide-strategy-changes) + - [🪵 Revamped `ipfs log level` command](#-revamped-ipfs-log-level-command) + - [📌 Named pins in `ipfs add` command](#-named-pins-in-ipfs-add-command) + - [📝 New IPNS publishing options](#-new-ipns-publishing-options) + - [🔢 Custom sequence numbers in `ipfs name publish`](#-custom-sequence-numbers-in-ipfs-name-publish) + - [⚙️ `Reprovider.Strategy` is now consistently respected](#-reprovider-strategy-is-now-consistently-respected) + - [⚙️ `Reprovider.Strategy=all`: improved memory efficiency](#-reproviderstrategyall-improved-memory-efficiency) + - [🧹 Removed unnecessary dependencies](#-removed-unnecessary-dependencies) + - [🔍 Improved `ipfs cid`](#-improved-ipfs-cid) + - [⚠️ Deprecated `ipfs stats reprovide`](#-deprecated-ipfs-stats-reprovide) + - [🔄 AutoRelay now uses all connected peers for relay discovery](#-autorelay-now-uses-all-connected-peers-for-relay-discovery) + - [📊 Anonymous telemetry for better feature prioritization](#-anonymous-telemetry-for-better-feature-prioritization) +- [📦️ Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +Kubo 0.37.0 introduces embedded repository migrations, gateway resource protection, complete AutoConf control, improved reprovider strategies, and anonymous telemetry for better feature prioritization. This release significantly improves memory efficiency, network configuration flexibility, and operational reliability while maintaining full backward compatibility. + +### 🔦 Highlights + +#### 🚀 Repository migration from v16 to v17 with embedded tooling + +This release migrates the Kubo repository from version 16 to version 17. Migrations are now built directly into the binary - completing in milliseconds without internet access or external downloads. + +`ipfs daemon --migrate` performs migrations automatically. Manual migration: `ipfs repo migrate --to=17` (or `--to=16 --allow-downgrade` for compatibility). Embedded migrations apply to v17+; older versions still require external tools. + +**Legacy migration deprecation**: Support for legacy migrations that download binaries from the internet will be removed in a future version. Only embedded migrations for the last 3 releases will be supported. Users with very old repositories should update in stages rather than skipping multiple versions. + +#### 🚦 Gateway concurrent request limits and retrieval timeouts + +New configurable limits protect gateway resources during high load: + +- **[`Gateway.RetrievalTimeout`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayretrievaltimeout)** (default: 30s): Maximum duration for content retrieval. Returns 504 Gateway Timeout when exceeded - applies to both initial retrieval (time to first byte) and between subsequent writes. +- **[`Gateway.MaxConcurrentRequests`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxconcurrentrequests)** (default: 4096): Limits concurrent HTTP requests. Returns 429 Too Many Requests when exceeded. Protects nodes from traffic spikes and resource exhaustion, especially useful behind reverse proxies without rate-limiting. + +New Prometheus metrics for monitoring: + +- `ipfs_http_gw_concurrent_requests`: Current requests being processed +- `ipfs_http_gw_responses_total`: HTTP responses by status code +- `ipfs_http_gw_retrieval_timeouts_total`: Timeouts by status code and truncation status + +Tuning tips: + +- Monitor metrics to understand gateway behavior and adjust based on observations +- Watch `ipfs_http_gw_concurrent_requests` for saturation +- Track `ipfs_http_gw_retrieval_timeouts_total` vs success rates to identify timeout patterns indicating routing or storage provider issues + +#### 🔧 AutoConf: Complete control over network defaults + +Configuration fields now support `["auto"]` placeholders that resolve to network defaults from [`AutoConf.URL`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autoconfurl). These defaults can be inspected, replaced with custom values, or disabled entirely. Previously, empty configuration fields like `Routing.DelegatedRouters: []` would use hardcoded defaults - this system makes those defaults explicit through `"auto"` values. When upgrading to Kubo 0.37, custom configurations remain unchanged. + +New `--expand-auto` flag shows resolved values for any config field: + +```bash +ipfs config show --expand-auto # View all resolved endpoints +ipfs config Bootstrap --expand-auto # Check specific values +ipfs config Routing.DelegatedRouters --expand-auto +ipfs config DNS.Resolvers --expand-auto +``` + +Configuration can be managed via: +- Replace `"auto"` with custom endpoints or set `[]` to disable features +- Switch modes with `--profile=autoconf-on|autoconf-off` +- Configure via `AutoConf.Enabled` and custom manifests via `AutoConf.URL` + +```bash +# Enable automatic configuration +ipfs config profiles apply autoconf-on + +# Or manually set specific fields +ipfs config Bootstrap '["auto"]' +ipfs config --json DNS.Resolvers '{".": ["https://dns.example.com/dns-query"], "eth.": ["auto"]}' +``` + +Organizations can host custom AutoConf manifests for private networks. See [AutoConf documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#autoconf) and format spec at https://conf.ipfs-mainnet.org/ + +#### 🗑️ Clear provide queue when reprovide strategy changes + +Changing [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy) and restarting Kubo now automatically clears the provide queue. Only content matching the new strategy will be announced. + +Manual queue clearing is also available: + +- `ipfs provide clear` - clear all queued content announcements + +> [!NOTE] +> Upgrading to Kubo 0.37 will automatically clear any preexisting provide queue. The next time `Reprovider.Interval` hits, `Reprovider.Strategy` will be executed on a clean slate, ensuring consistent behavior with your current configuration. + +#### 🪵 Revamped `ipfs log level` command + +The `ipfs log level` command has been completely revamped to support both getting and setting log levels with a unified interface. + +**New: Getting log levels** + +- `ipfs log level` - Shows default level only +- `ipfs log level all` - Shows log level for every subsystem, including default level +- `ipfs log level foo` - Shows log level for a specific subsystem only +- Kubo RPC API: `POST /api/v0/log/level?arg=` + +**Enhanced: Setting log levels** + +- `ipfs log level foo debug` - Sets "foo" subsystem to "debug" level +- `ipfs log level all info` - Sets all subsystems to "info" level (convenient, no escaping) +- `ipfs log level '*' info` - Equivalent to above but requires shell escaping +- `ipfs log level foo default` - Sets "foo" subsystem to current default level + +The command now provides full visibility into your current logging configuration while maintaining full backward compatibility. Both `all` and `*` work for specifying all subsystems, with `all` being more convenient since it doesn't require shell escaping. + +#### 🧷 Named pins in `ipfs add` command + +Added `--pin-name` flag to `ipfs add` for assigning names to pins. + +```console +$ ipfs add --pin-name=testname cat.jpg +added bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi cat.jpg + +$ ipfs pin ls --names +bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi recursive testname +``` + +#### 📝 New IPNS publishing options + +Added support for controlling IPNS record publishing strategies with new command flags and configuration. + +**New command flags:** +```bash +# Publish without network connectivity (local datastore only) +ipfs name publish --allow-offline /ipfs/QmHash + +# Publish without DHT connectivity (uses local datastore and HTTP delegated publishers) +ipfs name publish --allow-delegated /ipfs/QmHash +``` + +**Delegated publishers configuration:** + +[`Ipns.DelegatedPublishers`](https://github.com/ipfs/kubo/blob/master/docs/config.md#ipnsdelegatedpublishers) configures HTTP endpoints for IPNS publishing. Supports `"auto"` for network defaults or custom HTTP endpoints. The `--allow-delegated` flag enables publishing through these endpoints without requiring DHT connectivity, useful for nodes behind restrictive networks or during testing. + +#### 🔢 Custom sequence numbers in `ipfs name publish` + +Added `--sequence` flag to `ipfs name publish` for setting custom sequence numbers in IPNS records. This enables advanced use cases like manually coordinating updates across multiple nodes. See `ipfs name publish --help` for details. + +#### ⚙️ `Reprovider.Strategy` is now consistently respected + +Prior to this version, files added, blocks received etc. were "provided" to the network (announced on the DHT) regardless of the ["reproviding strategy" setting](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy). For example: + +- Strategy set to "pinned" + `ipfs add --pin=false` → file was provided regardless +- Strategy set to "roots" + `ipfs pin add` → all blocks (not only the root) were provided + +Only the periodic "reproviding" action (runs every 22h by default) respected the strategy. + +This was inefficient as content that should not be provided was getting provided once. Now all operations respect `Reprovider.Strategy`. If set to "roots", no blocks other than pin roots will be provided regardless of what is fetched, added etc. + +> [!NOTE] +> **Behavior change:** The `--offline` flag no longer affects providing behavior. Both `ipfs add` and `ipfs --offline add` now provide blocks according to the reproviding strategy when run against an online daemon (previously `--offline add` did not provide). Since `ipfs add` has been nearly as fast as offline mode [since v0.35](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.35.md#fast-ipfs-add-in-online-mode), `--offline` is rarely needed. To run truly offline operations, use `ipfs --offline daemon`. + +#### ⚙️ `Reprovider.Strategy=all`: improved memory efficiency + +The memory cost of `Reprovider.Strategy=all` no longer grows with the number of pins. The strategy now processes blocks directly from the datastore in undefined order, eliminating the memory pressure tied to the number of pins. + +As part of this improvement, the `flat` reprovider strategy has been renamed to `all` (the default). This cleanup removes the workaround introduced in v0.28 for pin root prioritization. With the introduction of more granular strategies like [`pinned+mfs`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy), we can now optimize the default `all` strategy for lower memory usage without compromising users who need pin root prioritization ([rationale](https://github.com/ipfs/kubo/pull/10928#issuecomment-3211040182)). + +> [!NOTE] +> **Migration guidance:** If you experience undesired announcement delays of root CIDs with the new `all` strategy, switch to `pinned+mfs` for root prioritization. + +#### 🧹 Removed unnecessary dependencies + +Kubo has been cleaned up by removing unnecessary dependencies and packages: + +- Removed `thirdparty/assert` (replaced by `github.com/stretchr/testify/require`) +- Removed `thirdparty/dir` (replaced by `misc/fsutil`) +- Removed `thirdparty/notifier` (unused) +- Removed `goprocess` dependency (replaced with native Go `context` patterns) + +These changes reduce the dependency footprint while improving code maintainability and following Go best practices. + +#### 🔍 Improved `ipfs cid` + +Certain `ipfs cid` commands can now be run without a daemon or repository, and return correct exit code 1 on error, making it easier to perform CID conversion in scripts and CI/CD pipelines. + +While at it, we also fixed unicode support in `ipfs cid bases --prefix` to correctly show `base256emoji` 🚀 :-) + +#### ⚠️ Deprecated `ipfs stats reprovide` + +The `ipfs stats reprovide` command has moved to `ipfs provide stat`. This was done to organize provider commands in one location. + +> [!NOTE] +> `ipfs stats reprovide` still works, but is marked as deprecated and will be removed in a future release. + +#### 🔄 AutoRelay now uses all connected peers for relay discovery + +AutoRelay's relay discovery now includes all connected peers as potential relay candidates, not just peers discovered through the DHT. This allows peers connected via HTTP routing and manual `ipfs swarm connect` commands to serve as relays, improving connectivity for nodes using non-DHT routing configurations. + +#### 📊 Anonymous telemetry for better feature prioritization + +Per a suggestion from the IPFS Foundation, Kubo now sends optional anonymized telemetry information to Shipyard [maintainers](https://github.com/ipshipyard/roadmaps/issues/20). + +**Privacy first**: The telemetry system collects only anonymous data - no personally identifiable information, file paths, or content data. A random UUID is generated on first run for anonymous identification. Users are notified before any data is sent and have time to opt-out. + +**Why**: We want to better understand Kubo usage across the ecosystem so we can better direct funding and work efforts. For example, we have little insights into how many nodes are NAT'ed and rely on AutoNAT for reachability. Some of the information can be inferred by crawling the network or logging `/identify` details in the bootstrappers, but users have no way of opting out from that, so we believe it is more transparent to concentrate this functionality in one place. + +**What**: Currently, we send the following anonymous metrics: + +
Click to see telemetry metrics example + +``` + "uuid": "", + "agent_version": "kubo/0.37.0-dev", + "private_network": false, + "bootstrappers_custom": false, + "repo_size_bucket": 1073741824, + "uptime_bucket": 86400000000000, + "reprovider_strategy": "pinned", + "routing_type": "auto", + "routing_accelerated_dht_client": false, + "routing_delegated_count": 0, + "autonat_service_mode": "enabled", + "autonat_reachability": "", + "autoconf": true, + "autoconf_custom": false, + "swarm_enable_hole_punching": true, + "swarm_circuit_addresses": false, + "swarm_ipv4_public_addresses": true, + "swarm_ipv6_public_addresses": true, + "auto_tls_auto_wss": true, + "auto_tls_domain_suffix_custom": false, + "discovery_mdns_enabled": true, + "platform_os": "linux", + "platform_arch": "amd64", + "platform_containerized": false, + "platform_vm": false +``` + +
+ +The exact data sent for your node can be inspected by setting `GOLOG_LOG_LEVEL="telemetry=debug"`. Users will see an informative message the first time they launch a telemetry-enabled daemon, with time to opt-out before any data is collected. Telemetry data is sent every 24h, with the first collection starting 15 minutes after daemon launch. + +**User control**: You can opt-out at any time: + +- Set environment variable `IPFS_TELEMETRY=off` before starting the daemon +- Or run `ipfs config Plugins.Plugins.telemetry.Config.Mode off` and restart the daemon + +The telemetry plugin code lives in `plugin/plugins/telemetry`. + +Learn more: [`/kubo/docs/telemetry.md`](https://github.com/ipfs/kubo/blob/master/docs/telemetry.md) + +### 📦️ Important dependency updates + +- update `boxo` to [v0.34.0](https://github.com/ipfs/boxo/releases/tag/v0.34.0) (incl. [v0.33.1](https://github.com/ipfs/boxo/releases/tag/v0.33.1)) +- update `go-libp2p` to [v0.43.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.43.0) +- update `go-libp2p-kad-dht` to [v0.34.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.34.0) +- update `go-libp2p-pubsub` to [v0.14.2](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.2) (incl. [v0.14.1](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.1), [v0.14.0](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.0)) +- update `ipfs-webui` to [v4.8.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.8.0) +- update to [Go 1.25](https://go.dev/doc/go1.25) + +### 📝 Changelog + +
Full Changelog + +- github.com/ipfs/kubo: + - chore: set version to v0.37.0 + - feat(ci): docker linting (#10927) ([ipfs/kubo#10927](https://github.com/ipfs/kubo/pull/10927)) + - fix: disable telemetry in test profile (#10931) ([ipfs/kubo#10931](https://github.com/ipfs/kubo/pull/10931)) + - fix: harness tests random panic (#10933) ([ipfs/kubo#10933](https://github.com/ipfs/kubo/pull/10933)) + - chore: v0.37.0-rc1 + - feat: Reprovider.Strategy: rename "flat" to "all" (#10928) ([ipfs/kubo#10928](https://github.com/ipfs/kubo/pull/10928)) + - docs: improve `ipfs add --help` (#10926) ([ipfs/kubo#10926](https://github.com/ipfs/kubo/pull/10926)) + - feat: optimize docker builds (#10925) ([ipfs/kubo#10925](https://github.com/ipfs/kubo/pull/10925)) + - feat(config): AutoConf with "auto" placeholders (#10883) ([ipfs/kubo#10883](https://github.com/ipfs/kubo/pull/10883)) + - fix(ci): make NewRandPort thread-safe (#10921) ([ipfs/kubo#10921](https://github.com/ipfs/kubo/pull/10921)) + - fix: resolve TestAddMultipleGCLive race condition (#10916) ([ipfs/kubo#10916](https://github.com/ipfs/kubo/pull/10916)) + - feat: telemetry plugin (#10866) ([ipfs/kubo#10866](https://github.com/ipfs/kubo/pull/10866)) + - fix typos in docs and comments (#10920) ([ipfs/kubo#10920](https://github.com/ipfs/kubo/pull/10920)) + - Upgrade to Boxo v0.34.0 (#10917) ([ipfs/kubo#10917](https://github.com/ipfs/kubo/pull/10917)) + - test: fix flaky repo verify (#10743) ([ipfs/kubo#10743](https://github.com/ipfs/kubo/pull/10743)) + - feat(config): `Gateway.RetrievalTimeout|MaxConcurrentRequests` (#10905) ([ipfs/kubo#10905](https://github.com/ipfs/kubo/pull/10905)) + - chore: replace random test utils with equivalents in go-test/random (#10915) ([ipfs/kubo#10915](https://github.com/ipfs/kubo/pull/10915)) + - feat: require go1.25 for building kubo (#10913) ([ipfs/kubo#10913](https://github.com/ipfs/kubo/pull/10913)) + - feat(ci): reusable spellcheck from unified CI (#10873) ([ipfs/kubo#10873](https://github.com/ipfs/kubo/pull/10873)) + - fix(ci): docker build (#10914) ([ipfs/kubo#10914](https://github.com/ipfs/kubo/pull/10914)) + - Replace `uber-go/multierr` with `errors.Join` (#10912) ([ipfs/kubo#10912](https://github.com/ipfs/kubo/pull/10912)) + - feat(ipns): support passing custom sequence number during publishing (#10851) ([ipfs/kubo#10851](https://github.com/ipfs/kubo/pull/10851)) + - fix(relay): feed connected peers to AutoRelay discovery (#10901) ([ipfs/kubo#10901](https://github.com/ipfs/kubo/pull/10901)) + - fix(sharness): no blocking on unclean FUSE unmount (#10906) ([ipfs/kubo#10906](https://github.com/ipfs/kubo/pull/10906)) + - feat: add query functionality to log level command (#10885) ([ipfs/kubo#10885](https://github.com/ipfs/kubo/pull/10885)) + - fix(ci): switch to debian:bookworm-slim + - Fix failing FUSE test (#10904) ([ipfs/kubo#10904](https://github.com/ipfs/kubo/pull/10904)) + - fix(cmd): exit 1 on error (#10903) ([ipfs/kubo#10903](https://github.com/ipfs/kubo/pull/10903)) + - feat: go-libp2p v0.43.0 (#10892) ([ipfs/kubo#10892](https://github.com/ipfs/kubo/pull/10892)) + - fix: `ipfs cid` without repo (#10897) ([ipfs/kubo#10897](https://github.com/ipfs/kubo/pull/10897)) + - client/rpc: re-enable tests on windows. (#10895) ([ipfs/kubo#10895](https://github.com/ipfs/kubo/pull/10895)) + - fix: Provide according to Reprovider.Strategy (#10886) ([ipfs/kubo#10886](https://github.com/ipfs/kubo/pull/10886)) + - feat: ipfs-webui v4.8.0 (#10902) ([ipfs/kubo#10902](https://github.com/ipfs/kubo/pull/10902)) + - refactor: move `ipfs stat provide/reprovide` to `ipfs provide stat` (#10896) ([ipfs/kubo#10896](https://github.com/ipfs/kubo/pull/10896)) + - Bitswap: use a single ConnectEventManager. ([ipfs/kubo#10889](https://github.com/ipfs/kubo/pull/10889)) + - feat(add): add support for naming pinned CIDs (#10877) ([ipfs/kubo#10877](https://github.com/ipfs/kubo/pull/10877)) + - refactor: remove goprocess (#10872) ([ipfs/kubo#10872](https://github.com/ipfs/kubo/pull/10872)) + - feat(daemon): accelerated client startup note (#10859) ([ipfs/kubo#10859](https://github.com/ipfs/kubo/pull/10859)) + - docs:added GOLOG_LOG_LEVEL to debug-guide for logging more info (#10894) ([ipfs/kubo#10894](https://github.com/ipfs/kubo/pull/10894)) + - core: Add a ContentDiscovery field ([ipfs/kubo#10890](https://github.com/ipfs/kubo/pull/10890)) + - chore: update go-libp2p and p2p-forge (#10887) ([ipfs/kubo#10887](https://github.com/ipfs/kubo/pull/10887)) + - Upgrade to Boxo v0.33.1 (#10888) ([ipfs/kubo#10888](https://github.com/ipfs/kubo/pull/10888)) + - remove unneeded thirdparty packages (#10871) ([ipfs/kubo#10871](https://github.com/ipfs/kubo/pull/10871)) + - provider: clear provide queue when reprovide strategy changes (#10863) ([ipfs/kubo#10863](https://github.com/ipfs/kubo/pull/10863)) + - chore: merge release v0.36.0 ([ipfs/kubo#10868](https://github.com/ipfs/kubo/pull/10868)) + - docs: release checklist fixes from 0.36 (#10861) ([ipfs/kubo#10861](https://github.com/ipfs/kubo/pull/10861)) + - docs(config): add network exposure considerations (#10856) ([ipfs/kubo#10856](https://github.com/ipfs/kubo/pull/10856)) + - fix: handling of EDITOR env var (#10855) ([ipfs/kubo#10855](https://github.com/ipfs/kubo/pull/10855)) + - refactor: use slices.Sort where appropriate (#10858) ([ipfs/kubo#10858](https://github.com/ipfs/kubo/pull/10858)) + - Upgrade to Boxo v0.33.0 (#10857) ([ipfs/kubo#10857](https://github.com/ipfs/kubo/pull/10857)) + - chore: Upgrade github.com/cockroachdb/pebble/v2 to v2.0.6 for Go 1.25 support (#10850) ([ipfs/kubo#10850](https://github.com/ipfs/kubo/pull/10850)) + - core:constructor: add a log line about http retrieval ([ipfs/kubo#10852](https://github.com/ipfs/kubo/pull/10852)) + - chore: p2p-forge v0.6.0 + go-libp2p 0.42.0 (#10840) ([ipfs/kubo#10840](https://github.com/ipfs/kubo/pull/10840)) + - docs: fix minor typos (#10849) ([ipfs/kubo#10849](https://github.com/ipfs/kubo/pull/10849)) + - Replace use of go-car v1 with go-car/v2 (#10845) ([ipfs/kubo#10845](https://github.com/ipfs/kubo/pull/10845)) + - chore: 0.37.0-dev +- github.com/ipfs/boxo (v0.33.0 -> v0.34.0): + - Release v0.34.0 ([ipfs/boxo#1003](https://github.com/ipfs/boxo/pull/1003)) + - blockstore: remove HashOnRead ([ipfs/boxo#1001](https://github.com/ipfs/boxo/pull/1001)) + - Update go-log to v2.8.1 ([ipfs/boxo#998](https://github.com/ipfs/boxo/pull/998)) + - feat: autoconf client library (#997) ([ipfs/boxo#997](https://github.com/ipfs/boxo/pull/997)) + - feat(gateway): concurrency and retrieval timeout limits (#994) ([ipfs/boxo#994](https://github.com/ipfs/boxo/pull/994)) + - update dependencies ([ipfs/boxo#999](https://github.com/ipfs/boxo/pull/999)) + - fix: cidqueue gc must iterate all elements in queue ([ipfs/boxo#1000](https://github.com/ipfs/boxo/pull/1000)) + - Replace `uber-go/multierr` with `errors.Join` ([ipfs/boxo#996](https://github.com/ipfs/boxo/pull/996)) + - feat(namesys/IPNSPublisher): expose ability to set Sequence (#962) ([ipfs/boxo#962](https://github.com/ipfs/boxo/pull/962)) + - upgrade to go-libp2p v0.43.0 ([ipfs/boxo#993](https://github.com/ipfs/boxo/pull/993)) + - Remove providing Exchange. Call Provide() from relevant places. ([ipfs/boxo#976](https://github.com/ipfs/boxo/pull/976)) + - reprovider: s/initial/initial ([ipfs/boxo#992](https://github.com/ipfs/boxo/pull/992)) + - Release v0.33.1 ([ipfs/boxo#991](https://github.com/ipfs/boxo/pull/991)) + - fix(bootstrap): filter-out peers behind relays (#987) ([ipfs/boxo#987](https://github.com/ipfs/boxo/pull/987)) + - Bitswap: fix double-worker in connectEventManager. Logging improvements. ([ipfs/boxo#986](https://github.com/ipfs/boxo/pull/986)) + - upgrade to go-libp2p v0.42.1 (#988) ([ipfs/boxo#988](https://github.com/ipfs/boxo/pull/988)) + - bitswap/httpnet: fix sudden stop of http retrieval requests (#984) ([ipfs/boxo#984](https://github.com/ipfs/boxo/pull/984)) + - bitswap/client: disable use of traceability block by default (#956) ([ipfs/boxo#956](https://github.com/ipfs/boxo/pull/956)) + - test(gateway): fix race in TestCarBackendTar (#985) ([ipfs/boxo#985](https://github.com/ipfs/boxo/pull/985)) + - Shutdown the sessionWantSender changes queue when session is shutdown (#983) ([ipfs/boxo#983](https://github.com/ipfs/boxo/pull/983)) + - bitswap/httpnet: start pinging before signaling Connected ([ipfs/boxo#982](https://github.com/ipfs/boxo/pull/982)) + - Queue all changes in order using non-blocking async queue ([ipfs/boxo#981](https://github.com/ipfs/boxo/pull/981)) + - bitswap/httpnet: fix peers silently stopping from doing http requests ([ipfs/boxo#980](https://github.com/ipfs/boxo/pull/980)) + - provider: clear provide queue (#978) ([ipfs/boxo#978](https://github.com/ipfs/boxo/pull/978)) + - update dependencies ([ipfs/boxo#977](https://github.com/ipfs/boxo/pull/977)) +- github.com/ipfs/go-datastore (v0.8.2 -> v0.8.3): + - new version (#245) ([ipfs/go-datastore#245](https://github.com/ipfs/go-datastore/pull/245)) + - sort using slices.Sort (#243) ([ipfs/go-datastore#243](https://github.com/ipfs/go-datastore/pull/243)) + - Replace `uber-go/multierr` with `errors.Join` (#242) ([ipfs/go-datastore#242](https://github.com/ipfs/go-datastore/pull/242)) + - replace gopkg.in/check.v1 with github.com/stretchr/testify (#241) ([ipfs/go-datastore#241](https://github.com/ipfs/go-datastore/pull/241)) +- github.com/ipfs/go-ipld-cbor (v0.2.0 -> v0.2.1): + - new version ([ipfs/go-ipld-cbor#111](https://github.com/ipfs/go-ipld-cbor/pull/111)) + - update dependencies ([ipfs/go-ipld-cbor#110](https://github.com/ipfs/go-ipld-cbor/pull/110)) +- github.com/ipfs/go-log/v2 (v2.6.0 -> v2.8.1): + - new version (#171) ([ipfs/go-log#171](https://github.com/ipfs/go-log/pull/171)) + - feat: add LevelEnabled function to check if log level enabled (#170) ([ipfs/go-log#170](https://github.com/ipfs/go-log/pull/170)) + - Replace `uber-go/multierr` with `errors.Join` (#168) ([ipfs/go-log#168](https://github.com/ipfs/go-log/pull/168)) + - new version (#167) ([ipfs/go-log#167](https://github.com/ipfs/go-log/pull/167)) + - Test using testify package (#166) ([ipfs/go-log#166](https://github.com/ipfs/go-log/pull/166)) + - Revise the loglevel API to be more golang idiomatic (#165) ([ipfs/go-log#165](https://github.com/ipfs/go-log/pull/165)) + - new version (#164) ([ipfs/go-log#164](https://github.com/ipfs/go-log/pull/164)) + - feat: add GetLogLevel and GetAllLogLevels (#160) ([ipfs/go-log#160](https://github.com/ipfs/go-log/pull/160)) +- github.com/ipfs/go-test (v0.2.2 -> v0.2.3): + - new version (#30) ([ipfs/go-test#30](https://github.com/ipfs/go-test/pull/30)) + - fix: multihash random generation (#28) ([ipfs/go-test#28](https://github.com/ipfs/go-test/pull/28)) + - Add RandomName function to generate random filename (#26) ([ipfs/go-test#26](https://github.com/ipfs/go-test/pull/26)) +- github.com/libp2p/go-libp2p (v0.42.0 -> v0.43.0): + - Release v0.43 (#3353) ([libp2p/go-libp2p#3353](https://github.com/libp2p/go-libp2p/pull/3353)) + - basichost: fix deadlock with addrs_manager (#3348) ([libp2p/go-libp2p#3348](https://github.com/libp2p/go-libp2p/pull/3348)) + - basichost: fix Addrs docstring (#3341) ([libp2p/go-libp2p#3341](https://github.com/libp2p/go-libp2p/pull/3341)) + - quic: upgrade quic-go to v0.53 (#3323) ([libp2p/go-libp2p#3323](https://github.com/libp2p/go-libp2p/pull/3323)) +- github.com/libp2p/go-libp2p-kad-dht (v0.33.1 -> v0.34.0): + - chore: release v0.34.0 (#1130) ([libp2p/go-libp2p-kad-dht#1130](https://github.com/libp2p/go-libp2p-kad-dht/pull/1130)) + - make crawler protocol messenger configurable (#1128) ([libp2p/go-libp2p-kad-dht#1128](https://github.com/libp2p/go-libp2p-kad-dht/pull/1128)) + - fix: move non-error log to warning level (#1119) ([libp2p/go-libp2p-kad-dht#1119](https://github.com/libp2p/go-libp2p-kad-dht/pull/1119)) + - migrate providers package (#1094) ([libp2p/go-libp2p-kad-dht#1094](https://github.com/libp2p/go-libp2p-kad-dht/pull/1094)) +- github.com/libp2p/go-libp2p-pubsub (v0.13.1 -> v0.14.2): + - Release v0.14.2 (#629) ([libp2p/go-libp2p-pubsub#629](https://github.com/libp2p/go-libp2p-pubsub/pull/629)) + - Fix test races and enable race tests in CI (#626) ([libp2p/go-libp2p-pubsub#626](https://github.com/libp2p/go-libp2p-pubsub/pull/626)) + - Fix race when calling Preprocess and msg ID generator(#627) ([libp2p/go-libp2p-pubsub#627](https://github.com/libp2p/go-libp2p-pubsub/pull/627)) + - Release v0.14.1 (#623) ([libp2p/go-libp2p-pubsub#623](https://github.com/libp2p/go-libp2p-pubsub/pull/623)) + - fix(BatchPublishing): Make topic.AddToBatch threadsafe (#622) ([libp2p/go-libp2p-pubsub#622](https://github.com/libp2p/go-libp2p-pubsub/pull/622)) + - Release v0.14.0 (#614) ([libp2p/go-libp2p-pubsub#614](https://github.com/libp2p/go-libp2p-pubsub/pull/614)) + - refactor: 10x faster RPC splitting (#615) ([libp2p/go-libp2p-pubsub#615](https://github.com/libp2p/go-libp2p-pubsub/pull/615)) + - test: Fix flaky TestMessageBatchPublish (#616) ([libp2p/go-libp2p-pubsub#616](https://github.com/libp2p/go-libp2p-pubsub/pull/616)) + - Send IDONTWANT before first publish (#612) ([libp2p/go-libp2p-pubsub#612](https://github.com/libp2p/go-libp2p-pubsub/pull/612)) + - feat(gossipsub): Add MessageBatch (#607) ([libp2p/go-libp2p-pubsub#607](https://github.com/libp2p/go-libp2p-pubsub/pull/607)) + - fix(IDONTWANT)!: Do not IDONTWANT your sender (#609) ([libp2p/go-libp2p-pubsub#609](https://github.com/libp2p/go-libp2p-pubsub/pull/609)) +- github.com/multiformats/go-multiaddr (v0.16.0 -> v0.16.1): + - Release v0.16.1 (#281) ([multiformats/go-multiaddr#281](https://github.com/multiformats/go-multiaddr/pull/281)) + - reduce allocations in Bytes() and manet methods (#280) ([multiformats/go-multiaddr#280](https://github.com/multiformats/go-multiaddr/pull/280)) +- github.com/whyrusleeping/cbor-gen (v0.1.2 -> v0.3.1): + - fix: capture field count early for "optional" length check (#112) ([whyrusleeping/cbor-gen#112](https://github.com/whyrusleeping/cbor-gen/pull/112)) + - doc: basic cbor-gen documentation (#110) ([whyrusleeping/cbor-gen#110](https://github.com/whyrusleeping/cbor-gen/pull/110)) + - feat: add support for optional fields at the end of tuple structs (#109) ([whyrusleeping/cbor-gen#109](https://github.com/whyrusleeping/cbor-gen/pull/109)) + - Regenerate test files ([whyrusleeping/cbor-gen#107](https://github.com/whyrusleeping/cbor-gen/pull/107)) + - improve allocations in map serialization ([whyrusleeping/cbor-gen#105](https://github.com/whyrusleeping/cbor-gen/pull/105)) + - fixed array in struct instead of heap slice ([whyrusleeping/cbor-gen#104](https://github.com/whyrusleeping/cbor-gen/pull/104)) + - optionally sort type names in generated code file ([whyrusleeping/cbor-gen#102](https://github.com/whyrusleeping/cbor-gen/pull/102)) + - fix handling of an []*string field ([whyrusleeping/cbor-gen#101](https://github.com/whyrusleeping/cbor-gen/pull/101)) + - fix: reject negative big integers ([whyrusleeping/cbor-gen#100](https://github.com/whyrusleeping/cbor-gen/pull/100)) + +
+ +### 👨‍👩‍👧‍👦 Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Marcin Rataj | 26 | +16033/-755 | 176 | +| Andrew Gillis | 35 | +2656/-1911 | 142 | +| Hector Sanjuan | 30 | +2638/-760 | 114 | +| Marco Munizaga | 11 | +1244/-362 | 41 | +| Russell Dempsey | 2 | +1031/-33 | 7 | +| Guillaume Michel | 4 | +899/-65 | 15 | +| whyrusleeping | 4 | +448/-177 | 15 | +| sukun | 9 | +312/-191 | 31 | +| gammazero | 23 | +239/-216 | 45 | +| Brian Olson | 5 | +343/-16 | 11 | +| Steven Allen | 3 | +294/-7 | 9 | +| Sergey Gorbunov | 2 | +247/-11 | 9 | +| Kapil Sareen | 1 | +86/-13 | 10 | +| Masih H. Derkani | 1 | +72/-24 | 1 | +| Piotr Galar | 1 | +40/-55 | 23 | +| Rod Vagg | 1 | +13/-11 | 3 | +| Ankita Sahu | 1 | +2/-0 | 1 | +| Štefan Baebler | 1 | +1/-0 | 1 | diff --git a/docs/changelogs/v0.38.md b/docs/changelogs/v0.38.md new file mode 100644 index 000000000..3d2de2f9b --- /dev/null +++ b/docs/changelogs/v0.38.md @@ -0,0 +1,29 @@ +# Kubo changelog v0.38 + + + +This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. + +- [v0.38.0](#v0380) + +## v0.38.0 + +- [Overview](#overview) +- [🔦 Highlights](#-highlights) +- [📦️ Important dependency updates](#-important-dependency-updates) +- [📝 Changelog](#-changelog) +- [👨‍👩‍👧‍👦 Contributors](#-contributors) + +### Overview + +### 🔦 Highlights + +### 📦️ Important dependency updates + +### 📝 Changelog + +
Full Changelog + +
+ +### 👨‍👩‍👧‍👦 Contributors \ No newline at end of file diff --git a/docs/changelogs/v0.4.md b/docs/changelogs/v0.4.md index 3f2b9c358..de15c51dd 100644 --- a/docs/changelogs/v0.4.md +++ b/docs/changelogs/v0.4.md @@ -3255,7 +3255,7 @@ other requested improvements. See below for the full list of changes. - Make sure all keystore keys get republished ([ipfs/go-ipfs#3951](https://github.com/ipfs/go-ipfs/pull/3951)) - Documentation - Adding documentation on PubSub encodings ([ipfs/go-ipfs#3909](https://github.com/ipfs/go-ipfs/pull/3909)) - - Change 'neccessary' to 'necessary' ([ipfs/go-ipfs#3941](https://github.com/ipfs/go-ipfs/pull/3941)) + - Change 'necessary' to 'necessary' ([ipfs/go-ipfs#3941](https://github.com/ipfs/go-ipfs/pull/3941)) - README.md: add Nix to the linux package managers ([ipfs/go-ipfs#3939](https://github.com/ipfs/go-ipfs/pull/3939)) - More verbose errors in filestore ([ipfs/go-ipfs#3964](https://github.com/ipfs/go-ipfs/pull/3964)) - Bug fixes diff --git a/docs/changelogs/v0.8.md b/docs/changelogs/v0.8.md index d9d42fa89..8b28ff706 100644 --- a/docs/changelogs/v0.8.md +++ b/docs/changelogs/v0.8.md @@ -26,7 +26,7 @@ ipfs pin remote service add myservice https://myservice.tld:1234/api/path myacce ipfs pin remote add /ipfs/bafymydata --service=myservice --name=myfile ipfs pin remote ls --service=myservice --name=myfile ipfs pin remote ls --service=myservice --cid=bafymydata -ipfs pin remote rm --serivce=myservice --name=myfile +ipfs pin remote rm --service=myservice --name=myfile ``` A few notes: diff --git a/docs/changelogs/v0.9.md b/docs/changelogs/v0.9.md index 64b94e97e..c0dba5abd 100644 --- a/docs/changelogs/v0.9.md +++ b/docs/changelogs/v0.9.md @@ -337,7 +337,7 @@ SECIO was deprecated and turned off by default given the prevalence of TLS and N - schema/gen/go: please vet a bit more - Introduce 'quip' data building helpers. ([ipld/go-ipld-prime#134](https://github.com/ipld/go-ipld-prime/pull/134)) - gengo: support for unions with stringprefix representation. ([ipld/go-ipld-prime#133](https://github.com/ipld/go-ipld-prime/pull/133)) - - target of opporunity DRY improvement: use more shared templates for structs with stringjoin representations. + - target of opportunity DRY improvement: use more shared templates for structs with stringjoin representations. - fix small consistency typo in gen function names. - drop old generation mechanisms that were already deprecated. - error type cleanup, and helpers. diff --git a/docs/config.md b/docs/config.md index 82046b5fa..6e25814ca 100644 --- a/docs/config.md +++ b/docs/config.md @@ -36,6 +36,11 @@ config file at runtime. - [`AutoTLS.RegistrationToken`](#autotlsregistrationtoken) - [`AutoTLS.RegistrationDelay`](#autotlsregistrationdelay) - [`AutoTLS.CAEndpoint`](#autotlscaendpoint) + - [`AutoConf`](#autoconf) + - [`AutoConf.URL`](#autoconfurl) + - [`AutoConf.Enabled`](#autoconfenabled) + - [`AutoConf.RefreshInterval`](#autoconfrefreshinterval) + - [`AutoConf.TLSInsecureSkipVerify`](#autoconftlsinsecureskipverify) - [`Bitswap`](#bitswap) - [`Bitswap.Libp2pEnabled`](#bitswaplibp2penabled) - [`Bitswap.ServerEnabled`](#bitswapserverenabled) @@ -60,6 +65,8 @@ config file at runtime. - [`Gateway.DeserializedResponses`](#gatewaydeserializedresponses) - [`Gateway.DisableHTMLErrors`](#gatewaydisablehtmlerrors) - [`Gateway.ExposeRoutingAPI`](#gatewayexposeroutingapi) + - [`Gateway.RetrievalTimeout`](#gatewayretrievaltimeout) + - [`Gateway.MaxConcurrentRequests`](#gatewaymaxconcurrentrequests) - [`Gateway.HTTPHeaders`](#gatewayhttpheaders) - [`Gateway.RootRedirect`](#gatewayrootredirect) - [`Gateway.FastDirIndexThreshold`](#gatewayfastdirindexthreshold) @@ -98,6 +105,7 @@ config file at runtime. - [`Ipns.ResolveCacheSize`](#ipnsresolvecachesize) - [`Ipns.MaxCacheTTL`](#ipnsmaxcachettl) - [`Ipns.UsePubsub`](#ipnsusepubsub) + - [`Ipns.DelegatedPublishers`](#ipnsdelegatedpublishers) - [`Migration`](#migration) - [`Migration.DownloadSources`](#migrationdownloadsources) - [`Migration.Keep`](#migrationkeep) @@ -223,6 +231,8 @@ config file at runtime. - [`default-datastore` profile](#default-datastore-profile) - [`local-discovery` profile](#local-discovery-profile) - [`default-networking` profile](#default-networking-profile) + - [`autoconf-on` profile](#autoconf-on-profile) + - [`autoconf-off` profile](#autoconf-off-profile) - [`flatfs` profile](#flatfs-profile) - [`flatfs-measure` profile](#flatfs-measure-profile) - [`pebbleds` profile](#pebbleds-profile) @@ -235,6 +245,9 @@ config file at runtime. - [`legacy-cid-v0` profile](#legacy-cid-v0-profile) - [`test-cid-v1` profile](#test-cid-v1-profile) - [`test-cid-v1-wide` profile](#test-cid-v1-wide-profile) + - [Security](#security) + - [Port and Network Exposure](#port-and-network-exposure) + - [Security Best Practices](#security-best-practices) - [Types](#types) - [`flag`](#flag) - [`priority`](#priority) @@ -271,6 +284,7 @@ Supported Transports: > > - If you need secure access to a subset of RPC, secure it with [`API.Authorizations`](#apiauthorizations) or custom auth middleware running in front of the localhost-only RPC port defined here. > - If you are looking for an interface designed for browsers and public internet, use [`Addresses.Gateway`](#addressesgateway) port instead. +> - See [Security section](#security) for network exposure considerations. Default: `/ip4/127.0.0.1/tcp/5001` @@ -286,6 +300,16 @@ Supported Transports: * tcp/ip{4,6} - `/ipN/.../tcp/...` * unix - `/unix/path/to/socket` +> [!CAUTION] +> **SECURITY CONSIDERATIONS FOR GATEWAY EXPOSURE** +> +> By default, the gateway is bound to localhost for security. If you bind to `0.0.0.0` +> or a public IP, anyone with access can trigger retrieval of arbitrary CIDs, causing +> bandwidth usage and potential exposure to malicious content. Limit with +> [`Gateway.NoFetch`](#gatewaynofetch). Consider firewall rules, authentication, +> and [`Gateway.PublicGateways`](#gatewaypublicgateways) for public exposure. +> See [Security section](#security) for network exposure considerations. + Default: `/ip4/127.0.0.1/tcp/8080` Type: `strings` ([multiaddrs][multiaddr]) @@ -304,6 +328,7 @@ Supported Transports: > [!IMPORTANT] > Make sure your firewall rules allow incoming connections on both TCP and UDP ports defined here. +> See [Security section](#security) for network exposure considerations. Note that quic (Draft-29) used to be supported with the format `/ipN/.../udp/.../quic`, but has since been [removed](https://github.com/libp2p/go-libp2p/releases/tag/v0.30.0). @@ -521,6 +546,150 @@ Default: 1 Minute Type: `duration` (when `0`/unset, the default value is used) +## `AutoConf` + +The AutoConf feature enables Kubo nodes to automatically fetch and apply network configuration from a remote JSON endpoint. This system allows dynamic configuration updates for bootstrap peers, DNS resolvers, delegated routing, and IPNS publishing endpoints without requiring manual updates to each node's local config. + +AutoConf works by using special `"auto"` placeholder values in configuration fields. When Kubo encounters these placeholders, it fetches the latest configuration from the specified URL and resolves the placeholders with the appropriate values at runtime. The original configuration file remains unchanged - `"auto"` values are preserved in the JSON and only resolved in memory during node operation. + +### Key Features + +- **Remote Configuration**: Fetch network defaults from a trusted URL +- **Automatic Updates**: Periodic background checks for configuration updates +- **Graceful Fallback**: Uses hardcoded IPFS Mainnet bootstrappers when remote config is unavailable +- **Validation**: Ensures all fetched configuration values are valid multiaddrs and URLs +- **Caching**: Stores multiple versions locally with ETags for efficient updates +- **User Notification**: Logs ERROR when new configuration is available requiring node restart +- **Debug Logging**: AutoConf operations can be inspected by setting `GOLOG_LOG_LEVEL="error,autoconf=debug"` + +### Supported Fields + +AutoConf can resolve `"auto"` placeholders in the following configuration fields: + +- `Bootstrap` - Bootstrap peer addresses +- `DNS.Resolvers` - DNS-over-HTTPS resolver endpoints +- `Routing.DelegatedRouters` - Delegated routing HTTP API endpoints +- `Ipns.DelegatedPublishers` - IPNS delegated publishing HTTP API endpoints + +### Usage Example + +```json +{ + "AutoConf": { + "URL": "https://example.com/autoconf.json", + "Enabled": true, + "RefreshInterval": "24h" + }, + "Bootstrap": ["auto"], + "DNS": { + "Resolvers": { + ".": ["auto"], + "eth.": ["auto"], + "custom.": ["https://dns.example.com/dns-query"] + } + }, + "Routing": { + "DelegatedRouters": ["auto", "https://router.example.org/routing/v1"] + } +} +``` + +**Notes:** + +- Configuration fetching happens at daemon startup and periodically in the background +- When new configuration is detected, users must restart their node to apply changes +- Mixed configurations are supported: you can use both `"auto"` and static values +- If AutoConf is disabled but `"auto"` values exist, daemon startup will fail with validation errors +- Cache is stored in `$IPFS_PATH/autoconf/` with up to 3 versions retained + +### Path-Based Routing Configuration + +AutoConf supports path-based routing URLs that automatically enable specific routing operations based on the URL path. This allows precise control over which HTTP Routing V1 endpoints are used for different operations: + +**Supported paths:** +- `/routing/v1/providers` - Enables provider record lookups only +- `/routing/v1/peers` - Enables peer routing lookups only +- `/routing/v1/ipns` - Enables IPNS record operations only +- No path - Enables all routing operations (backward compatibility) + +**AutoConf JSON structure with path-based routing:** + +```json +{ + "DelegatedRouters": { + "mainnet-for-nodes-with-dht": [ + "https://cid.contact/routing/v1/providers" + ], + "mainnet-for-nodes-without-dht": [ + "https://delegated-ipfs.dev/routing/v1/providers", + "https://delegated-ipfs.dev/routing/v1/peers", + "https://delegated-ipfs.dev/routing/v1/ipns" + ] + }, + "DelegatedPublishers": { + "mainnet-for-ipns-publishers-with-http": [ + "https://delegated-ipfs.dev/routing/v1/ipns" + ] + } +} +``` + +**Node type categories:** +- `mainnet-for-nodes-with-dht`: Mainnet nodes with DHT enabled (typically only need additional provider lookups) +- `mainnet-for-nodes-without-dht`: Mainnet nodes without DHT (need comprehensive routing services) +- `mainnet-for-ipns-publishers-with-http`: Mainnet nodes that publish IPNS records via HTTP + +This design enables efficient, selective routing where each endpoint URL automatically determines its capabilities based on the path, while maintaining semantic grouping by node configuration type. + +Default: `{}` + +Type: `object` + +### `AutoConf.Enabled` + +Controls whether the AutoConf system is active. When enabled, Kubo will fetch configuration from the specified URL and resolve `"auto"` placeholders at runtime. When disabled, any `"auto"` values in the configuration will cause daemon startup to fail with validation errors. + +This provides a safety mechanism to ensure nodes don't start with unresolved placeholders when AutoConf is intentionally disabled. + +Default: `true` + +Type: `flag` + +### `AutoConf.URL` + +Specifies the HTTP(S) URL from which to fetch the autoconf JSON. The endpoint should return a JSON document containing Bootstrap peers, DNS resolvers, delegated routing endpoints, and IPNS publishing endpoints that will replace `"auto"` placeholders in the local configuration. + +The URL must serve a JSON document matching the AutoConf schema. Kubo validates all multiaddr and URL values before caching to ensure they are properly formatted. + +When not specified in the configuration, the default mainnet URL is used automatically. + + + +> [!NOTE] +> Public good autoconf manifest at `conf.ipfs-mainnet.org` is provided by the team at [Shipyard](https://ipshipyard.com). + +Default: `"https://conf.ipfs-mainnet.org/autoconf.json"` (when not specified) + +Type: `optionalString` + +### `AutoConf.RefreshInterval` + +Specifies how frequently Kubo should refresh autoconf data. This controls both how often cached autoconf data is considered fresh and how frequently the background service checks for new configuration updates. + +When a new configuration version is detected during background updates, Kubo logs an ERROR message informing the user that a node restart is required to apply the changes to any `"auto"` entries in their configuration. + +Default: `24h` + +Type: `optionalDuration` + +### `AutoConf.TLSInsecureSkipVerify` + +**FOR TESTING ONLY** - Allows skipping TLS certificate verification when fetching autoconf from HTTPS URLs. This should never be enabled in production as it makes the configuration fetching vulnerable to man-in-the-middle attacks. + +Default: `false` + +Type: `flag` + ## `AutoTLS` The [AutoTLS](https://blog.libp2p.io/autotls/) feature enables publicly reachable Kubo nodes (those dialable from the public @@ -640,6 +809,7 @@ Default: [certmagic.LetsEncryptProductionCA](https://pkg.go.dev/github.com/caddy Type: `optionalString` + ## `Bitswap` High level client and server configuration of the [Bitswap Protocol](https://specs.ipfs.tech/bitswap-protocol/) over libp2p. @@ -673,11 +843,18 @@ Type: `flag` ## `Bootstrap` -Bootstrap is an array of [multiaddrs][multiaddr] of trusted nodes that your node connects to, to fetch other nodes of the network on startup. +Bootstrap peers help your node discover and connect to the IPFS network when starting up. This array contains [multiaddrs][multiaddr] of trusted nodes that your node contacts first to find other peers and content. -Default: [`config.DefaultBootstrapAddresses`](https://github.com/ipfs/kubo/blob/master/config/bootstrap_peers.go) +The special value `"auto"` automatically uses curated, up-to-date bootstrap peers from [AutoConf](#autoconf), ensuring your node can always connect to the healthy network without manual maintenance. -Type: `array[string]` ([multiaddrs][multiaddr]) +**What this gives you:** +- **Reliable startup**: Your node can always find the network, even if some bootstrap peers go offline +- **Automatic updates**: New bootstrap peers are added as the network evolves +- **Custom control**: Add your own trusted peers alongside or instead of the defaults + +Default: `["auto"]` + +Type: `array[string]` ([multiaddrs][multiaddr] or `"auto"`) ## `Datastore` @@ -932,6 +1109,55 @@ Default: `false` Type: `flag` +### `Gateway.RetrievalTimeout` + +Maximum duration Kubo will wait for content retrieval (new bytes to arrive). + +**Timeout behavior:** +- **Time to first byte**: Returns 504 Gateway Timeout if the gateway cannot start writing within this duration (e.g., stuck searching for providers) +- **Time between writes**: After first byte, timeout resets with each write. Response terminates if no new data can be written within this duration + +**Truncation handling:** When timeout occurs after HTTP 200 headers are sent (e.g., during CAR streams), the gateway: +- Appends error message to indicate truncation +- Forces TCP reset (RST) to prevent caching incomplete responses +- Records in metrics with original status code and `truncated=true` flag + +**Monitoring:** Track `ipfs_http_gw_retrieval_timeouts_total` by status code and truncation status. + +**Tuning guidance:** +- Compare timeout rates (`ipfs_http_gw_retrieval_timeouts_total`) with success rates (`ipfs_http_gw_responses_total{status="200"}`) +- High timeout rate: consider increasing timeout or scaling horizontally if hardware is constrained +- Many 504s may indicate routing problems - check requested CIDs and provider availability using https://check.ipfs.network/ +- `truncated=true` timeouts indicate retrieval stalled mid-file with no new bytes for the timeout duration + +A value of 0 disables this timeout. + +Default: `30s` + +Type: `optionalDuration` + +### `Gateway.MaxConcurrentRequests` + +Limits concurrent HTTP requests. Requests beyond limit receive 429 Too Many Requests. + +Protects nodes from traffic spikes and resource exhaustion, especially behind reverse proxies without rate-limiting. Default (4096) aligns with common reverse proxy configurations (e.g., nginx: 8 workers × 1024 connections). + +**Monitoring:** `ipfs_http_gw_concurrent_requests` tracks current requests in flight. + +**Tuning guidance:** +- Monitor `ipfs_http_gw_concurrent_requests` gauge for usage patterns +- Track 429s (`ipfs_http_gw_responses_total{status="429"}`) and success rate (`{status="200"}`) +- Near limit with low resource usage → increase value +- Memory pressure or OOMs → decrease value and consider scaling +- Set slightly below reverse proxy limit for graceful degradation +- Start with default, adjust based on observed performance for your hardware + +A value of 0 disables the limit. + +Default: `4096` + +Type: `optionalInteger` + ### `Gateway.HTTPHeaders` Headers to set on gateway responses. @@ -1418,21 +1644,51 @@ Default: `disabled` Type: `flag` +### `Ipns.DelegatedPublishers` + +HTTP endpoints for delegated IPNS publishing operations. These endpoints must support the [IPNS API](https://specs.ipfs.tech/routing/http-routing-v1/#ipns-api) from the Delegated Routing V1 HTTP specification. + +The special value `"auto"` loads delegated publishers from [AutoConf](#autoconf) when enabled. + +**Publishing behavior depends on routing configuration:** + +- `Routing.Type=auto` (default): Uses DHT for publishing, `"auto"` resolves to empty list +- `Routing.Type=delegated`: Uses HTTP delegated publishers only, `"auto"` resolves to configured endpoints + +When using `"auto"`, inspect the effective publishers with: `ipfs config Ipns.DelegatedPublishers --expand-auto` + +**Command flags override publishing behavior:** + +- `--allow-offline` - Publishes to local datastore without requiring network connectivity +- `--allow-delegated` - Uses local datastore and HTTP delegated publishers only (no DHT connectivity required) + +For self-hosting, you can run your own `/routing/v1/ipns` endpoint using [someguy](https://github.com/ipfs/someguy/). + +Default: `["auto"]` + +Type: `array[string]` (URLs or `"auto"`) + ## `Migration` -Migration configures how migrations are downloaded and if the downloads are added to IPFS locally. +> [!WARNING] +> **DEPRECATED:** Only applies to legacy migrations (repo versions <16). Modern repos (v16+) use embedded migrations. +> This section is optional and will not appear in new configurations. ### `Migration.DownloadSources` -Sources in order of preference, where "IPFS" means use IPFS and "HTTPS" means use default gateways. Any other values are interpreted as hostnames for custom gateways. An empty list means "use default sources". +**DEPRECATED:** Download sources for legacy migrations. Only `"HTTPS"` is supported. -Default: `["HTTPS", "IPFS"]` +Type: `array[string]` (optional) + +Default: `["HTTPS"]` ### `Migration.Keep` -Specifies whether or not to keep the migration after downloading it. Options are "discard", "cache", "pin". Empty string for default. +**DEPRECATED:** Controls retention of legacy migration binaries. Options: `"cache"` (default), `"discard"`, `"keep"`. -Default: `cache` +Type: `string` (optional) + +Default: `"cache"` ## `Mounts` @@ -1806,7 +2062,6 @@ Type: `optionalDuration` (unset for the default) Tells reprovider what should be announced. Valid strategies are: - `"all"` - announce all CIDs of stored blocks - - Order: root blocks of direct and recursive pins and MFS root are announced first, then the rest of blockstore - `"pinned"` - only announce recursively pinned CIDs (`ipfs pin add -r`, both roots and child blocks) - Order: root blocks of direct and recursive pins are announced first, then the child blocks of recursive pins - `"roots"` - only announce the root block of explicitly pinned CIDs (`ipfs pin add`) @@ -1822,12 +2077,13 @@ Tells reprovider what should be announced. Valid strategies are: - `"pinned+mfs"` - a combination of the `pinned` and `mfs` strategies. - **ℹ️ NOTE:** This is the suggested strategy for users who run without GC and don't want to provide everything in cache. - Order: first `pinned` and then the locally available part of `mfs`. -- `"flat"` - same as `all`, announce all CIDs of stored blocks, but without prioritizing anything. -> [!IMPORTANT] -> Reproviding larger pinsets using the `all`, `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million items for reproviding to the Amino DHT. -> This is due to the use of a buffered provider, which avoids holding a lock on the entire pinset during the reprovide cycle. -> The `flat` strategy can be used to lower memory requirements, but only recommended if memory utilization is too high, prioritization of pins is not necessary, and it is acceptable to announce every block cached in the local repository. +**Strategy changes automatically clear the provide queue.** When you change `Reprovider.Strategy` and restart Kubo, the provide queue is automatically cleared to ensure only content matching your new strategy is announced. You can also manually clear the queue using `ipfs provide clear`. + +**Memory requirements:** + +- Reproviding larger pinsets using the `mfs`, `pinned`, `pinned+mfs` or `roots` strategies requires additional memory, with an estimated ~1 GiB of RAM per 20 million items for reproviding to the Amino DHT. +- This is due to the use of a buffered provider, which avoids holding a lock on the entire pinset during the reprovide cycle. Default: `"all"` @@ -1839,7 +2095,7 @@ Contains options for content, peer, and IPNS routing mechanisms. ### `Routing.Type` -There are multiple routing options: "auto", "autoclient", "none", "dht", "dhtclient", and "custom". +There are multiple routing options: "auto", "autoclient", "none", "dht", "dhtclient", "delegated", and "custom". * **DEFAULT:** If unset, or set to "auto", your node will use the public IPFS DHT (aka "Amino") and parallel [`Routing.DelegatedRouters`](#routingdelegatedrouters) for additional speed. @@ -1876,6 +2132,15 @@ by leveraging [`Routing.DelegatedRouters`](#routingdelegatedrouters) HTTP endpoi introduced in [IPIP-337](https://github.com/ipfs/specs/pull/337) in addition to the Amino DHT. +When `Routing.Type` is set to `delegated`, your node will use **only** HTTP delegated routers and IPNS publishers, +without initializing the Amino DHT at all. This mode is useful for environments where peer-to-peer DHT connectivity +is not available or desired, while still enabling content routing and IPNS publishing via HTTP APIs. +This mode requires configuring [`Routing.DelegatedRouters`](#routingdelegatedrouters) for content routing and +[`Ipns.DelegatedPublishers`](#ipnsdelegatedpublishers) for IPNS publishing. + +**Note:** `delegated` mode operates as read-only for content providing - your node cannot announce content to the network +since there is no DHT connectivity. Content providing is automatically disabled when using this routing type. + [Advanced routing rules](https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md) can be configured in `Routing.Routers` after setting `Routing.Type` to `custom`. Default: `auto` (DHT + [`Routing.DelegatedRouters`](#routingdelegatedrouters)) @@ -1962,14 +2227,16 @@ Type: `array[string]` An array of URL hostnames for delegated routers to be queried in addition to the Amino DHT when `Routing.Type` is set to `auto` (default) or `autoclient`. These endpoints must support the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/). +The special value `"auto"` uses delegated routers from [AutoConf](#autoconf) when enabled. + > [!TIP] > Delegated routing allows IPFS implementations to offload tasks like content routing, peer routing, and naming to a separate process or server while also benefiting from HTTP caching. > > One can run their own delegated router either by implementing the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) themselves, or by using [Someguy](https://github.com/ipfs/someguy), a turn-key implementation that proxies requests to other routing systems. A public utility instance of Someguy is hosted at [`https://delegated-ipfs.dev`](https://docs.ipfs.tech/concepts/public-utilities/#delegated-routing). -Default: `["https://cid.contact"]` (empty or `nil` will also use this default; to disable delegated routing, set `Routing.Type` to `dht` or `dhtclient`) +Default: `["auto"]` -Type: `array[string]` +Type: `array[string]` (URLs or `"auto"`) ### `Routing.Routers` @@ -2485,6 +2752,14 @@ transports, multiaddrs for these transports must be added to `Addresses.Swarm`. Supported transports are: QUIC, TCP, WS, Relay, WebTransport and WebRTCDirect. +> [!CAUTION] +> **SECURITY CONSIDERATIONS FOR NETWORK TRANSPORTS** +> +> Enabling network transports allows your node to accept connections from the internet. +> Ensure your firewall rules and [`Addresses.Swarm`](#addressesswarm) configuration +> align with your security requirements. +> See [Security section](#security) for network exposure considerations. + Each field in this section is a `flag`. #### `Swarm.Transports.Network.TCP` @@ -2718,16 +2993,10 @@ Example: Be mindful that: - Currently only `https://` URLs for [DNS over HTTPS (DoH)](https://en.wikipedia.org/wiki/DNS_over_HTTPS) endpoints are supported as values. - The default catch-all resolver is the cleartext one provided by your operating system. It can be overridden by adding a DoH entry for the DNS root indicated by `.` as illustrated above. -- Out-of-the-box support for selected non-ICANN TLDs relies on third-party centralized services provided by respective communities on best-effort basis. The implicit DoH resolvers are: - ```json - { - "eth.": "https://dns.eth.limo/dns-query", - "crypto.": "https://resolver.unstoppable.io/dns-query" - } - ``` - To get all the benefits of a decentralized naming system we strongly suggest setting DoH endpoint to an empty string and running own decentralized resolver as catch-all one on localhost. +- Out-of-the-box support for selected non-ICANN TLDs relies on third-party centralized services provided by respective communities on best-effort basis. +- The special value `"auto"` uses DNS resolvers from [AutoConf](#autoconf) when enabled. For example: `{".": "auto"}` uses any custom DoH resolver (global or per TLD) provided by AutoConf system. -Default: `{}` +Default: `{".": "auto"}` Type: `object[string -> string]` @@ -2921,7 +3190,7 @@ have when building the DAG while importing. This setting controls both the fanout for basic, non-HAMT folder nodes. It sets a limit after which directories are converted to a HAMT-based structure. -When unset (0), no limit exists for chilcren. Directories will be converted to +When unset (0), no limit exists for children. Directories will be converted to HAMTs based on their estimated size only. This setting will cause basic directories to be converted to HAMTs when they @@ -2939,8 +3208,8 @@ Type: `optionalInteger` The maximum number of children that a node part of a Unixfs HAMT directory (aka sharded directory) can have. -HAMT directory have unlimited children and are used when basic directories -become too big or reach `MaxLinks`. A HAMT is an structure made of unixfs +HAMT directories have unlimited children and are used when basic directories +become too big or reach `MaxLinks`. A HAMT is a structure made of unixfs nodes that store the list of elements in the folder. This option controls the maximum number of children that the HAMT nodes can have. @@ -3060,6 +3329,16 @@ is useful when using the daemon in test environments. Restores default network settings. Inverse profile of the test profile. +### `autoconf-on` profile + +Safe default for joining the public IPFS Mainnet swarm with automatic configuration. +Can also be used with custom AutoConf.URL for other networks. + +### `autoconf-off` profile + +Disables AutoConf and clears all networking fields for manual configuration. +Use this for private networks or when you want explicit control over all endpoints. + ### `flatfs` profile Configures the node to use the flatfs datastore. @@ -3163,7 +3442,7 @@ Disables [Reprovider](#reprovider) system (and announcing to Amino DHT). ### `announce-on` profile -(Re-)enables [Reprovider](#reprovider) system (reverts [`announce-off` profile](#annouce-off-profile). +(Re-)enables [Reprovider](#reprovider) system (reverts [`announce-off` profile](#announce-off-profile)). ### `legacy-cid-v0` profile @@ -3202,6 +3481,27 @@ See for exact [`Imp > Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details, > and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499). +## Security + +This section provides an overview of security considerations for configurations that expose network services. + +### Port and Network Exposure + +Several configuration options expose TCP or UDP ports that can make your Kubo node accessible from the network: + +- **[`Addresses.API`](#addressesapi)** - Exposes the admin RPC API (default: localhost:5001) +- **[`Addresses.Gateway`](#addressesgateway)** - Exposes the HTTP gateway (default: localhost:8080) +- **[`Addresses.Swarm`](#addressesswarm)** - Exposes P2P connectivity (default: 0.0.0.0:4001, both UDP and TCP) +- **[`Swarm.Transports.Network`](#swarmtransportsnetwork)** - Controls which P2P transport protocols are enabled over TCP and UDP + +### Security Best Practices + +- Keep admin services ([`Addresses.API`](#addressesapi)) bound to localhost unless authentication ([`API.Authorizations`](#apiauthorizations)) is configured +- Use [`Gateway.NoFetch`](#gatewaynofetch) to prevent arbitrary CID retrieval if Kubo is acting as a public gateway available to anyone +- Configure firewall rules to restrict access to exposed ports. Note that [`Addresses.Swarm`](#addressesswarm) is special - all incoming traffic to swarm ports should be allowed to ensure proper P2P connectivity +- Control which public-facing addresses are announced to other peers using [`Addresses.NoAnnounce`](#addressesnoannounce), [`Addresses.Announce`](#addressesannounce), and [`Addresses.AppendAnnounce`](#addressesappendannounce) +- Consider using the [`server` profile](#server-profile) for production deployments + ## Types This document refers to the standard JSON types (e.g., `null`, `string`, diff --git a/docs/debug-guide.md b/docs/debug-guide.md index 74034f41f..7268ef6bc 100644 --- a/docs/debug-guide.md +++ b/docs/debug-guide.md @@ -15,6 +15,8 @@ This is a document for helping debug Kubo. Please add to it if you can! ### Beginning +> **Note:** Enable more logs by setting `GOLOG_LOG_LEVEL` env variable when troubleshooting. See [go-log documentation](https://github.com/ipfs/go-log#golog_log_level) for configuration options and available log levels. + When you see ipfs doing something (using lots of CPU, memory, or otherwise being weird), the first thing you want to do is gather all the relevant profiling information. diff --git a/docs/environment-variables.md b/docs/environment-variables.md index ed18f8f3b..cd900de94 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -18,6 +18,7 @@ - [`IPFS_HTTP_ROUTERS_FILTER_PROTOCOLS`](#ipfs_http_routers_filter_protocols) - [`IPFS_CONTENT_BLOCKING_DISABLE`](#ipfs_content_blocking_disable) - [`IPFS_WAIT_REPO_LOCK`](#ipfs_wait_repo_lock) + - [`IPFS_TELEMETRY`](#ipfs_telemetry) - [`LIBP2P_TCP_REUSEPORT`](#libp2p_tcp_reuseport) - [`LIBP2P_TCP_MUX`](#libp2p_tcp_mux) - [`LIBP2P_MUX_PREFS`](#libp2p_mux_prefs) @@ -152,9 +153,15 @@ $ ipfs resolve -r /ipns/dnslink-test2.example.com ## `IPFS_HTTP_ROUTERS` -Overrides all implicit HTTP routers enabled when `Routing.Type=auto` with -the space-separated list of URLs provided in this variable. -Useful for testing and debugging in offline contexts. +Overrides AutoConf and all other HTTP routers when set. +When `Routing.Type=auto`, this environment variable takes precedence over +both AutoConf-provided endpoints and any manually configured delegated routers. +The value should be a space or comma-separated list of HTTP routing endpoint URLs. + +This is useful for: +- Testing and debugging in offline contexts +- Overriding AutoConf endpoints temporarily +- Using custom or private HTTP routing services Example: @@ -163,11 +170,11 @@ $ ipfs config Routing.Type auto $ IPFS_HTTP_ROUTERS="http://127.0.0.1:7423" ipfs daemon ``` -The above will replace implicit HTTP routers with single one, allowing for +The above will replace all AutoConf endpoints with a single local one, allowing for inspection/debug of HTTP requests sent by Kubo via `while true ; do nc -l 7423; done` or more advanced tools like [mitmproxy](https://docs.mitmproxy.org/stable/#mitmproxy). -Default: `config.DefaultHTTPRouters` +When not set, Kubo uses endpoints from AutoConf (when enabled) or manually configured `Routing.DelegatedRouters`. ## `IPFS_HTTP_ROUTERS_FILTER_PROTOCOLS` @@ -194,6 +201,22 @@ IPFS_WAIT_REPO_LOCK="15s" If the lock cannot be acquired because someone else has the lock, and `IPFS_WAIT_REPO_LOCK` is set to a valid value, then acquiring the lock is retried every second until the lock is acquired or the specified wait time has elapsed. +## `IPFS_TELEMETRY` + +Controls the behavior of the [telemetry plugin](telemetry.md). Valid values are: + +- `on`: Enables telemetry. +- `off`: Disables telemetry. +- `auto`: Like `on`, but logs an informative message about telemetry and gives user 15 minutes to opt-out before first collection. Used automatically on first run and when `IPFS_TELEMETRY` is not set. + +The mode can also be set in the config file under `Plugins.Plugins.telemetry.Config.Mode`. + +Example: + +```bash +export IPFS_TELEMETRY="off" +``` + ## `LIBP2P_TCP_REUSEPORT` Kubo tries to reuse the same source port for all connections to improve NAT diff --git a/docs/examples/kubo-as-a-library/go.mod b/docs/examples/kubo-as-a-library/go.mod index 6c0881fab..5e728552d 100644 --- a/docs/examples/kubo-as-a-library/go.mod +++ b/docs/examples/kubo-as-a-library/go.mod @@ -1,16 +1,16 @@ module github.com/ipfs/kubo/examples/kubo-as-a-library -go 1.24 +go 1.25 // Used to keep this in sync with the current version of kubo. You should remove // this if you copy this example. replace github.com/ipfs/kubo => ./../../.. require ( - github.com/ipfs/boxo v0.33.0 + github.com/ipfs/boxo v0.34.0 github.com/ipfs/kubo v0.0.0-00010101000000-000000000000 - github.com/libp2p/go-libp2p v0.42.0 - github.com/multiformats/go-multiaddr v0.16.0 + github.com/libp2p/go-libp2p v0.43.0 + github.com/multiformats/go-multiaddr v0.16.1 ) require ( @@ -23,9 +23,10 @@ require ( github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/caddyserver/certmagic v0.21.6 // indirect + github.com/caddyserver/certmagic v0.23.0 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/ceramicnetwork/go-dag-jose v0.1.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect @@ -49,23 +50,21 @@ require ( github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.6 // indirect - github.com/gammazero/chanqueue v1.1.0 // indirect - github.com/gammazero/deque v1.0.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.9 // indirect + github.com/gammazero/chanqueue v1.1.1 // indirect + github.com/gammazero/deque v1.1.0 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.4 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect @@ -76,7 +75,7 @@ require ( github.com/ipfs/go-block-format v0.2.2 // indirect github.com/ipfs/go-cid v0.5.0 // indirect github.com/ipfs/go-cidutil v0.1.0 // indirect - github.com/ipfs/go-datastore v0.8.2 // indirect + github.com/ipfs/go-datastore v0.8.3 // indirect github.com/ipfs/go-ds-badger v0.3.4 // indirect github.com/ipfs/go-ds-flatfs v0.5.5 // indirect github.com/ipfs/go-ds-leveldb v0.5.2 // indirect @@ -87,35 +86,34 @@ require ( github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect - github.com/ipfs/go-ipld-cbor v0.2.0 // indirect + github.com/ipfs/go-ipld-cbor v0.2.1 // indirect github.com/ipfs/go-ipld-format v0.6.2 // indirect github.com/ipfs/go-ipld-git v0.1.1 // indirect github.com/ipfs/go-ipld-legacy v0.2.2 // indirect - github.com/ipfs/go-log/v2 v2.6.0 // indirect + github.com/ipfs/go-log/v2 v2.8.1 // indirect github.com/ipfs/go-metrics-interface v0.3.0 // indirect github.com/ipfs/go-peertaskqueue v0.8.2 // indirect github.com/ipfs/go-unixfsnode v1.10.1 // indirect github.com/ipld/go-car/v2 v2.14.3 // indirect github.com/ipld/go-codec-dagpb v1.7.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect - github.com/ipshipyard/p2p-forge v0.6.0 // indirect + github.com/ipshipyard/p2p-forge v0.6.1 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/jbenet/goprocess v0.1.4 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/koron/go-ssdp v0.0.6 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/libdns/libdns v0.2.2 // indirect + github.com/libdns/libdns v1.0.0-beta.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-doh-resolver v0.5.0 // indirect github.com/libp2p/go-flow-metrics v0.3.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.33.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.34.0 // indirect github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.13.1 // indirect + github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect @@ -127,8 +125,8 @@ require ( github.com/libp2p/zeroconf/v2 v2.2.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mholt/acmez/v3 v3.0.0 // indirect - github.com/miekg/dns v1.1.66 // indirect + github.com/mholt/acmez/v3 v3.1.2 // indirect + github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect @@ -143,7 +141,6 @@ require ( github.com/multiformats/go-multistream v0.6.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.23.4 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -169,21 +166,20 @@ require ( github.com/pion/webrtc/v4 v4.1.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.64.0 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.52.0 // indirect - github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect + github.com/quic-go/quic-go v0.54.0 // indirect + github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect - github.com/samber/lo v1.47.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect - github.com/whyrusleeping/cbor-gen v0.1.2 // indirect + github.com/whyrusleeping/cbor-gen v0.3.1 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect @@ -191,18 +187,17 @@ require ( github.com/zeebo/blake3 v0.2.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/zipkin v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/sdk v1.31.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/automaxprocs v1.6.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/zipkin v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/dig v1.19.0 // indirect go.uber.org/fx v1.24.0 // indirect go.uber.org/mock v0.5.2 // indirect @@ -210,21 +205,21 @@ require ( go.uber.org/zap v1.27.0 // indirect go.uber.org/zap/exp v0.3.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.34.0 // indirect + golang.org/x/tools v0.36.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect + google.golang.org/protobuf v1.36.7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/docs/examples/kubo-as-a-library/go.sum b/docs/examples/kubo-as-a-library/go.sum index bcaa73e90..50a10c6b6 100644 --- a/docs/examples/kubo-as-a-library/go.sum +++ b/docs/examples/kubo-as-a-library/go.sum @@ -61,12 +61,14 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/caddyserver/certmagic v0.21.6 h1:1th6GfprVfsAtFNOu4StNMF5IxK5XiaI0yZhAHlZFPE= -github.com/caddyserver/certmagic v0.21.6/go.mod h1:n1sCo7zV1Ez2j+89wrzDxo4N/T1Ws/Vx8u5NvuBFabw= +github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU= +github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/ceramicnetwork/go-dag-jose v0.1.1 h1:7pObs22egc14vSS3AfCFfS1VmaL4lQUsAK7OGC3PlKk= github.com/ceramicnetwork/go-dag-jose v0.1.1/go.mod h1:8ptnYwY2Z2y/s5oJnNBn/UCxLg6CpramNJ2ZXF/5aNY= @@ -157,12 +159,12 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= -github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= -github.com/gammazero/chanqueue v1.1.0 h1:yiwtloc1azhgGLFo2gMloJtQvkYD936Ai7tBfa+rYJw= -github.com/gammazero/chanqueue v1.1.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= -github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= -github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= +github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= +github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= +github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= +github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo= +github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= @@ -178,13 +180,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -246,8 +246,6 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -266,8 +264,8 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -289,8 +287,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.33.0 h1:9ow3chwkDzMj0Deq4AWRUEI7WnIIV7SZhPTzzG2mmfw= -github.com/ipfs/boxo v0.33.0/go.mod h1:3IPh7YFcCIcKp6o02mCHovrPntoT5Pctj/7j4syh/RM= +github.com/ipfs/boxo v0.34.0 h1:pMP9bAsTs4xVh8R0ZmxIWviV7kjDa60U24QrlGgHb1g= +github.com/ipfs/boxo v0.34.0/go.mod h1:kzdH/ewDybtO3+M8MCVkpwnIIc/d2VISX95DFrY4vQA= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= @@ -305,8 +303,8 @@ github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= -github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= +github.com/ipfs/go-datastore v0.8.3 h1:z391GsQyGKUIUof2tPoaZVeDknbt7fNHs6Gqjcw5Jo4= +github.com/ipfs/go-datastore v0.8.3/go.mod h1:raxQ/CreIy9L6MxT71ItfMX12/ASN6EhXJoUFjICQ2M= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= @@ -336,10 +334,8 @@ github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68Q github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= -github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= -github.com/ipfs/go-ipld-cbor v0.2.0 h1:VHIW3HVIjcMd8m4ZLZbrYpwjzqlVUfjLM7oK4T5/YF0= -github.com/ipfs/go-ipld-cbor v0.2.0/go.mod h1:Cp8T7w1NKcu4AQJLqK0tWpd1nkgTxEVB5C6kVpLW6/0= +github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E= +github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= github.com/ipfs/go-ipld-format v0.6.2 h1:bPZQ+A05ol0b3lsJSl0bLvwbuQ+HQbSsdGTy4xtYUkU= github.com/ipfs/go-ipld-format v0.6.2/go.mod h1:nni2xFdHKx5lxvXJ6brt/pndtGxKAE+FPR1rg4jTkyk= github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y= @@ -349,14 +345,14 @@ github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7 github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg= -github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8= +github.com/ipfs/go-log/v2 v2.8.1 h1:Y/X36z7ASoLJaYIJAL4xITXgwf7RVeqb1+/25aq/Xk0= +github.com/ipfs/go-log/v2 v2.8.1/go.mod h1:NyhTBcZmh2Y55eWVjOeKf8M7e4pnJYM3yDZNxQBWEEY= github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= -github.com/ipfs/go-test v0.2.2 h1:1yjYyfbdt1w93lVzde6JZ2einh3DIV40at4rVoyEcE8= -github.com/ipfs/go-test v0.2.2/go.mod h1:cmLisgVwkdRCnKu/CFZOk2DdhOcwghr5GsHeqwexoRA= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= github.com/ipfs/go-unixfsnode v1.10.1 h1:hGKhzuH6NSzZ4y621wGuDspkjXRNG3B+HqhlyTjSwSM= github.com/ipfs/go-unixfsnode v1.10.1/go.mod h1:eguv/otvacjmfSbYvmamc9ssNAzLvRk0+YN30EYeOOY= github.com/ipld/go-car/v2 v2.14.3 h1:1Mhl82/ny8MVP+w1M4LXbj4j99oK3gnuZG2GmG1IhC8= @@ -368,8 +364,8 @@ github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= -github.com/ipshipyard/p2p-forge v0.6.0 h1:kNhYxgYGtqF3MLts/i0hw+7ygtgNB4Qv8h6fo7j6Iq4= -github.com/ipshipyard/p2p-forge v0.6.0/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U= +github.com/ipshipyard/p2p-forge v0.6.1 h1:987/hUC1YxI56CcMX6iTB+9BLjFV0d2SJnig9Z1pf8A= +github.com/ipshipyard/p2p-forge v0.6.1/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -377,8 +373,6 @@ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABo github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= -github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -397,8 +391,8 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= @@ -412,8 +406,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s= -github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= +github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ= +github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -426,20 +420,20 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= -github.com/libp2p/go-libp2p v0.42.0 h1:A8foZk+ZEhZTv0Jb++7xUFlrFhBDv4j2Vh/uq4YX+KE= -github.com/libp2p/go-libp2p v0.42.0/go.mod h1:4NGcjbD9OIvFiSRb0XueCO19zJ4kSPK5vkyyOUYmMro= +github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU= +github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-kad-dht v0.33.1 h1:hKFhHMf7WH69LDjaxsJUWOU6qZm71uO47M/a5ijkiP0= -github.com/libp2p/go-libp2p-kad-dht v0.33.1/go.mod h1:CdmNk4VeGJa9EXM9SLNyNVySEvduKvb+5rSC/H4pLAo= +github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0= +github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs= github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g= github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= -github.com/libp2p/go-libp2p-pubsub v0.13.1 h1:tV3ttzzZSCk0EtEXnxVmWIXgjVxXx+20Jwjbs/Ctzjo= -github.com/libp2p/go-libp2p-pubsub v0.13.1/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44= +github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8= +github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44= github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s= github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= @@ -477,12 +471,12 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mholt/acmez/v3 v3.0.0 h1:r1NcjuWR0VaKP2BTjDK9LRFBw/WvURx3jlaEUl9Ht8E= -github.com/mholt/acmez/v3 v3.0.0/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= +github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc= +github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= -github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -514,8 +508,8 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc= -github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -556,8 +550,6 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -630,27 +622,25 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= -github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.52.0 h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA= -github.com/quic-go/quic-go v0.52.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ= -github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= -github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= +github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg= +github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -658,8 +648,6 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= -github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -748,8 +736,8 @@ github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboa github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= -github.com/whyrusleeping/cbor-gen v0.1.2 h1:WQFlrPhpcQl+M2/3dP5cvlTLWPVsL6LGBb9jJt6l/cA= -github.com/whyrusleeping/cbor-gen v0.1.2/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= +github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -781,32 +769,32 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= -go.opentelemetry.io/otel/exporters/zipkin v1.31.0 h1:CgucL0tj3717DJnni7HVVB2wExzi8c2zJNEA2BhLMvI= -go.opentelemetry.io/otel/exporters/zipkin v1.31.0/go.mod h1:rfzOVNiSwIcWtEC2J8epwG26fiaXlYvLySJ7bwsrtAE= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4= +go.opentelemetry.io/otel/exporters/zipkin v1.37.0 h1:Z2apuaRnHEjzDAkpbWNPiksz1R0/FCIrJSjiMA43zwI= +go.opentelemetry.io/otel/exporters/zipkin v1.37.0/go.mod h1:ofGu/7fG+bpmjZoiPUUmYDJ4vXWxMT57HmGoegx49uw= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= @@ -844,8 +832,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -854,8 +842,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -878,8 +866,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -915,8 +903,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -935,8 +923,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -983,8 +971,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1004,8 +992,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1046,8 +1034,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1093,10 +1081,10 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1109,8 +1097,8 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1122,8 +1110,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/docs/experimental-features.md b/docs/experimental-features.md index ef55691ba..68d7a819c 100644 --- a/docs/experimental-features.md +++ b/docs/experimental-features.md @@ -680,3 +680,4 @@ ipfs config --json Experimental.GatewayOverLibp2p true ## Accelerated DHT Client This feature now lives at [`Routing.AcceleratedDHTClient`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient). + diff --git a/docs/plugins.md b/docs/plugins.md index 86cfe1c51..8a388a533 100644 --- a/docs/plugins.md +++ b/docs/plugins.md @@ -117,6 +117,7 @@ Example: | [flatfs](https://github.com/ipfs/kubo/tree/master/plugin/plugins/flatfs) | Datastore | x | A stable filesystem-based datastore. | | [levelds](https://github.com/ipfs/kubo/tree/master/plugin/plugins/levelds) | Datastore | x | A stable, flexible datastore backend. | | [jaeger](https://github.com/ipfs/go-jaeger-plugin) | Tracing | | An opentracing backend. | +| [telemetry](https://github.com/ipfs/kubo/tree/master/plugin/plugins/telemetry) | Telemetry | x | Collects anonymized usage data for Kubo development. | * **Preloaded** plugins are built into the Kubo binary and do not need to be installed separately. At the moment, all in-tree plugins are preloaded. diff --git a/docs/telemetry.md b/docs/telemetry.md new file mode 100644 index 000000000..001c416b6 --- /dev/null +++ b/docs/telemetry.md @@ -0,0 +1,123 @@ +# Telemetry Plugin Documentation + +The **Telemetry plugin** is a feature in Kubo that collects **anonymized usage data** to help the development team better understand how the software is used, identify areas for improvement, and guide future feature development. + +This data is not personally identifiable and is used solely for the purpose of improving the Kubo project. + +--- + +## 🛡️ How to Control Telemetry + +The behavior of the Telemetry plugin is controlled via the environment variable [`IPFS_TELEMETRY`](environment-variables.md#ipfs_telemetry) and optionally via the `Plugins.Plugins.telemetry.Config.Mode` in the IPFS config file. + +### Available Modes + +| Mode | Description | +|----------|-----------------------------------------------------------------------------| +| `on` | **Default**. Telemetry is enabled. Data is sent periodically. | +| `off` | Telemetry is disabled. No data is sent. Any existing telemetry UUID file is removed. | +| `auto` | Like `on`, but logs an informative message about the telemetry and gives user 15 minutes to opt-out before first collection. This mode is automatically used on the first run when `IPFS_TELEMETRY` is not set and telemetry UUID is not found (not generated yet). The informative message is only shown once. | + +You can set the mode in your environment: + +```bash +export IPFS_TELEMETRY="off" +``` + +Or in your IPFS config file: + +```json +{ + "Plugins": { + "Plugins": { + "telemetry": { + "Config": { + "Mode": "off" + } + } + } + } +} +``` + +--- + +## 📦 What Data is Collected? + +The telemetry plugin collects the following anonymized data: + +### General Information +- **Agent version**: The version of Kubo being used. +- **Platform details**: Operating system, architecture, and container status. +- **Uptime**: How long the node has been running, categorized into buckets. +- **Repo size**: Categorized into buckets (e.g., 1GB, 5GB, 10GB, etc.). + +### Network Configuration +- **Private network**: Whether the node is running in a private network. +- **Bootstrap peers**: Whether custom bootstrap peers are used. +- **Routing type**: Whether the node uses DHT, IPFS, or a custom routing setup. +- **AutoNAT settings**: Whether AutoNAT is enabled and its reachability status. +- **AutoConf settings**: Whether AutoConf is enabled and whether a custom URL is used. +- **Swarm settings**: Whether hole punching is enabled, and whether public IP addresses are used. + +### TLS and Discovery +- **AutoTLS settings**: Whether WSS is enabled and whether a custom domain suffix is used. +- **Discovery settings**: Whether mDNS is enabled. + +### Reprovider Strategy +- The strategy used for reprovider (e.g., "all", "pinned"...). + +--- + +## 🧑‍🤝‍🧑 Privacy and Anonymization + +All data collected is: +- **Anonymized**: No personally identifiable information (PII) is sent. +- **Optional**: Users can choose to opt out at any time. +- **Secure**: Data is sent over HTTPS to a trusted endpoint. + +The telemetry UUID is stored in the IPFS repo folder and is used to identify the node across runs, but it does not contain any personal information. When you opt-out, this UUID file is automatically removed to ensure complete privacy. + +--- + +## 📦 Contributing to the Project + +By enabling telemetry, you are helping the Kubo team improve the software for the entire community. The data is used to: + +- Prioritize feature development +- Identify performance bottlenecks +- Improve user experience + +You can always disable telemetry at any time if you change your mind. + +--- + +## 🧪 Testing Telemetry + +If you're testing telemetry locally, you can change the endpoint by setting the `Endpoint` field in the config: + +```json +{ + "Plugins": { + "Plugins": { + "telemetry": { + "Config": { + "Mode": "on", + "Endpoint": "http://localhost:8080" + } + } + } + } +} +``` + +This allows you to capture and inspect telemetry data locally. + +--- + +## 📦 Further Reading + +For more information, see: +- [IPFS Environment Variables](docs/environment-variables.md) +- [IPFS Plugins](docs/plugins.md) +- [IPFS Configuration](docs/config.md) diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index de475dd89..9c36c9a26 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -108,7 +108,10 @@ func loadRoot(ctx context.Context, ipfs iface.CoreAPI, key iface.Key) (*mfs.Root return nil, nil, dag.ErrNotProtobuf } - root, err := mfs.NewRoot(ctx, ipfs.Dag(), pbnode, ipnsPubFunc(ipfs, key)) + // We have no access to provider.System from the CoreAPI. The Routing + // part offers Provide through the router so it may be slow/risky + // to give that here to MFS. Therefore we leave as nil. + root, err := mfs.NewRoot(ctx, ipfs.Dag(), pbnode, ipnsPubFunc(ipfs, key), nil) if err != nil { return nil, nil, err } diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go index 34a8eef51..8c8ea8afe 100644 --- a/fuse/ipns/mount_unix.go +++ b/fuse/ipns/mount_unix.go @@ -29,5 +29,5 @@ func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { return nil, err } - return mount.NewMount(ipfs.Process, fsys, ipnsmp, allowOther) + return mount.NewMount(fsys, ipnsmp, allowOther) } diff --git a/fuse/mfs/mount_unix.go b/fuse/mfs/mount_unix.go index 7fe72e8df..bd7021e28 100644 --- a/fuse/mfs/mount_unix.go +++ b/fuse/mfs/mount_unix.go @@ -17,5 +17,5 @@ func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { } allowOther := cfg.Mounts.FuseAllowOther fsys := NewFileSystem(ipfs) - return mount.NewMount(ipfs.Process, fsys, mountpoint, allowOther) + return mount.NewMount(fsys, mountpoint, allowOther) } diff --git a/fuse/mount/fuse.go b/fuse/mount/fuse.go index 02d733b89..e18c0b4a9 100644 --- a/fuse/mount/fuse.go +++ b/fuse/mount/fuse.go @@ -11,7 +11,6 @@ import ( "bazil.org/fuse" "bazil.org/fuse/fs" - "github.com/jbenet/goprocess" ) var ErrNotMounted = errors.New("not mounted") @@ -25,12 +24,12 @@ type mount struct { active bool activeLock *sync.RWMutex - proc goprocess.Process + unmountOnce sync.Once } // Mount mounts a fuse fs.FS at a given location, and returns a Mount instance. -// parent is a ContextGroup to bind the mount's ContextGroup to. -func NewMount(p goprocess.Process, fsys fs.FS, mountpoint string, allowOther bool) (Mount, error) { +// ctx is parent is a ContextGroup to bind the mount's ContextGroup to. +func NewMount(fsys fs.FS, mountpoint string, allowOther bool) (Mount, error) { var conn *fuse.Conn var err error @@ -54,12 +53,10 @@ func NewMount(p goprocess.Process, fsys fs.FS, mountpoint string, allowOther boo filesys: fsys, active: false, activeLock: &sync.RWMutex{}, - proc: goprocess.WithParent(p), // link it to parent. } - m.proc.SetTeardown(m.unmount) // launch the mounting process. - if err := m.mount(); err != nil { + if err = m.mount(); err != nil { _ = m.Unmount() // just in case. return nil, err } @@ -135,10 +132,6 @@ func (m *mount) unmount() error { return nil } -func (m *mount) Process() goprocess.Process { - return m.proc -} - func (m *mount) MountPoint() string { return m.mpoint } @@ -148,8 +141,12 @@ func (m *mount) Unmount() error { return ErrNotMounted } - // call Process Close(), which calls unmount() exactly once. - return m.proc.Close() + var err error + m.unmountOnce.Do(func() { + err = m.unmount() + }) + + return err } func (m *mount) IsActive() bool { diff --git a/fuse/mount/mount.go b/fuse/mount/mount.go index b9008bc46..ca10405fe 100644 --- a/fuse/mount/mount.go +++ b/fuse/mount/mount.go @@ -9,7 +9,6 @@ import ( "time" logging "github.com/ipfs/go-log/v2" - goprocess "github.com/jbenet/goprocess" ) var log = logging.Logger("mount") @@ -26,10 +25,6 @@ type Mount interface { // Checks if the mount is still active. IsActive() bool - - // Process returns the mount's Process to be able to link it - // to other processes. Unmount upon closing. - Process() goprocess.Process } // ForceUnmount attempts to forcibly unmount a given mount. diff --git a/fuse/node/mount_nofuse.go b/fuse/node/mount_nofuse.go index 7423cb24d..6d4e102e2 100644 --- a/fuse/node/mount_nofuse.go +++ b/fuse/node/mount_nofuse.go @@ -12,3 +12,7 @@ import ( func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { return errors.New("not compiled in") } + +func Unmount(node *core.IpfsNode) { + return +} diff --git a/fuse/node/mount_notsupp.go b/fuse/node/mount_notsupp.go index 79ac0e791..15f98c40e 100644 --- a/fuse/node/mount_notsupp.go +++ b/fuse/node/mount_notsupp.go @@ -12,3 +12,7 @@ import ( func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { return errors.New("FUSE not supported on OpenBSD or NetBSD. See #5334 (https://github.com/ipfs/kubo/issues/5334).") } + +func Unmount(node *core.IpfsNode) { + return +} diff --git a/fuse/node/mount_unix.go b/fuse/node/mount_unix.go index c628a85f4..6c63f6e50 100644 --- a/fuse/node/mount_unix.go +++ b/fuse/node/mount_unix.go @@ -36,18 +36,7 @@ func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { // check if we already have live mounts. // if the user said "Mount", then there must be something wrong. // so, close them and try again. - if node.Mounts.Ipfs != nil && node.Mounts.Ipfs.IsActive() { - // best effort - _ = node.Mounts.Ipfs.Unmount() - } - if node.Mounts.Ipns != nil && node.Mounts.Ipns.IsActive() { - // best effort - _ = node.Mounts.Ipns.Unmount() - } - if node.Mounts.Mfs != nil && node.Mounts.Mfs.IsActive() { - // best effort - _ = node.Mounts.Mfs.Unmount() - } + Unmount(node) if err := platformFuseChecks(node); err != nil { return err @@ -56,6 +45,27 @@ func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { return doMount(node, fsdir, nsdir, mfsdir) } +func Unmount(node *core.IpfsNode) { + if node.Mounts.Ipfs != nil && node.Mounts.Ipfs.IsActive() { + // best effort + if err := node.Mounts.Ipfs.Unmount(); err != nil { + log.Errorf("error unmounting IPFS: %s", err) + } + } + if node.Mounts.Ipns != nil && node.Mounts.Ipns.IsActive() { + // best effort + if err := node.Mounts.Ipns.Unmount(); err != nil { + log.Errorf("error unmounting IPNS: %s", err) + } + } + if node.Mounts.Mfs != nil && node.Mounts.Mfs.IsActive() { + // best effort + if err := node.Mounts.Mfs.Unmount(); err != nil { + log.Errorf("error unmounting MFS: %s", err) + } + } +} + func doMount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { fmtFuseErr := func(err error, mountpoint string) error { s := err.Error() diff --git a/fuse/node/mount_windows.go b/fuse/node/mount_windows.go index 42e6bc10b..9f22fe59e 100644 --- a/fuse/node/mount_windows.go +++ b/fuse/node/mount_windows.go @@ -9,3 +9,9 @@ func Mount(node *core.IpfsNode, fsdir, nsdir, mfsdir string) error { // currently a no-op, but we don't want to return an error return nil } + +func Unmount(node *core.IpfsNode) { + // TODO + // currently a no-op + return +} diff --git a/fuse/readonly/ipfs_test.go b/fuse/readonly/ipfs_test.go index e7dfbcb2a..348236737 100644 --- a/fuse/readonly/ipfs_test.go +++ b/fuse/readonly/ipfs_test.go @@ -187,7 +187,7 @@ func TestIpfsStressRead(t *testing.T) { defer wg.Done() for i := 0; i < 2000; i++ { - item, err := path.NewPath(paths[rand.Intn(len(paths))]) + item, err := path.NewPath("/ipfs/" + paths[rand.Intn(len(paths))]) if err != nil { errs <- err continue diff --git a/fuse/readonly/mount_unix.go b/fuse/readonly/mount_unix.go index 19be37abe..0ee198409 100644 --- a/fuse/readonly/mount_unix.go +++ b/fuse/readonly/mount_unix.go @@ -17,5 +17,5 @@ func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { } allowOther := cfg.Mounts.FuseAllowOther fsys := NewFileSystem(ipfs) - return mount.NewMount(ipfs.Process, fsys, mountpoint, allowOther) + return mount.NewMount(fsys, mountpoint, allowOther) } diff --git a/gc/gc.go b/gc/gc.go index 37daa887c..1d4805a66 100644 --- a/gc/gc.go +++ b/gc/gc.go @@ -81,7 +81,7 @@ func GC(ctx context.Context, bs bstore.GCBlockstore, dstor dstore.Datastore, pn return } - keychan, err := bs.AllKeysChan(ctx) + keychain, err := bs.AllKeysChan(ctx) if err != nil { select { case output <- Result{Error: err}: @@ -96,11 +96,11 @@ func GC(ctx context.Context, bs bstore.GCBlockstore, dstor dstore.Datastore, pn loop: for ctx.Err() == nil { // select may not notice that we're "done". select { - case k, ok := <-keychan: + case k, ok := <-keychain: if !ok { break loop } - // NOTE: assumes that all CIDs returned by the keychan are _raw_ CIDv1 CIDs. + // NOTE: assumes that all CIDs returned by the keychain are _raw_ CIDv1 CIDs. // This means we keep the block as long as we want it somewhere (CIDv1, CIDv0, Raw, other...). if !gcs.Has(k) { err := bs.DeleteBlock(ctx, k) diff --git a/go.mod b/go.mod index a9bd58174..0396b9e0f 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,13 @@ module github.com/ipfs/kubo -go 1.24 +go 1.25 require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc contrib.go.opencensus.io/exporter/prometheus v0.4.2 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 github.com/blang/semver/v4 v4.0.0 - github.com/caddyserver/certmagic v0.21.6 + github.com/caddyserver/certmagic v0.23.0 github.com/cenkalti/backoff/v4 v4.3.0 github.com/ceramicnetwork/go-dag-jose v0.1.1 github.com/cheggaaa/pb v1.0.29 @@ -22,11 +22,11 @@ require ( github.com/hashicorp/go-version v1.7.0 github.com/ipfs-shipyard/nopfs v0.0.14 github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 - github.com/ipfs/boxo v0.33.0 + github.com/ipfs/boxo v0.34.0 github.com/ipfs/go-block-format v0.2.2 github.com/ipfs/go-cid v0.5.0 github.com/ipfs/go-cidutil v0.1.0 - github.com/ipfs/go-datastore v0.8.2 + github.com/ipfs/go-datastore v0.8.3 github.com/ipfs/go-detect-race v0.0.1 github.com/ipfs/go-ds-badger v0.3.4 github.com/ipfs/go-ds-flatfs v0.5.5 @@ -35,41 +35,41 @@ require ( github.com/ipfs/go-ds-pebble v0.5.1 github.com/ipfs/go-fs-lock v0.1.1 github.com/ipfs/go-ipfs-cmds v0.15.0 - github.com/ipfs/go-ipld-cbor v0.2.0 + github.com/ipfs/go-ipld-cbor v0.2.1 github.com/ipfs/go-ipld-format v0.6.2 github.com/ipfs/go-ipld-git v0.1.1 github.com/ipfs/go-ipld-legacy v0.2.2 - github.com/ipfs/go-log/v2 v2.6.0 + github.com/ipfs/go-log/v2 v2.8.1 github.com/ipfs/go-metrics-interface v0.3.0 github.com/ipfs/go-metrics-prometheus v0.1.0 - github.com/ipfs/go-test v0.2.2 + github.com/ipfs/go-test v0.2.3 github.com/ipfs/go-unixfsnode v1.10.1 github.com/ipld/go-car/v2 v2.14.3 github.com/ipld/go-codec-dagpb v1.7.0 github.com/ipld/go-ipld-prime v0.21.0 - github.com/ipshipyard/p2p-forge v0.6.0 + github.com/ipshipyard/p2p-forge v0.6.1 github.com/jbenet/go-temp-err-catcher v0.1.0 - github.com/jbenet/goprocess v0.1.4 github.com/julienschmidt/httprouter v1.3.0 github.com/libp2p/go-doh-resolver v0.5.0 - github.com/libp2p/go-libp2p v0.42.0 + github.com/libp2p/go-libp2p v0.43.0 github.com/libp2p/go-libp2p-http v0.5.0 - github.com/libp2p/go-libp2p-kad-dht v0.33.1 + github.com/libp2p/go-libp2p-kad-dht v0.34.0 github.com/libp2p/go-libp2p-kbucket v0.7.0 - github.com/libp2p/go-libp2p-pubsub v0.13.1 + github.com/libp2p/go-libp2p-pubsub v0.14.2 github.com/libp2p/go-libp2p-pubsub-router v0.6.0 github.com/libp2p/go-libp2p-record v0.3.1 github.com/libp2p/go-libp2p-routing-helpers v0.7.5 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-socket-activation v0.1.1 - github.com/multiformats/go-multiaddr v0.16.0 + github.com/miekg/dns v1.1.68 + github.com/multiformats/go-multiaddr v0.16.1 github.com/multiformats/go-multiaddr-dns v0.4.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.2 github.com/multiformats/go-multihash v0.2.3 github.com/opentracing/opentracing-go v1.2.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.0 github.com/stretchr/testify v1.10.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d github.com/tidwall/gjson v1.16.0 @@ -77,21 +77,20 @@ require ( github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 go.opencensus.io v0.24.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 - go.opentelemetry.io/otel v1.35.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/trace v1.35.0 + go.opentelemetry.io/otel v1.37.0 + go.opentelemetry.io/otel/sdk v1.37.0 + go.opentelemetry.io/otel/trace v1.37.0 go.uber.org/dig v1.19.0 go.uber.org/fx v1.24.0 - go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.39.0 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b - golang.org/x/mod v0.25.0 - golang.org/x/sync v0.15.0 - golang.org/x/sys v0.33.0 - google.golang.org/protobuf v1.36.6 + golang.org/x/crypto v0.41.0 + golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 + golang.org/x/mod v0.27.0 + golang.org/x/sync v0.16.0 + golang.org/x/sys v0.35.0 + google.golang.org/protobuf v1.36.7 ) require ( @@ -103,6 +102,7 @@ require ( github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect + github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect @@ -124,25 +124,23 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/gabriel-vasile/mimetype v1.4.6 // indirect - github.com/gammazero/chanqueue v1.1.0 // indirect - github.com/gammazero/deque v1.0.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.9 // indirect + github.com/gammazero/chanqueue v1.1.1 // indirect + github.com/gammazero/deque v1.1.0 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect @@ -155,11 +153,11 @@ require ( github.com/ipfs/go-peertaskqueue v0.8.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/koron/go-ssdp v0.0.6 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/libdns/libdns v0.2.2 // indirect + github.com/libdns/libdns v1.0.0-beta.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.3.0 // indirect @@ -176,8 +174,7 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect - github.com/mholt/acmez/v3 v3.0.0 // indirect - github.com/miekg/dns v1.1.66 // indirect + github.com/mholt/acmez/v3 v3.1.2 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect @@ -188,7 +185,7 @@ require ( github.com/multiformats/go-multistream v0.6.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.23.4 // indirect + github.com/onsi/gomega v1.36.3 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pion/datachannel v1.5.10 // indirect @@ -214,16 +211,15 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.64.0 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/prometheus/statsd_exporter v0.27.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.52.0 // indirect - github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect + github.com/quic-go/quic-go v0.54.0 // indirect + github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/rs/cors v1.11.1 // indirect - github.com/samber/lo v1.47.0 // indirect github.com/slok/go-http-metrics v0.13.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/texttheater/golang-levenshtein v1.0.1 // indirect @@ -232,7 +228,7 @@ require ( github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect - github.com/whyrusleeping/cbor-gen v0.1.2 // indirect + github.com/whyrusleeping/cbor-gen v0.3.1 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/wlynxg/anet v0.0.5 // indirect @@ -242,28 +238,28 @@ require ( go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 // indirect go.opentelemetry.io/contrib/propagators/ot v1.21.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/zipkin v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/automaxprocs v1.6.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/zipkin v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/mock v0.5.2 // indirect + go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap/exp v0.3.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/net v0.41.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.26.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.34.0 // indirect + golang.org/x/tools v0.36.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/grpc v1.67.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect diff --git a/go.sum b/go.sum index 368d2f8bc..a5c243b03 100644 --- a/go.sum +++ b/go.sum @@ -88,12 +88,14 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/caddyserver/certmagic v0.21.6 h1:1th6GfprVfsAtFNOu4StNMF5IxK5XiaI0yZhAHlZFPE= -github.com/caddyserver/certmagic v0.21.6/go.mod h1:n1sCo7zV1Ez2j+89wrzDxo4N/T1Ws/Vx8u5NvuBFabw= +github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU= +github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/ceramicnetwork/go-dag-jose v0.1.1 h1:7pObs22egc14vSS3AfCFfS1VmaL4lQUsAK7OGC3PlKk= github.com/ceramicnetwork/go-dag-jose v0.1.1/go.mod h1:8ptnYwY2Z2y/s5oJnNBn/UCxLg6CpramNJ2ZXF/5aNY= @@ -195,12 +197,12 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= -github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= -github.com/gammazero/chanqueue v1.1.0 h1:yiwtloc1azhgGLFo2gMloJtQvkYD936Ai7tBfa+rYJw= -github.com/gammazero/chanqueue v1.1.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= -github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= -github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= +github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= +github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= +github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= +github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo= +github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= @@ -229,14 +231,12 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -311,8 +311,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -331,8 +329,8 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= @@ -356,8 +354,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.33.0 h1:9ow3chwkDzMj0Deq4AWRUEI7WnIIV7SZhPTzzG2mmfw= -github.com/ipfs/boxo v0.33.0/go.mod h1:3IPh7YFcCIcKp6o02mCHovrPntoT5Pctj/7j4syh/RM= +github.com/ipfs/boxo v0.34.0 h1:pMP9bAsTs4xVh8R0ZmxIWviV7kjDa60U24QrlGgHb1g= +github.com/ipfs/boxo v0.34.0/go.mod h1:kzdH/ewDybtO3+M8MCVkpwnIIc/d2VISX95DFrY4vQA= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= @@ -372,8 +370,8 @@ github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= -github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= +github.com/ipfs/go-datastore v0.8.3 h1:z391GsQyGKUIUof2tPoaZVeDknbt7fNHs6Gqjcw5Jo4= +github.com/ipfs/go-datastore v0.8.3/go.mod h1:raxQ/CreIy9L6MxT71ItfMX12/ASN6EhXJoUFjICQ2M= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= @@ -405,10 +403,8 @@ github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68Q github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= -github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= -github.com/ipfs/go-ipld-cbor v0.2.0 h1:VHIW3HVIjcMd8m4ZLZbrYpwjzqlVUfjLM7oK4T5/YF0= -github.com/ipfs/go-ipld-cbor v0.2.0/go.mod h1:Cp8T7w1NKcu4AQJLqK0tWpd1nkgTxEVB5C6kVpLW6/0= +github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E= +github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= github.com/ipfs/go-ipld-format v0.6.2 h1:bPZQ+A05ol0b3lsJSl0bLvwbuQ+HQbSsdGTy4xtYUkU= github.com/ipfs/go-ipld-format v0.6.2/go.mod h1:nni2xFdHKx5lxvXJ6brt/pndtGxKAE+FPR1rg4jTkyk= github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y= @@ -418,16 +414,16 @@ github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7 github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg= -github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8= +github.com/ipfs/go-log/v2 v2.8.1 h1:Y/X36z7ASoLJaYIJAL4xITXgwf7RVeqb1+/25aq/Xk0= +github.com/ipfs/go-log/v2 v2.8.1/go.mod h1:NyhTBcZmh2Y55eWVjOeKf8M7e4pnJYM3yDZNxQBWEEY= github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= github.com/ipfs/go-metrics-prometheus v0.1.0 h1:bApWOHkrH3VTBHzTHrZSfq4n4weOZDzZFxUXv+HyKcA= github.com/ipfs/go-metrics-prometheus v0.1.0/go.mod h1:2GtL525C/4yxtvSXpRJ4dnE45mCX9AS0XRa03vHx7G0= github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= -github.com/ipfs/go-test v0.2.2 h1:1yjYyfbdt1w93lVzde6JZ2einh3DIV40at4rVoyEcE8= -github.com/ipfs/go-test v0.2.2/go.mod h1:cmLisgVwkdRCnKu/CFZOk2DdhOcwghr5GsHeqwexoRA= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= github.com/ipfs/go-unixfsnode v1.10.1 h1:hGKhzuH6NSzZ4y621wGuDspkjXRNG3B+HqhlyTjSwSM= github.com/ipfs/go-unixfsnode v1.10.1/go.mod h1:eguv/otvacjmfSbYvmamc9ssNAzLvRk0+YN30EYeOOY= github.com/ipld/go-car/v2 v2.14.3 h1:1Mhl82/ny8MVP+w1M4LXbj4j99oK3gnuZG2GmG1IhC8= @@ -439,18 +435,15 @@ github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= -github.com/ipshipyard/p2p-forge v0.6.0 h1:kNhYxgYGtqF3MLts/i0hw+7ygtgNB4Qv8h6fo7j6Iq4= -github.com/ipshipyard/p2p-forge v0.6.0/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U= +github.com/ipshipyard/p2p-forge v0.6.1 h1:987/hUC1YxI56CcMX6iTB+9BLjFV0d2SJnig9Z1pf8A= +github.com/ipshipyard/p2p-forge v0.6.1/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= -github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -476,8 +469,8 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= @@ -495,8 +488,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s= -github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= +github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ= +github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -509,8 +502,8 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= -github.com/libp2p/go-libp2p v0.42.0 h1:A8foZk+ZEhZTv0Jb++7xUFlrFhBDv4j2Vh/uq4YX+KE= -github.com/libp2p/go-libp2p v0.42.0/go.mod h1:4NGcjbD9OIvFiSRb0XueCO19zJ4kSPK5vkyyOUYmMro= +github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU= +github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= @@ -519,14 +512,14 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc= github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg= -github.com/libp2p/go-libp2p-kad-dht v0.33.1 h1:hKFhHMf7WH69LDjaxsJUWOU6qZm71uO47M/a5ijkiP0= -github.com/libp2p/go-libp2p-kad-dht v0.33.1/go.mod h1:CdmNk4VeGJa9EXM9SLNyNVySEvduKvb+5rSC/H4pLAo= +github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0= +github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs= github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g= github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= -github.com/libp2p/go-libp2p-pubsub v0.13.1 h1:tV3ttzzZSCk0EtEXnxVmWIXgjVxXx+20Jwjbs/Ctzjo= -github.com/libp2p/go-libp2p-pubsub v0.13.1/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44= +github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8= +github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44= github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s= github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= @@ -576,12 +569,12 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mholt/acmez/v3 v3.0.0 h1:r1NcjuWR0VaKP2BTjDK9LRFBw/WvURx3jlaEUl9Ht8E= -github.com/mholt/acmez/v3 v3.0.0/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= +github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc= +github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= -github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -616,8 +609,8 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc= -github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -660,8 +653,6 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -735,8 +726,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -745,8 +734,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -760,8 +749,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= -github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -769,17 +758,17 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/statsd_exporter v0.27.1 h1:tcRJOmwlA83HPfWzosAgr2+zEN5XDFv+M2mn/uYkn5Y= github.com/prometheus/statsd_exporter v0.27.1/go.mod h1:vA6ryDfsN7py/3JApEst6nLTJboq66XsNcJGNmC88NQ= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.52.0 h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA= -github.com/quic-go/quic-go v0.52.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ= -github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= -github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= +github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg= +github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -792,8 +781,6 @@ github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= -github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -898,8 +885,8 @@ github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboa github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= -github.com/whyrusleeping/cbor-gen v0.1.2 h1:WQFlrPhpcQl+M2/3dP5cvlTLWPVsL6LGBb9jJt6l/cA= -github.com/whyrusleeping/cbor-gen v0.1.2/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= +github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -937,8 +924,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 h1:cXTYcMjY0dsYokAuo8LbNBQxpF8VgTHdiHJJ1zlIXl4= go.opentelemetry.io/contrib/propagators/autoprop v0.46.1/go.mod h1:WZxgny1/6+j67B1s72PLJ4bGjidoWFzSmLNfJKVt2bo= go.opentelemetry.io/contrib/propagators/aws v1.21.1 h1:uQIQIDWb0gzyvon2ICnghpLAf9w7ADOCUiIiwCQgR2o= @@ -949,30 +936,30 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 h1:f4beMGDKiVzg9IcX7/VuWV go.opentelemetry.io/contrib/propagators/jaeger v1.21.1/go.mod h1:U9jhkEl8d1LL+QXY7q3kneJWJugiN3kZJV2OWz3hkBY= go.opentelemetry.io/contrib/propagators/ot v1.21.1 h1:3TN5vkXjKYWp0YdMcnUEC/A+pBPvqz9V3nCS2xmcurk= go.opentelemetry.io/contrib/propagators/ot v1.21.1/go.mod h1:oy0MYCbS/b3cqUDW37wBWtlwBIsutngS++Lklpgh+fc= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= -go.opentelemetry.io/otel/exporters/zipkin v1.31.0 h1:CgucL0tj3717DJnni7HVVB2wExzi8c2zJNEA2BhLMvI= -go.opentelemetry.io/otel/exporters/zipkin v1.31.0/go.mod h1:rfzOVNiSwIcWtEC2J8epwG26fiaXlYvLySJ7bwsrtAE= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4= +go.opentelemetry.io/otel/exporters/zipkin v1.37.0 h1:Z2apuaRnHEjzDAkpbWNPiksz1R0/FCIrJSjiMA43zwI= +go.opentelemetry.io/otel/exporters/zipkin v1.37.0/go.mod h1:ofGu/7fG+bpmjZoiPUUmYDJ4vXWxMT57HmGoegx49uw= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= @@ -1010,8 +997,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1022,8 +1009,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1047,8 +1034,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1100,8 +1087,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1128,8 +1115,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1197,8 +1184,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1207,8 +1194,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1220,8 +1207,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1279,8 +1266,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1350,10 +1337,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1370,8 +1357,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1386,8 +1373,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/mk/golang.mk b/mk/golang.mk index 3b32a65f9..5f691bc85 100644 --- a/mk/golang.mk +++ b/mk/golang.mk @@ -1,5 +1,5 @@ # golang utilities -GO_MIN_VERSION = 1.18 +GO_MIN_VERSION = 1.25 export GO111MODULE=on @@ -26,10 +26,10 @@ TEST_GO := TEST_GO_BUILD := CHECK_GO := -go-pkg-name=$(shell $(GOCC) list $(go-tags) github.com/ipfs/kubo/$(1)) +go-pkg-name=$(shell GOFLAGS=-buildvcs=false $(GOCC) list $(go-tags) github.com/ipfs/kubo/$(1)) go-main-name=$(notdir $(call go-pkg-name,$(1)))$(?exe) go-curr-pkg-tgt=$(d)/$(call go-main-name,$(d)) -go-pkgs=$(shell $(GOCC) list github.com/ipfs/kubo/...) +go-pkgs=$(shell GOFLAGS=-buildvcs=false $(GOCC) list github.com/ipfs/kubo/...) go-tags=$(if $(GOTAGS), -tags="$(call join-with,$(space),$(GOTAGS))") go-flags-with-tags=$(GOFLAGS)$(go-tags) diff --git a/plugin/loader/preload.go b/plugin/loader/preload.go index 75e21270c..eb1bd5a6e 100644 --- a/plugin/loader/preload.go +++ b/plugin/loader/preload.go @@ -10,6 +10,7 @@ import ( pluginnopfs "github.com/ipfs/kubo/plugin/plugins/nopfs" pluginpebbleds "github.com/ipfs/kubo/plugin/plugins/pebbleds" pluginpeerlog "github.com/ipfs/kubo/plugin/plugins/peerlog" + plugintelemetry "github.com/ipfs/kubo/plugin/plugins/telemetry" ) // DO NOT EDIT THIS FILE @@ -26,4 +27,5 @@ func init() { Preload(pluginpeerlog.Plugins...) Preload(pluginfxtest.Plugins...) Preload(pluginnopfs.Plugins...) + Preload(plugintelemetry.Plugins...) } diff --git a/plugin/loader/preload_list b/plugin/loader/preload_list index 190cc65d7..80e5b9cc9 100644 --- a/plugin/loader/preload_list +++ b/plugin/loader/preload_list @@ -13,3 +13,4 @@ pebbleds github.com/ipfs/kubo/plugin/plugins/pebbleds * peerlog github.com/ipfs/kubo/plugin/plugins/peerlog * fxtest github.com/ipfs/kubo/plugin/plugins/fxtest * nopfs github.com/ipfs/kubo/plugin/plugins/nopfs * +telemetry github.com/ipfs/kubo/plugin/plugins/telemetry * diff --git a/plugin/plugins/telemetry/telemetry.go b/plugin/plugins/telemetry/telemetry.go new file mode 100644 index 000000000..bcb6c03e9 --- /dev/null +++ b/plugin/plugins/telemetry/telemetry.go @@ -0,0 +1,557 @@ +package telemetry + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + ipfs "github.com/ipfs/kubo" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core" + "github.com/ipfs/kubo/core/corerepo" + "github.com/ipfs/kubo/plugin" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/pnet" + multiaddr "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" +) + +var log = logging.Logger("telemetry") + +const ( + modeEnvVar = "IPFS_TELEMETRY" + uuidFilename = "telemetry_uuid" + endpoint = "https://telemetry.ipshipyard.dev" + sendDelay = 15 * time.Minute // delay before first telemetry collection after daemon start + sendInterval = 24 * time.Hour // interval between telemetry collections after the first one + httpTimeout = 30 * time.Second // timeout for telemetry HTTP requests +) + +type pluginMode int + +const ( + modeAuto pluginMode = iota + modeOn + modeOff +) + +// repoSizeBuckets defines size thresholds for categorizing repository sizes. +// Each value represents the upper limit of a bucket in bytes (except the last) +var repoSizeBuckets = []uint64{ + 1 << 30, // 1 GB + 5 << 30, // 5 GB + 10 << 30, // 10 GB + 100 << 30, // 100 GB + 500 << 30, // 500 GB + 1 << 40, // 1 TB + 10 << 40, // 10 TB + 11 << 40, // + anything more than 10TB falls here. +} + +var uptimeBuckets = []time.Duration{ + 1 * 24 * time.Hour, + 2 * 24 * time.Hour, + 3 * 24 * time.Hour, + 7 * 24 * time.Hour, + 14 * 24 * time.Hour, + 30 * 24 * time.Hour, + 31 * 24 * time.Hour, // + anything more than 30 days falls here. +} + +// A LogEvent is the object sent to the telemetry endpoint. +type LogEvent struct { + UUID string `json:"uuid"` + + AgentVersion string `json:"agent_version"` + + PrivateNetwork bool `json:"private_network"` + + BootstrappersCustom bool `json:"bootstrappers_custom"` + + RepoSizeBucket uint64 `json:"repo_size_bucket"` + + UptimeBucket time.Duration `json:"uptime_bucket"` + + ReproviderStrategy string `json:"reprovider_strategy"` + + RoutingType string `json:"routing_type"` + RoutingAcceleratedDHTClient bool `json:"routing_accelerated_dht_client"` + RoutingDelegatedCount int `json:"routing_delegated_count"` + + AutoNATServiceMode string `json:"autonat_service_mode"` + AutoNATReachability string `json:"autonat_reachability"` + + AutoConf bool `json:"autoconf"` + AutoConfCustom bool `json:"autoconf_custom"` + + SwarmEnableHolePunching bool `json:"swarm_enable_hole_punching"` + SwarmCircuitAddresses bool `json:"swarm_circuit_addresses"` + SwarmIPv4PublicAddresses bool `json:"swarm_ipv4_public_addresses"` + SwarmIPv6PublicAddresses bool `json:"swarm_ipv6_public_addresses"` + + AutoTLSAutoWSS bool `json:"auto_tls_auto_wss"` + AutoTLSDomainSuffixCustom bool `json:"auto_tls_domain_suffix_custom"` + + DiscoveryMDNSEnabled bool `json:"discovery_mdns_enabled"` + + PlatformOS string `json:"platform_os"` + PlatformArch string `json:"platform_arch"` + PlatformContainerized bool `json:"platform_containerized"` + PlatformVM bool `json:"platform_vm"` +} + +var Plugins = []plugin.Plugin{ + &telemetryPlugin{}, +} + +type telemetryPlugin struct { + uuidFilename string + mode pluginMode + endpoint string + runOnce bool // test-only flag: when true, sends telemetry immediately without delay + sendDelay time.Duration + + node *core.IpfsNode + config *config.Config + event *LogEvent + startTime time.Time +} + +func (p *telemetryPlugin) Name() string { + return "telemetry" +} + +func (p *telemetryPlugin) Version() string { + return "0.0.1" +} + +func readFromConfig(cfg interface{}, key string) string { + if cfg == nil { + return "" + } + + pcfg, ok := cfg.(map[string]interface{}) + if !ok { + return "" + } + + val, ok := pcfg[key].(string) + if !ok { + return "" + } + return val +} + +func (p *telemetryPlugin) Init(env *plugin.Environment) error { + // logging.SetLogLevel("telemetry", "DEBUG") + log.Debug("telemetry plugin Init()") + p.event = &LogEvent{} + p.startTime = time.Now() + + repoPath := env.Repo + p.uuidFilename = path.Join(repoPath, uuidFilename) + + v := os.Getenv(modeEnvVar) + if v != "" { + log.Debug("mode set from env-var") + } else if pmode := readFromConfig(env.Config, "Mode"); pmode != "" { + v = pmode + log.Debug("mode set from config") + } + + // read "Delay" from the config. Parse as duration. Set p.sendDelay to it + // or set default. + if delayStr := readFromConfig(env.Config, "Delay"); delayStr != "" { + delay, err := time.ParseDuration(delayStr) + if err != nil { + log.Debug("sendDelay set from default") + p.sendDelay = sendDelay + } else { + log.Debug("sendDelay set from config") + p.sendDelay = delay + } + } else { + log.Debug("sendDelay set from default") + p.sendDelay = sendDelay + } + + p.endpoint = endpoint + if ep := readFromConfig(env.Config, "Endpoint"); ep != "" { + log.Debug("endpoint set from config", ep) + p.endpoint = ep + } + + switch v { + case "off": + p.mode = modeOff + log.Debug("telemetry disabled via opt-out") + // Remove UUID file if it exists when user opts out + if _, err := os.Stat(p.uuidFilename); err == nil { + if err := os.Remove(p.uuidFilename); err != nil { + log.Debugf("failed to remove telemetry UUID file: %s", err) + } else { + log.Debug("removed existing telemetry UUID file due to opt-out") + } + } + return nil + case "auto": + p.mode = modeAuto + default: + p.mode = modeOn + } + log.Debug("telemetry mode: ", p.mode) + return nil +} + +func (p *telemetryPlugin) loadUUID() error { + // Generate or read our UUID from disk + b, err := os.ReadFile(p.uuidFilename) + if err != nil { + if !os.IsNotExist(err) { + log.Errorf("error reading telemetry uuid from disk: %s", err) + return err + } + uid, err := uuid.NewRandom() + if err != nil { + log.Errorf("cannot generate telemetry uuid: %s", err) + return err + } + p.event.UUID = uid.String() + p.mode = modeAuto + log.Debugf("new telemetry UUID %s. Mode set to Auto", uid) + + // Write the UUID to disk + if err := os.WriteFile(p.uuidFilename, []byte(p.event.UUID), 0600); err != nil { + log.Errorf("cannot write telemetry uuid: %s", err) + return err + } + return nil + } + + v := string(b) + v = strings.TrimSpace(v) + uid, err := uuid.Parse(v) + if err != nil { + log.Errorf("cannot parse telemetry uuid: %s", err) + return err + } + log.Debugf("uuid read from disk %s", uid) + p.event.UUID = uid.String() + return nil +} + +func (p *telemetryPlugin) hasDefaultBootstrapPeers() bool { + // With autoconf, default bootstrap is represented as ["auto"] + currentPeers := p.config.Bootstrap + return len(currentPeers) == 1 && currentPeers[0] == "auto" +} + +func (p *telemetryPlugin) showInfo() { + fmt.Printf(` + +ℹ️ Anonymous telemetry will be enabled in %s + +Kubo will collect anonymous usage data to help improve the software: +• What: Feature usage and configuration (no personal data) + Use GOLOG_LOG_LEVEL="telemetry=debug" to inspect collected data +• When: First collection in %s, then every 24h +• How: HTTP POST to %s + Anonymous ID: %s + +No data sent yet. To opt-out before collection starts: +• Set environment: %s=off +• Or run: ipfs config Plugins.Plugins.telemetry.Config.Mode off +• Then restart daemon + +This message is shown only once. +Learn more: https://github.com/ipfs/kubo/blob/master/docs/telemetry.md + + +`, p.sendDelay, p.sendDelay, endpoint, p.event.UUID, modeEnvVar) +} + +// Start finishes telemetry initialization once the IpfsNode is ready, +// collects telemetry data and sends it to the endpoint. +func (p *telemetryPlugin) Start(n *core.IpfsNode) error { + // We should not be crashing the daemon due to problems with telemetry + // so this is always going to return nil and panics are going to be + // handled. + defer func() { + if r := recover(); r != nil { + log.Errorf("telemetry plugin panicked: %v", r) + } + }() + + p.node = n + cfg, err := n.Repo.Config() + if err != nil { + log.Error("error getting the repo.Config: %s", err) + return nil + } + p.config = cfg + if p.mode == modeOff { + log.Debug("telemetry collection skipped: opted out") + return nil + } + + if !n.IsDaemon || !n.IsOnline { + log.Debugf("skipping telemetry. Daemon: %t. Online: %t", n.IsDaemon, n.IsOnline) + return nil + } + + // loadUUID might switch to modeAuto when generating a new uuid + if err := p.loadUUID(); err != nil { + p.mode = modeOff + return nil + } + + if p.mode == modeAuto { + p.showInfo() + } + + // runOnce is only used in tests to send telemetry immediately. + // In production, this is always false, ensuring users get the 15-minute delay. + if p.runOnce { + p.prepareEvent() + return p.sendTelemetry() + } + + go func() { + timer := time.NewTimer(p.sendDelay) + for range timer.C { + p.prepareEvent() + if err := p.sendTelemetry(); err != nil { + log.Warnf("telemetry submission failed: %s (will retry in %s)", err, sendInterval) + } + timer.Reset(sendInterval) + } + }() + + return nil +} + +func (p *telemetryPlugin) prepareEvent() { + p.collectBasicInfo() + p.collectRoutingInfo() + p.collectAutoNATInfo() + p.collectAutoConfInfo() + p.collectSwarmInfo() + p.collectAutoTLSInfo() + p.collectDiscoveryInfo() + p.collectPlatformInfo() +} + +// Collects: +// * AgentVersion +// * PrivateNetwork +// * RepoSizeBucket +// * BootstrappersCustom +// * UptimeBucket +// * ReproviderStrategy +func (p *telemetryPlugin) collectBasicInfo() { + p.event.AgentVersion = ipfs.GetUserAgentVersion() + + privNet := false + if pnet.ForcePrivateNetwork { + privNet = true + } else if key, _ := p.node.Repo.SwarmKey(); key != nil { + privNet = true + } + p.event.PrivateNetwork = privNet + + p.event.BootstrappersCustom = !p.hasDefaultBootstrapPeers() + + repoSizeBucket := repoSizeBuckets[len(repoSizeBuckets)-1] + sizeStat, err := corerepo.RepoSize(context.Background(), p.node) + if err == nil { + for _, b := range repoSizeBuckets { + if sizeStat.RepoSize > b { + continue + } + repoSizeBucket = b + break + } + p.event.RepoSizeBucket = repoSizeBucket + } else { + log.Debugf("error setting sizeStat: %s", err) + } + + uptime := time.Since(p.startTime) + uptimeBucket := uptimeBuckets[len(uptimeBuckets)-1] + for _, bucket := range uptimeBuckets { + if uptime > bucket { + continue + + } + uptimeBucket = bucket + break + } + p.event.UptimeBucket = uptimeBucket + + p.event.ReproviderStrategy = p.config.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy) +} + +func (p *telemetryPlugin) collectRoutingInfo() { + p.event.RoutingType = p.config.Routing.Type.WithDefault("auto") + p.event.RoutingAcceleratedDHTClient = p.config.Routing.AcceleratedDHTClient.WithDefault(false) + p.event.RoutingDelegatedCount = len(p.config.Routing.DelegatedRouters) +} + +type reachabilityHost interface { + Reachability() network.Reachability +} + +func (p *telemetryPlugin) collectAutoNATInfo() { + autonat := p.config.AutoNAT.ServiceMode + if autonat == config.AutoNATServiceUnset { + autonat = config.AutoNATServiceEnabled + } + autoNATSvcModeB, err := autonat.MarshalText() + if err == nil { + autoNATSvcMode := string(autoNATSvcModeB) + if autoNATSvcMode == "" { + autoNATSvcMode = "unset" + } + p.event.AutoNATServiceMode = autoNATSvcMode + } + + h := p.node.PeerHost + reachHost, ok := h.(reachabilityHost) + if ok { + p.event.AutoNATReachability = reachHost.Reachability().String() + } +} + +func (p *telemetryPlugin) collectSwarmInfo() { + p.event.SwarmEnableHolePunching = p.config.Swarm.EnableHolePunching.WithDefault(true) + + var circuitAddrs, publicIP4Addrs, publicIP6Addrs bool + for _, addr := range p.node.PeerHost.Addrs() { + if manet.IsPublicAddr(addr) { + if _, err := addr.ValueForProtocol(multiaddr.P_IP4); err == nil { + publicIP4Addrs = true + } else if _, err := addr.ValueForProtocol(multiaddr.P_IP6); err == nil { + publicIP6Addrs = true + } + } + if _, err := addr.ValueForProtocol(multiaddr.P_CIRCUIT); err == nil { + circuitAddrs = true + } + } + + p.event.SwarmCircuitAddresses = circuitAddrs + p.event.SwarmIPv4PublicAddresses = publicIP4Addrs + p.event.SwarmIPv6PublicAddresses = publicIP6Addrs +} + +func (p *telemetryPlugin) collectAutoTLSInfo() { + p.event.AutoTLSAutoWSS = p.config.AutoTLS.AutoWSS.WithDefault(config.DefaultAutoWSS) + domainSuffix := p.config.AutoTLS.DomainSuffix.WithDefault(config.DefaultDomainSuffix) + p.event.AutoTLSDomainSuffixCustom = domainSuffix != config.DefaultDomainSuffix +} + +func (p *telemetryPlugin) collectAutoConfInfo() { + p.event.AutoConf = p.config.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) + p.event.AutoConfCustom = p.config.AutoConf.URL.WithDefault(config.DefaultAutoConfURL) != config.DefaultAutoConfURL +} + +func (p *telemetryPlugin) collectDiscoveryInfo() { + p.event.DiscoveryMDNSEnabled = p.config.Discovery.MDNS.Enabled +} + +func (p *telemetryPlugin) collectPlatformInfo() { + p.event.PlatformOS = runtime.GOOS + p.event.PlatformArch = runtime.GOARCH + p.event.PlatformContainerized = isRunningInContainer() + p.event.PlatformVM = isRunningInVM() +} + +func isRunningInContainer() bool { + // Check for Docker container + if _, err := os.Stat("/.dockerenv"); err == nil { + return true + } + + // Check cgroup for container + content, err := os.ReadFile("/proc/self/cgroup") + if err == nil { + if strings.Contains(string(content), "docker") || strings.Contains(string(content), "lxc") || strings.Contains(string(content), "/kubepods") { + return true + } + } + + content, err = os.ReadFile("/proc/self/mountinfo") + if err == nil { + for line := range strings.Lines(string(content)) { + if strings.Contains(line, "overlay") && strings.Contains(line, "/var/lib/containers/storage/overlay") { + return true + } + } + } + + // Also check for systemd-nspawn + if _, err := os.Stat("/run/systemd/container"); err == nil { + return true + } + + return false +} + +func isRunningInVM() bool { + // Check for VM + if _, err := os.Stat("/sys/hypervisor/uuid"); err == nil { + return true + } + + // Check for other VM indicators + if _, err := os.Stat("/dev/virt-0"); err == nil { + return true + } + + return false +} + +func (p *telemetryPlugin) sendTelemetry() error { + data, err := json.MarshalIndent(p.event, "", " ") + if err != nil { + return err + } + + log.Debugf("sending telemetry:\n %s", data) + + req, err := http.NewRequest("POST", p.endpoint, bytes.NewBuffer(data)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", ipfs.GetUserAgentVersion()) + req.Close = true + + // Use client with timeout to prevent hanging + client := &http.Client{ + Timeout: httpTimeout, + } + resp, err := client.Do(req) + if err != nil { + log.Debugf("failed to send telemetry: %s", err) + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + err := fmt.Errorf("telemetry endpoint returned HTTP %d", resp.StatusCode) + log.Debug(err) + return err + } + log.Debugf("telemetry sent successfully (%d)", resp.StatusCode) + return nil +} diff --git a/plugin/plugins/telemetry/telemetry_test.go b/plugin/plugins/telemetry/telemetry_test.go new file mode 100644 index 000000000..6b88ced92 --- /dev/null +++ b/plugin/plugins/telemetry/telemetry_test.go @@ -0,0 +1,170 @@ +package telemetry + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/cockroachdb/pebble/v2" + logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/core" + "github.com/ipfs/kubo/core/node/libp2p" + "github.com/ipfs/kubo/plugin" + "github.com/ipfs/kubo/plugin/plugins/pebbleds" + "github.com/ipfs/kubo/repo/fsrepo" +) + +func mockServer(t *testing.T) (*httptest.Server, func() LogEvent) { + t.Helper() + + var e LogEvent + + // Create a mock HTTP test server + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check if the request is POST to the correct endpoint + if r.Method != "POST" || r.URL.Path != "/" { + t.Log("invalid request") + http.Error(w, "invalid request", http.StatusBadRequest) + return + } + + // Check content type + if r.Header.Get("Content-Type") != "application/json" { + t.Log("invalid content type") + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + // Check if the body is not empty + if r.Body == nil { + t.Log("empty body") + http.Error(w, "empty body", http.StatusBadRequest) + return + } + + // Read the body + body, _ := io.ReadAll(r.Body) + if len(body) == 0 { + t.Log("zero-length body") + http.Error(w, "empty body", http.StatusBadRequest) + return + } + + t.Logf("Received telemetry:\n %s", string(body)) + + err := json.Unmarshal(body, &e) + if err != nil { + t.Log("error unmarshaling event", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Return success + w.WriteHeader(http.StatusOK) + })), func() LogEvent { return e } +} + +func makeNode(t *testing.T) (node *core.IpfsNode, repopath string) { + t.Helper() + + // Create a Temporary Repo + repoPath, err := os.MkdirTemp("", "ipfs-shell") + if err != nil { + t.Fatal(err) + } + + pebbledspli := pebbleds.Plugins[0] + pebbledspl, ok := pebbledspli.(plugin.PluginDatastore) + if !ok { + t.Fatal("bad datastore plugin") + } + + err = fsrepo.AddDatastoreConfigHandler(pebbledspl.DatastoreTypeName(), pebbledspl.DatastoreConfigParser()) + if err != nil { + t.Fatal(err) + } + + // Create a config with default options and a 2048 bit key + cfg, err := config.Init(io.Discard, 2048) + if err != nil { + t.Fatal(err) + } + + cfg.Datastore.Spec = map[string]interface{}{ + "type": "pebbleds", + "prefix": "pebble.datastore", + "path": "pebbleds", + "formatMajorVersion": int(pebble.FormatNewest), + } + + // Create the repo with the config + err = fsrepo.Init(repoPath, cfg) + if err != nil { + t.Fatal(err) + } + + // Open the repo + repo, err := fsrepo.Open(repoPath) + if err != nil { + t.Fatal(err) + } + + // Construct the node + + nodeOptions := &core.BuildCfg{ + Online: true, + Routing: libp2p.NilRouterOption, + Repo: repo, + } + + node, err = core.NewNode(context.Background(), nodeOptions) + if err != nil { + t.Fatal(err) + } + + node.IsDaemon = true + return +} + +func TestSendTelemetry(t *testing.T) { + if err := logging.SetLogLevel("telemetry", "DEBUG"); err != nil { + t.Fatal(err) + } + ts, eventGetter := mockServer(t) + defer ts.Close() + + node, repoPath := makeNode(t) + + // Create a plugin instance + p := &telemetryPlugin{ + runOnce: true, + } + + // Initialize the plugin + pe := &plugin.Environment{ + Repo: repoPath, + Config: nil, + } + err := p.Init(pe) + if err != nil { + t.Fatalf("Init() failed: %v", err) + } + + p.endpoint = ts.URL + + // Start the plugin + err = p.Start(node) + if err != nil { + t.Fatalf("Start() failed: %v", err) + } + + e := eventGetter() + if e.UUID != p.event.UUID { + t.Fatal("uuid mismatch") + } +} diff --git a/plugin/plugins/telemetry/telemetry_uuid b/plugin/plugins/telemetry/telemetry_uuid new file mode 100644 index 000000000..f80cb9c3f --- /dev/null +++ b/plugin/plugins/telemetry/telemetry_uuid @@ -0,0 +1 @@ +289ffed8-c770-49ae-922f-b020c8f776f2 \ No newline at end of file diff --git a/repo/common/common_test.go b/repo/common/common_test.go index b999db459..3fb7198ec 100644 --- a/repo/common/common_test.go +++ b/repo/common/common_test.go @@ -3,7 +3,7 @@ package common import ( "testing" - "github.com/ipfs/kubo/thirdparty/assert" + "github.com/stretchr/testify/require" ) func TestMapMergeDeepReturnsNew(t *testing.T) { @@ -15,7 +15,7 @@ func TestMapMergeDeepReturnsNew(t *testing.T) { MapMergeDeep(leftMap, rightMap) - assert.True(leftMap["A"] == "Hello World", t, "MapMergeDeep should return a new map instance") + require.Equal(t, "Hello World", leftMap["A"], "MapMergeDeep should return a new map instance") } func TestMapMergeDeepNewKey(t *testing.T) { @@ -46,7 +46,7 @@ func TestMapMergeDeepNewKey(t *testing.T) { } */ - assert.True(result["B"] == "Bar", t, "New keys in right map should exist in resulting map") + require.Equal(t, "Bar", result["B"], "New keys in right map should exist in resulting map") } func TestMapMergeDeepRecursesOnMaps(t *testing.T) { @@ -92,8 +92,8 @@ func TestMapMergeDeepRecursesOnMaps(t *testing.T) { */ resultA := result["A"].(map[string]interface{}) - assert.True(resultA["B"] == "A value!", t, "Unaltered values should not change") - assert.True(resultA["C"] == "A different value!", t, "Nested values should be altered") + require.Equal(t, "A value!", resultA["B"], "Unaltered values should not change") + require.Equal(t, "A different value!", resultA["C"], "Nested values should be altered") } func TestMapMergeDeepRightNotAMap(t *testing.T) { @@ -128,5 +128,5 @@ func TestMapMergeDeepRightNotAMap(t *testing.T) { } */ - assert.True(result["A"] == "Not a map!", t, "Right values that are not a map should be set on the result") + require.Equal(t, "Not a map!", result["A"], "Right values that are not a map should be set on the result") } diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 7c3093b7a..671621ef3 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -14,9 +14,9 @@ import ( filestore "github.com/ipfs/boxo/filestore" keystore "github.com/ipfs/boxo/keystore" + version "github.com/ipfs/kubo" repo "github.com/ipfs/kubo/repo" "github.com/ipfs/kubo/repo/common" - dir "github.com/ipfs/kubo/thirdparty/dir" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" ds "github.com/ipfs/go-datastore" @@ -37,7 +37,7 @@ const LockFile = "repo.lock" var log = logging.Logger("fsrepo") // RepoVersion is the version number that we are currently expecting to see. -var RepoVersion = 16 +var RepoVersion = version.RepoVersion var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md Sorry for the inconvenience. In the future, these will run automatically.` @@ -192,7 +192,7 @@ func open(repoPath string, userConfigFilePath string) (repo.Repo, error) { } // check repo path, then check all constituent parts. - if err := dir.Writable(r.path); err != nil { + if err := fsutil.DirWritable(r.path); err != nil { return nil, err } diff --git a/repo/fsrepo/fsrepo_test.go b/repo/fsrepo/fsrepo_test.go index 6b30b107a..91d8e887a 100644 --- a/repo/fsrepo/fsrepo_test.go +++ b/repo/fsrepo/fsrepo_test.go @@ -7,17 +7,16 @@ import ( "path/filepath" "testing" - "github.com/ipfs/kubo/thirdparty/assert" - datastore "github.com/ipfs/go-datastore" config "github.com/ipfs/kubo/config" + "github.com/stretchr/testify/require" ) func TestInitIdempotence(t *testing.T) { t.Parallel() path := t.TempDir() for i := 0; i < 10; i++ { - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "multiple calls to init should succeed") + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "multiple calls to init should succeed") } } @@ -32,78 +31,78 @@ func TestCanManageReposIndependently(t *testing.T) { pathB := t.TempDir() t.Log("initialize two repos") - assert.Nil(Init(pathA, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "a", "should initialize successfully") - assert.Nil(Init(pathB, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "b", "should initialize successfully") + require.NoError(t, Init(pathA, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "a", "should initialize successfully") + require.NoError(t, Init(pathB, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "b", "should initialize successfully") t.Log("ensure repos initialized") - assert.True(IsInitialized(pathA), t, "a should be initialized") - assert.True(IsInitialized(pathB), t, "b should be initialized") + require.True(t, IsInitialized(pathA), "a should be initialized") + require.True(t, IsInitialized(pathB), "b should be initialized") t.Log("open the two repos") repoA, err := Open(pathA) - assert.Nil(err, t, "a") + require.NoError(t, err, "a") repoB, err := Open(pathB) - assert.Nil(err, t, "b") + require.NoError(t, err, "b") t.Log("close and remove b while a is open") - assert.Nil(repoB.Close(), t, "close b") - assert.Nil(Remove(pathB), t, "remove b") + require.NoError(t, repoB.Close(), "close b") + require.NoError(t, Remove(pathB), "remove b") t.Log("close and remove a") - assert.Nil(repoA.Close(), t) - assert.Nil(Remove(pathA), t) + require.NoError(t, repoA.Close()) + require.NoError(t, Remove(pathA)) } func TestDatastoreGetNotAllowedAfterClose(t *testing.T) { t.Parallel() path := t.TempDir() - assert.True(!IsInitialized(path), t, "should NOT be initialized") - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "should initialize successfully") + require.False(t, IsInitialized(path), "should NOT be initialized") + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), "should initialize successfully") r, err := Open(path) - assert.Nil(err, t, "should open successfully") + require.NoError(t, err, "should open successfully") k := "key" data := []byte(k) - assert.Nil(r.Datastore().Put(context.Background(), datastore.NewKey(k), data), t, "Put should be successful") + require.NoError(t, r.Datastore().Put(context.Background(), datastore.NewKey(k), data), "Put should be successful") - assert.Nil(r.Close(), t) + require.NoError(t, r.Close()) _, err = r.Datastore().Get(context.Background(), datastore.NewKey(k)) - assert.Err(err, t, "after closer, Get should be fail") + require.Error(t, err, "after closer, Get should be fail") } func TestDatastorePersistsFromRepoToRepo(t *testing.T) { t.Parallel() path := t.TempDir() - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t) + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()})) r1, err := Open(path) - assert.Nil(err, t) + require.NoError(t, err) k := "key" expected := []byte(k) - assert.Nil(r1.Datastore().Put(context.Background(), datastore.NewKey(k), expected), t, "using first repo, Put should be successful") - assert.Nil(r1.Close(), t) + require.NoError(t, r1.Datastore().Put(context.Background(), datastore.NewKey(k), expected), "using first repo, Put should be successful") + require.NoError(t, r1.Close()) r2, err := Open(path) - assert.Nil(err, t) + require.NoError(t, err) actual, err := r2.Datastore().Get(context.Background(), datastore.NewKey(k)) - assert.Nil(err, t, "using second repo, Get should be successful") - assert.Nil(r2.Close(), t) - assert.True(bytes.Equal(expected, actual), t, "data should match") + require.NoError(t, err, "using second repo, Get should be successful") + require.NoError(t, r2.Close()) + require.True(t, bytes.Equal(expected, actual), "data should match") } func TestOpenMoreThanOnceInSameProcess(t *testing.T) { t.Parallel() path := t.TempDir() - assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t) + require.NoError(t, Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()})) r1, err := Open(path) - assert.Nil(err, t, "first repo should open successfully") + require.NoError(t, err, "first repo should open successfully") r2, err := Open(path) - assert.Nil(err, t, "second repo should open successfully") - assert.True(r1 == r2, t, "second open returns same value") + require.NoError(t, err, "second repo should open successfully") + require.Equal(t, r1, r2, "second open returns same value") - assert.Nil(r1.Close(), t) - assert.Nil(r2.Close(), t) + require.NoError(t, r1.Close()) + require.NoError(t, r2.Close()) } diff --git a/repo/fsrepo/migrations/README.md b/repo/fsrepo/migrations/README.md new file mode 100644 index 000000000..cc4b85ca3 --- /dev/null +++ b/repo/fsrepo/migrations/README.md @@ -0,0 +1,134 @@ +# IPFS Repository Migrations + +This directory contains the migration system for IPFS repositories, handling both embedded and external migrations. + +## Migration System Overview + +### Embedded vs External Migrations + +Starting from **repo version 17**, Kubo uses **embedded migrations** that are built into the binary, eliminating the need to download external migration tools. + +- **Repo versions <17**: Use external binary migrations downloaded from fs-repo-migrations +- **Repo version 17+**: Use embedded migrations built into Kubo + +### Migration Functions + +#### `migrations.RunEmbeddedMigrations()` +- **Purpose**: Runs migrations that are embedded directly in the Kubo binary +- **Scope**: Handles repo version 17+ migrations +- **Performance**: Fast execution, no network downloads required +- **Dependencies**: Self-contained, uses only Kubo's internal dependencies +- **Usage**: Primary migration method for modern repo versions + +**Parameters**: +- `ctx`: Context for cancellation and timeouts +- `targetVersion`: Target repository version to migrate to +- `repoPath`: Path to the IPFS repository directory +- `allowDowngrade`: Whether to allow downgrade migrations + +```go +err = migrations.RunEmbeddedMigrations(ctx, targetVersion, repoPath, allowDowngrade) +if err != nil { + // Handle migration failure, may fall back to external migrations +} +``` + +#### `migrations.RunMigration()` with `migrations.ReadMigrationConfig()` +- **Purpose**: Runs external binary migrations downloaded from fs-repo-migrations +- **Scope**: Handles legacy repo versions <17 and serves as fallback +- **Performance**: Slower due to network downloads and external process execution +- **Dependencies**: Requires fs-repo-migrations binaries and network access +- **Usage**: Fallback method for legacy migrations + +```go +// Read migration configuration for external migrations +migrationCfg, err := migrations.ReadMigrationConfig(repoPath, configFile) +fetcher, err := migrations.GetMigrationFetcher(migrationCfg.DownloadSources, ...) +err = migrations.RunMigration(ctx, fetcher, targetVersion, repoPath, allowDowngrade) +``` + +## Migration Flow in Daemon Startup + +1. **Primary**: Try embedded migrations first (`RunEmbeddedMigrations`) +2. **Fallback**: If embedded migration fails, fall back to external migrations (`RunMigration`) +3. **Legacy Support**: External migrations ensure compatibility with older repo versions + +## Directory Structure + +``` +repo/fsrepo/migrations/ +├── README.md # This file +├── embedded.go # Embedded migration system +├── embedded_test.go # Tests for embedded migrations +├── migrations.go # External migration system +├── fs-repo-16-to-17/ # First embedded migration (16→17) +│ ├── migration/ +│ │ ├── migration.go # Migration logic +│ │ └── migration_test.go # Migration tests +│ ├── atomicfile/ +│ │ └── atomicfile.go # Atomic file operations +│ ├── main.go # Standalone migration binary +│ └── README.md # Migration-specific documentation +└── [other migration utilities] +``` + +## Adding New Embedded Migrations + +To add a new embedded migration (e.g., fs-repo-17-to-18): + +1. **Create migration package**: `fs-repo-17-to-18/migration/migration.go` +2. **Implement interface**: Ensure your migration implements the `EmbeddedMigration` interface +3. **Register migration**: Add to `embeddedMigrations` map in `embedded.go` +4. **Add tests**: Create comprehensive tests for your migration logic +5. **Update repo version**: Increment `RepoVersion` in `fsrepo.go` + +```go +// In embedded.go +var embeddedMigrations = map[string]EmbeddedMigration{ + "fs-repo-16-to-17": &mg16.Migration{}, + "fs-repo-17-to-18": &mg17.Migration{}, // Add new migration +} +``` + +## Migration Requirements + +Each embedded migration must: +- Implement the `EmbeddedMigration` interface +- Be reversible with proper backup handling +- Use atomic file operations to prevent corruption +- Preserve user customizations +- Include comprehensive tests +- Follow the established naming pattern + +## External Migration Support + +External migrations are maintained for: +- **Backward compatibility** with repo versions <17 +- **Fallback mechanism** if embedded migrations fail +- **Legacy installations** that cannot be upgraded directly + +The external migration system will continue to work but is not the preferred method for new migrations. + +## Security and Safety + +All migrations (embedded and external) include: +- **Atomic operations**: Prevent repository corruption +- **Backup creation**: Allow rollback if migration fails +- **Version validation**: Ensure migrations run on correct repo versions +- **Error handling**: Graceful failure with informative messages +- **User preservation**: Maintain custom configurations during migration + +## Testing + +Test both embedded and external migration systems: + +```bash +# Test embedded migrations +go test ./repo/fsrepo/migrations/ -run TestEmbedded + +# Test specific migration +go test ./repo/fsrepo/migrations/fs-repo-16-to-17/migration/ + +# Test migration registration +go test ./repo/fsrepo/migrations/ -run TestHasEmbedded +``` \ No newline at end of file diff --git a/repo/fsrepo/migrations/atomicfile/atomicfile.go b/repo/fsrepo/migrations/atomicfile/atomicfile.go new file mode 100644 index 000000000..87704196d --- /dev/null +++ b/repo/fsrepo/migrations/atomicfile/atomicfile.go @@ -0,0 +1,59 @@ +package atomicfile + +import ( + "io" + "os" + "path/filepath" +) + +// File represents an atomic file writer +type File struct { + *os.File + path string +} + +// New creates a new atomic file writer +func New(path string, mode os.FileMode) (*File, error) { + dir := filepath.Dir(path) + tempFile, err := os.CreateTemp(dir, ".tmp-"+filepath.Base(path)) + if err != nil { + return nil, err + } + + if err := tempFile.Chmod(mode); err != nil { + tempFile.Close() + os.Remove(tempFile.Name()) + return nil, err + } + + return &File{ + File: tempFile, + path: path, + }, nil +} + +// Close atomically replaces the target file with the temporary file +func (f *File) Close() error { + if err := f.File.Close(); err != nil { + os.Remove(f.File.Name()) + return err + } + + if err := os.Rename(f.File.Name(), f.path); err != nil { + os.Remove(f.File.Name()) + return err + } + + return nil +} + +// Abort removes the temporary file without replacing the target +func (f *File) Abort() error { + f.File.Close() + return os.Remove(f.File.Name()) +} + +// ReadFrom reads from the given reader into the atomic file +func (f *File) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(f.File, r) +} diff --git a/repo/fsrepo/migrations/embedded.go b/repo/fsrepo/migrations/embedded.go new file mode 100644 index 000000000..6c839ff1f --- /dev/null +++ b/repo/fsrepo/migrations/embedded.go @@ -0,0 +1,146 @@ +package migrations + +import ( + "context" + "fmt" + "log" + "os" + + mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration" +) + +// EmbeddedMigration represents an embedded migration that can be run directly +type EmbeddedMigration interface { + Versions() string + Apply(opts mg16.Options) error + Revert(opts mg16.Options) error + Reversible() bool +} + +// embeddedMigrations contains all embedded migrations +var embeddedMigrations = map[string]EmbeddedMigration{ + "fs-repo-16-to-17": &mg16.Migration{}, +} + +// RunEmbeddedMigration runs an embedded migration if available +func RunEmbeddedMigration(ctx context.Context, migrationName string, ipfsDir string, revert bool) error { + migration, exists := embeddedMigrations[migrationName] + if !exists { + return fmt.Errorf("embedded migration %s not found", migrationName) + } + + if revert && !migration.Reversible() { + return fmt.Errorf("migration %s is not reversible", migrationName) + } + + logger := log.New(os.Stdout, "", 0) + logger.Printf("Running embedded migration %s...", migrationName) + + opts := mg16.Options{ + Path: ipfsDir, + Verbose: true, + } + + var err error + if revert { + err = migration.Revert(opts) + } else { + err = migration.Apply(opts) + } + + if err != nil { + return fmt.Errorf("embedded migration %s failed: %w", migrationName, err) + } + + logger.Printf("Embedded migration %s completed successfully", migrationName) + return nil +} + +// HasEmbeddedMigration checks if a migration is available as embedded +func HasEmbeddedMigration(migrationName string) bool { + _, exists := embeddedMigrations[migrationName] + return exists +} + +// RunEmbeddedMigrations runs all needed embedded migrations from current version to target version. +// +// This function migrates an IPFS repository using embedded migrations that are built into the Kubo binary. +// Embedded migrations are available for repo version 17+ and provide fast, network-free migration execution. +// +// Parameters: +// - ctx: Context for cancellation and deadlines +// - targetVer: Target repository version to migrate to +// - ipfsDir: Path to the IPFS repository directory +// - allowDowngrade: Whether to allow downgrade migrations (reduces target version) +// +// Returns: +// - nil on successful migration +// - error if migration fails, repo path is invalid, or no embedded migrations are available +// +// Behavior: +// - Validates that ipfsDir contains a valid IPFS repository +// - Determines current repository version automatically +// - Returns immediately if already at target version +// - Prevents downgrades unless allowDowngrade is true +// - Runs all necessary migrations in sequence (e.g., 16→17→18 if going from 16 to 18) +// - Creates backups and uses atomic operations to prevent corruption +// +// Error conditions: +// - Repository path is invalid or inaccessible +// - Current version cannot be determined +// - Downgrade attempted with allowDowngrade=false +// - No embedded migrations available for the version range +// - Individual migration fails during execution +// +// Example: +// +// err := RunEmbeddedMigrations(ctx, 17, "/path/to/.ipfs", false) +// if err != nil { +// // Handle migration failure, may need to fall back to external migrations +// } +func RunEmbeddedMigrations(ctx context.Context, targetVer int, ipfsDir string, allowDowngrade bool) error { + ipfsDir, err := CheckIpfsDir(ipfsDir) + if err != nil { + return err + } + + fromVer, err := RepoVersion(ipfsDir) + if err != nil { + return fmt.Errorf("could not get repo version: %w", err) + } + + if fromVer == targetVer { + return nil + } + + revert := fromVer > targetVer + if revert && !allowDowngrade { + return fmt.Errorf("downgrade not allowed from %d to %d", fromVer, targetVer) + } + + logger := log.New(os.Stdout, "", 0) + logger.Print("Looking for embedded migrations.") + + migrations, _, err := findMigrations(ctx, fromVer, targetVer) + if err != nil { + return err + } + + embeddedCount := 0 + for _, migrationName := range migrations { + if HasEmbeddedMigration(migrationName) { + err = RunEmbeddedMigration(ctx, migrationName, ipfsDir, revert) + if err != nil { + return err + } + embeddedCount++ + } + } + + if embeddedCount == 0 { + return fmt.Errorf("no embedded migrations found for version %d to %d", fromVer, targetVer) + } + + logger.Printf("Success: fs-repo migrated to version %d using embedded migrations.\n", targetVer) + return nil +} diff --git a/repo/fsrepo/migrations/embedded_test.go b/repo/fsrepo/migrations/embedded_test.go new file mode 100644 index 000000000..b739d1e0c --- /dev/null +++ b/repo/fsrepo/migrations/embedded_test.go @@ -0,0 +1,36 @@ +package migrations + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHasEmbeddedMigration(t *testing.T) { + // Test that the 16-to-17 migration is registered + assert.True(t, HasEmbeddedMigration("fs-repo-16-to-17"), + "fs-repo-16-to-17 migration should be registered") + + // Test that a non-existent migration is not found + assert.False(t, HasEmbeddedMigration("fs-repo-99-to-100"), + "fs-repo-99-to-100 migration should not be registered") +} + +func TestEmbeddedMigrations(t *testing.T) { + // Test that we have at least one embedded migration + assert.NotEmpty(t, embeddedMigrations, "No embedded migrations found") + + // Test that all registered migrations implement the interface + for name, migration := range embeddedMigrations { + assert.NotEmpty(t, migration.Versions(), + "Migration %s has empty versions", name) + } +} + +func TestRunEmbeddedMigration(t *testing.T) { + // Test that running a non-existent migration returns an error + err := RunEmbeddedMigration(context.Background(), "non-existent", "/tmp", false) + require.Error(t, err, "Expected error for non-existent migration") +} diff --git a/repo/fsrepo/migrations/fetcher.go b/repo/fsrepo/migrations/fetcher.go index c81554c3c..cc48a3b77 100644 --- a/repo/fsrepo/migrations/fetcher.go +++ b/repo/fsrepo/migrations/fetcher.go @@ -2,11 +2,10 @@ package migrations import ( "context" + "errors" "fmt" "io" "os" - - "go.uber.org/multierr" ) const ( @@ -49,23 +48,23 @@ func NewMultiFetcher(f ...Fetcher) *MultiFetcher { // Fetch attempts to fetch the file at each of its fetchers until one succeeds. func (f *MultiFetcher) Fetch(ctx context.Context, ipfsPath string) ([]byte, error) { - var errs error + var errs []error for _, fetcher := range f.fetchers { out, err := fetcher.Fetch(ctx, ipfsPath) if err == nil { return out, nil } fmt.Printf("Error fetching: %s\n", err.Error()) - errs = multierr.Append(errs, err) + errs = append(errs, err) } - return nil, errs + return nil, errors.Join(errs...) } func (f *MultiFetcher) Close() error { var errs error for _, fetcher := range f.fetchers { if err := fetcher.Close(); err != nil { - errs = multierr.Append(errs, err) + errs = errors.Join(errs, err) } } return errs diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/main.go b/repo/fsrepo/migrations/fs-repo-16-to-17/main.go new file mode 100644 index 000000000..df0963f3b --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-16-to-17/main.go @@ -0,0 +1,63 @@ +// Package main implements fs-repo-16-to-17 migration for IPFS repositories. +// +// This migration transitions repositories from version 16 to 17, introducing +// the AutoConf system that replaces hardcoded network defaults with dynamic +// configuration fetched from autoconf.json. +// +// Changes made: +// - Enables AutoConf system with default settings +// - Migrates default bootstrap peers to "auto" sentinel value +// - Sets DNS.Resolvers["."] to "auto" for dynamic DNS resolver configuration +// - Migrates Routing.DelegatedRouters to ["auto"] +// - Migrates Ipns.DelegatedPublishers to ["auto"] +// - Preserves user customizations (custom bootstrap peers, DNS resolvers) +// +// The migration is reversible and creates config.16-to-17.bak for rollback. +// +// Usage: +// +// fs-repo-16-to-17 -path /path/to/ipfs/repo [-verbose] [-revert] +// +// This migration is embedded in Kubo starting from version 0.37 and runs +// automatically during daemon startup. This standalone binary is provided +// for manual migration scenarios. +package main + +import ( + "flag" + "fmt" + "os" + + mg16 "github.com/ipfs/kubo/repo/fsrepo/migrations/fs-repo-16-to-17/migration" +) + +func main() { + var path = flag.String("path", "", "Path to IPFS repository") + var verbose = flag.Bool("verbose", false, "Enable verbose output") + var revert = flag.Bool("revert", false, "Revert migration") + flag.Parse() + + if *path == "" { + fmt.Fprintf(os.Stderr, "Error: -path flag is required\n") + flag.Usage() + os.Exit(1) + } + + m := mg16.Migration{} + opts := mg16.Options{ + Path: *path, + Verbose: *verbose, + } + + var err error + if *revert { + err = m.Revert(opts) + } else { + err = m.Apply(opts) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "Migration failed: %v\n", err) + os.Exit(1) + } +} diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go new file mode 100644 index 000000000..01cab8932 --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration.go @@ -0,0 +1,492 @@ +// package mg16 contains the code to perform 16-17 repository migration in Kubo. +// This handles the following: +// - Migrate default bootstrap peers to "auto" +// - Migrate DNS resolvers to use "auto" for "." eTLD +// - Enable AutoConf system with default settings +// - Increment repo version to 17 +package mg16 + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "slices" + "strings" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/repo/fsrepo/migrations/atomicfile" +) + +// Options contains migration options for embedded migrations +type Options struct { + Path string + Verbose bool +} + +const backupSuffix = ".16-to-17.bak" + +// DefaultBootstrapAddresses are the hardcoded bootstrap addresses from Kubo 0.36 +// for IPFS. they are nodes run by the IPFS team. docs on these later. +// As with all p2p networks, bootstrap is an important security concern. +// This list is used during migration to detect which peers are defaults vs custom. +var DefaultBootstrapAddresses = []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", // rust-libp2p-server + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", // js-libp2p-amino-dht-bootstrapper + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io + "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io +} + +// Migration implements the migration described above. +type Migration struct{} + +// Versions returns the current version string for this migration. +func (m Migration) Versions() string { + return "16-to-17" +} + +// Reversible returns true, as we keep old config around +func (m Migration) Reversible() bool { + return true +} + +// Apply update the config. +func (m Migration) Apply(opts Options) error { + if opts.Verbose { + fmt.Printf("applying %s repo migration\n", m.Versions()) + } + + // Check version + if err := checkVersion(opts.Path, "16"); err != nil { + return err + } + + if opts.Verbose { + fmt.Println("> Upgrading config to use AutoConf system") + } + + path := filepath.Join(opts.Path, "config") + in, err := os.Open(path) + if err != nil { + return err + } + + // make backup + backup, err := atomicfile.New(path+backupSuffix, 0600) + if err != nil { + return err + } + if _, err := backup.ReadFrom(in); err != nil { + panicOnError(backup.Abort()) + return err + } + if _, err := in.Seek(0, io.SeekStart); err != nil { + panicOnError(backup.Abort()) + return err + } + + // Create a temp file to write the output to on success + out, err := atomicfile.New(path, 0600) + if err != nil { + panicOnError(backup.Abort()) + panicOnError(in.Close()) + return err + } + + if err := convert(in, out, opts.Path); err != nil { + panicOnError(out.Abort()) + panicOnError(backup.Abort()) + panicOnError(in.Close()) + return err + } + + if err := in.Close(); err != nil { + panicOnError(out.Abort()) + panicOnError(backup.Abort()) + } + + if err := writeVersion(opts.Path, "17"); err != nil { + fmt.Println("failed to update version file to 17") + // There was an error so abort writing the output and clean up temp file + panicOnError(out.Abort()) + panicOnError(backup.Abort()) + return err + } else { + // Write the output and clean up temp file + panicOnError(out.Close()) + panicOnError(backup.Close()) + } + + if opts.Verbose { + fmt.Println("updated version file") + fmt.Println("Migration 16 to 17 succeeded") + } + return nil +} + +// panicOnError is reserved for checks we can't solve transactionally if an error occurs +func panicOnError(e error) { + if e != nil { + panic(fmt.Errorf("error can't be dealt with transactionally: %w", e)) + } +} + +func (m Migration) Revert(opts Options) error { + if opts.Verbose { + fmt.Println("reverting migration") + } + + if err := checkVersion(opts.Path, "17"); err != nil { + return err + } + + cfg := filepath.Join(opts.Path, "config") + if err := os.Rename(cfg+backupSuffix, cfg); err != nil { + return err + } + + if err := writeVersion(opts.Path, "16"); err != nil { + return err + } + if opts.Verbose { + fmt.Println("lowered version number to 16") + } + + return nil +} + +// checkVersion verifies the repo is at the expected version +func checkVersion(repoPath string, expectedVersion string) error { + versionPath := filepath.Join(repoPath, "version") + versionBytes, err := os.ReadFile(versionPath) + if err != nil { + return fmt.Errorf("could not read version file: %w", err) + } + version := strings.TrimSpace(string(versionBytes)) + if version != expectedVersion { + return fmt.Errorf("expected version %s, got %s", expectedVersion, version) + } + return nil +} + +// writeVersion writes the version to the repo +func writeVersion(repoPath string, version string) error { + versionPath := filepath.Join(repoPath, "version") + return os.WriteFile(versionPath, []byte(version), 0644) +} + +// convert converts the config from version 16 to 17 +func convert(in io.Reader, out io.Writer, repoPath string) error { + confMap := make(map[string]any) + if err := json.NewDecoder(in).Decode(&confMap); err != nil { + return err + } + + // Enable AutoConf system + if err := enableAutoConf(confMap); err != nil { + return err + } + + // Migrate Bootstrap peers + if err := migrateBootstrap(confMap, repoPath); err != nil { + return err + } + + // Migrate DNS resolvers + if err := migrateDNSResolvers(confMap); err != nil { + return err + } + + // Migrate DelegatedRouters + if err := migrateDelegatedRouters(confMap); err != nil { + return err + } + + // Migrate DelegatedPublishers + if err := migrateDelegatedPublishers(confMap); err != nil { + return err + } + + // Save new config + fixed, err := json.MarshalIndent(confMap, "", " ") + if err != nil { + return err + } + + if _, err := out.Write(fixed); err != nil { + return err + } + _, err = out.Write([]byte("\n")) + return err +} + +// enableAutoConf adds AutoConf section to config +func enableAutoConf(confMap map[string]any) error { + // Check if AutoConf already exists + if _, exists := confMap["AutoConf"]; exists { + return nil + } + + // Add empty AutoConf section - all fields will use implicit defaults: + // - Enabled defaults to true (via DefaultAutoConfEnabled) + // - URL defaults to mainnet URL (via DefaultAutoConfURL) + // - RefreshInterval defaults to 24h (via DefaultAutoConfRefreshInterval) + // - TLSInsecureSkipVerify defaults to false (no WithDefault, but false is zero value) + confMap["AutoConf"] = map[string]any{} + + return nil +} + +// migrateBootstrap migrates bootstrap peers to use "auto" +func migrateBootstrap(confMap map[string]any, repoPath string) error { + bootstrap, exists := confMap["Bootstrap"] + if !exists { + // No bootstrap section, add "auto" + confMap["Bootstrap"] = []string{"auto"} + return nil + } + + bootstrapSlice, ok := bootstrap.([]interface{}) + if !ok { + // Invalid bootstrap format, replace with "auto" + confMap["Bootstrap"] = []string{"auto"} + return nil + } + + // Convert to string slice + var bootstrapPeers []string + for _, peer := range bootstrapSlice { + if peerStr, ok := peer.(string); ok { + bootstrapPeers = append(bootstrapPeers, peerStr) + } + } + + // Check if we should replace with "auto" + newBootstrap := processBootstrapPeers(bootstrapPeers, repoPath) + confMap["Bootstrap"] = newBootstrap + + return nil +} + +// processBootstrapPeers processes bootstrap peers according to migration rules +func processBootstrapPeers(peers []string, repoPath string) []string { + // If empty, use "auto" + if len(peers) == 0 { + return []string{"auto"} + } + + // Separate default peers from custom ones + var customPeers []string + var hasDefaultPeers bool + + for _, peer := range peers { + if slices.Contains(DefaultBootstrapAddresses, peer) { + hasDefaultPeers = true + } else { + customPeers = append(customPeers, peer) + } + } + + // If we have default peers, replace them with "auto" + if hasDefaultPeers { + return append([]string{"auto"}, customPeers...) + } + + // No default peers found, keep as is + return peers +} + +// migrateDNSResolvers migrates DNS resolvers to use "auto" for "." eTLD +func migrateDNSResolvers(confMap map[string]any) error { + dnsSection, exists := confMap["DNS"] + if !exists { + // No DNS section, create it with "auto" + confMap["DNS"] = map[string]any{ + "Resolvers": map[string]string{ + ".": config.AutoPlaceholder, + }, + } + return nil + } + + dns, ok := dnsSection.(map[string]any) + if !ok { + // Invalid DNS format, replace with "auto" + confMap["DNS"] = map[string]any{ + "Resolvers": map[string]string{ + ".": config.AutoPlaceholder, + }, + } + return nil + } + + resolvers, exists := dns["Resolvers"] + if !exists { + // No resolvers, add "auto" + dns["Resolvers"] = map[string]string{ + ".": config.AutoPlaceholder, + } + return nil + } + + resolversMap, ok := resolvers.(map[string]any) + if !ok { + // Invalid resolvers format, replace with "auto" + dns["Resolvers"] = map[string]string{ + ".": config.AutoPlaceholder, + } + return nil + } + + // Convert to string map and replace default resolvers with "auto" + stringResolvers := make(map[string]string) + defaultResolvers := map[string]string{ + "https://dns.eth.limo/dns-query": "auto", + "https://dns.eth.link/dns-query": "auto", + "https://resolver.cloudflare-eth.com/dns-query": "auto", + } + + for k, v := range resolversMap { + if vStr, ok := v.(string); ok { + // Check if this is a default resolver that should be replaced + if replacement, isDefault := defaultResolvers[vStr]; isDefault { + stringResolvers[k] = replacement + } else { + stringResolvers[k] = vStr + } + } + } + + // If "." is not set or empty, set it to "auto" + if _, exists := stringResolvers["."]; !exists { + stringResolvers["."] = "auto" + } + + dns["Resolvers"] = stringResolvers + return nil +} + +// migrateDelegatedRouters migrates DelegatedRouters to use "auto" +func migrateDelegatedRouters(confMap map[string]any) error { + routing, exists := confMap["Routing"] + if !exists { + // No routing section, create it with "auto" + confMap["Routing"] = map[string]any{ + "DelegatedRouters": []string{"auto"}, + } + return nil + } + + routingMap, ok := routing.(map[string]any) + if !ok { + // Invalid routing format, replace with "auto" + confMap["Routing"] = map[string]any{ + "DelegatedRouters": []string{"auto"}, + } + return nil + } + + delegatedRouters, exists := routingMap["DelegatedRouters"] + if !exists { + // No delegated routers, add "auto" + routingMap["DelegatedRouters"] = []string{"auto"} + return nil + } + + // Check if it's empty or nil + if shouldReplaceWithAuto(delegatedRouters) { + routingMap["DelegatedRouters"] = []string{"auto"} + return nil + } + + // Process the list to replace cid.contact with "auto" and preserve others + if slice, ok := delegatedRouters.([]interface{}); ok { + var newRouters []string + hasAuto := false + + for _, router := range slice { + if routerStr, ok := router.(string); ok { + if routerStr == "https://cid.contact" { + if !hasAuto { + newRouters = append(newRouters, "auto") + hasAuto = true + } + } else { + newRouters = append(newRouters, routerStr) + } + } + } + + // If empty after processing, add "auto" + if len(newRouters) == 0 { + newRouters = []string{"auto"} + } + + routingMap["DelegatedRouters"] = newRouters + } + + return nil +} + +// migrateDelegatedPublishers migrates DelegatedPublishers to use "auto" +func migrateDelegatedPublishers(confMap map[string]any) error { + ipns, exists := confMap["Ipns"] + if !exists { + // No IPNS section, create it with "auto" + confMap["Ipns"] = map[string]any{ + "DelegatedPublishers": []string{"auto"}, + } + return nil + } + + ipnsMap, ok := ipns.(map[string]any) + if !ok { + // Invalid IPNS format, replace with "auto" + confMap["Ipns"] = map[string]any{ + "DelegatedPublishers": []string{"auto"}, + } + return nil + } + + delegatedPublishers, exists := ipnsMap["DelegatedPublishers"] + if !exists { + // No delegated publishers, add "auto" + ipnsMap["DelegatedPublishers"] = []string{"auto"} + return nil + } + + // Check if it's empty or nil - only then replace with "auto" + // Otherwise preserve custom publishers + if shouldReplaceWithAuto(delegatedPublishers) { + ipnsMap["DelegatedPublishers"] = []string{"auto"} + } + // If there are custom publishers, leave them as is + + return nil +} + +// shouldReplaceWithAuto checks if a field should be replaced with "auto" +func shouldReplaceWithAuto(field any) bool { + // If it's nil, replace with "auto" + if field == nil { + return true + } + + // If it's an empty slice, replace with "auto" + if slice, ok := field.([]interface{}); ok { + return len(slice) == 0 + } + + // If it's an empty array, replace with "auto" + if reflect.TypeOf(field).Kind() == reflect.Slice { + v := reflect.ValueOf(field) + return v.Len() == 0 + } + + return false +} diff --git a/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go new file mode 100644 index 000000000..2e80809a4 --- /dev/null +++ b/repo/fsrepo/migrations/fs-repo-16-to-17/migration/migration_test.go @@ -0,0 +1,479 @@ +package mg16 + +import ( + "bytes" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Helper function to run migration on JSON input and return result +func runMigrationOnJSON(t *testing.T, input string) map[string]interface{} { + t.Helper() + var output bytes.Buffer + // Use t.TempDir() for test isolation and parallel execution support + tempDir := t.TempDir() + err := convert(bytes.NewReader([]byte(input)), &output, tempDir) + require.NoError(t, err) + + var result map[string]interface{} + err = json.Unmarshal(output.Bytes(), &result) + require.NoError(t, err) + + return result +} + +// Helper function to assert nested map key has expected value +func assertMapKeyEquals(t *testing.T, result map[string]interface{}, path []string, key string, expected interface{}) { + t.Helper() + current := result + for _, p := range path { + section, exists := current[p] + require.True(t, exists, "Section %s not found in path %v", p, path) + current = section.(map[string]interface{}) + } + + assert.Equal(t, expected, current[key], "Expected %s to be %v", key, expected) +} + +// Helper function to assert slice contains expected values +func assertSliceEquals(t *testing.T, result map[string]interface{}, path []string, expected []string) { + t.Helper() + current := result + for i, p := range path[:len(path)-1] { + section, exists := current[p] + require.True(t, exists, "Section %s not found in path %v at index %d", p, path, i) + current = section.(map[string]interface{}) + } + + sliceKey := path[len(path)-1] + slice, exists := current[sliceKey] + require.True(t, exists, "Slice %s not found", sliceKey) + + actualSlice := slice.([]interface{}) + require.Equal(t, len(expected), len(actualSlice), "Expected slice length %d, got %d", len(expected), len(actualSlice)) + + for i, exp := range expected { + assert.Equal(t, exp, actualSlice[i], "Expected slice[%d] to be %s", i, exp) + } +} + +// Helper to build test config JSON with specified fields +func buildTestConfig(fields map[string]interface{}) string { + config := map[string]interface{}{ + "Identity": map[string]interface{}{"PeerID": "QmTest"}, + } + for k, v := range fields { + config[k] = v + } + data, _ := json.MarshalIndent(config, "", " ") + return string(data) +} + +// Helper to run migration and get DNS resolvers +func runMigrationAndGetDNSResolvers(t *testing.T, input string) map[string]interface{} { + t.Helper() + result := runMigrationOnJSON(t, input) + dns := result["DNS"].(map[string]interface{}) + return dns["Resolvers"].(map[string]interface{}) +} + +// Helper to assert multiple resolver values +func assertResolvers(t *testing.T, resolvers map[string]interface{}, expected map[string]string) { + t.Helper() + for key, expectedValue := range expected { + assert.Equal(t, expectedValue, resolvers[key], "Expected %s resolver to be %v", key, expectedValue) + } +} + +// ============================================================================= +// End-to-End Migration Tests +// ============================================================================= + +func TestMigration(t *testing.T) { + // Create a temporary directory for testing + tempDir, err := os.MkdirTemp("", "migration-test-16-to-17") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + // Create a test config with default bootstrap peers + testConfig := map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", // Custom peer + }, + "DNS": map[string]interface{}{ + "Resolvers": map[string]string{}, + }, + "Routing": map[string]interface{}{ + "DelegatedRouters": []string{}, + }, + "Ipns": map[string]interface{}{ + "ResolveCacheSize": 128, + }, + "Identity": map[string]interface{}{ + "PeerID": "QmTest", + }, + "Version": map[string]interface{}{ + "Current": "0.36.0", + }, + } + + // Write test config + configPath := filepath.Join(tempDir, "config") + configData, err := json.MarshalIndent(testConfig, "", " ") + require.NoError(t, err) + err = os.WriteFile(configPath, configData, 0644) + require.NoError(t, err) + + // Create version file + versionPath := filepath.Join(tempDir, "version") + err = os.WriteFile(versionPath, []byte("16"), 0644) + require.NoError(t, err) + + // Run migration + migration := &Migration{} + opts := Options{ + Path: tempDir, + Verbose: true, + } + + err = migration.Apply(opts) + require.NoError(t, err) + + // Verify version was updated + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + assert.Equal(t, "17", string(versionData), "Expected version 17") + + // Verify config was updated + configData, err = os.ReadFile(configPath) + require.NoError(t, err) + + var updatedConfig map[string]interface{} + err = json.Unmarshal(configData, &updatedConfig) + require.NoError(t, err) + + // Check AutoConf was added + autoConf, exists := updatedConfig["AutoConf"] + assert.True(t, exists, "AutoConf section not added") + autoConfMap := autoConf.(map[string]interface{}) + // URL is not set explicitly in migration (uses implicit default) + _, hasURL := autoConfMap["URL"] + assert.False(t, hasURL, "AutoConf URL should not be explicitly set in migration") + + // Check Bootstrap was updated + bootstrap := updatedConfig["Bootstrap"].([]interface{}) + assert.Equal(t, 2, len(bootstrap), "Expected 2 bootstrap entries") + assert.Equal(t, "auto", bootstrap[0], "Expected first bootstrap entry to be 'auto'") + assert.Equal(t, "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", bootstrap[1], "Expected custom peer to be preserved") + + // Check DNS.Resolvers was updated + dns := updatedConfig["DNS"].(map[string]interface{}) + resolvers := dns["Resolvers"].(map[string]interface{}) + assert.Equal(t, "auto", resolvers["."], "Expected DNS resolver for '.' to be 'auto'") + + // Check Routing.DelegatedRouters was updated + routing := updatedConfig["Routing"].(map[string]interface{}) + delegatedRouters := routing["DelegatedRouters"].([]interface{}) + assert.Equal(t, 1, len(delegatedRouters)) + assert.Equal(t, "auto", delegatedRouters[0], "Expected DelegatedRouters to be ['auto']") + + // Check Ipns.DelegatedPublishers was updated + ipns := updatedConfig["Ipns"].(map[string]interface{}) + delegatedPublishers := ipns["DelegatedPublishers"].([]interface{}) + assert.Equal(t, 1, len(delegatedPublishers)) + assert.Equal(t, "auto", delegatedPublishers[0], "Expected DelegatedPublishers to be ['auto']") + + // Test revert + err = migration.Revert(opts) + require.NoError(t, err) + + // Verify version was reverted + versionData, err = os.ReadFile(versionPath) + require.NoError(t, err) + assert.Equal(t, "16", string(versionData), "Expected version 16 after revert") +} + +func TestConvert(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + }, + }) + + result := runMigrationOnJSON(t, input) + + // Check that AutoConf section was added but is empty (using implicit defaults) + autoConf, exists := result["AutoConf"] + require.True(t, exists, "AutoConf section should exist") + autoConfMap, ok := autoConf.(map[string]interface{}) + require.True(t, ok, "AutoConf should be a map") + require.Empty(t, autoConfMap, "AutoConf should be empty (using implicit defaults)") + + // Check that Bootstrap was updated to "auto" + assertSliceEquals(t, result, []string{"Bootstrap"}, []string{"auto"}) +} + +// ============================================================================= +// Bootstrap Migration Tests +// ============================================================================= + +func TestBootstrapMigration(t *testing.T) { + t.Parallel() + + t.Run("process bootstrap peers logic verification", func(t *testing.T) { + t.Parallel() + tests := []struct { + name string + peers []string + expected []string + }{ + { + name: "empty peers", + peers: []string{}, + expected: []string{"auto"}, + }, + { + name: "only default peers", + peers: []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + }, + expected: []string{"auto"}, + }, + { + name: "mixed default and custom peers", + peers: []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer", + }, + expected: []string{"auto", "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer"}, + }, + { + name: "only custom peers", + peers: []string{ + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer1", + "/ip4/192.168.1.2/tcp/4001/p2p/QmCustomPeer2", + }, + expected: []string{ + "/ip4/192.168.1.1/tcp/4001/p2p/QmCustomPeer1", + "/ip4/192.168.1.2/tcp/4001/p2p/QmCustomPeer2", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + result := processBootstrapPeers(tt.peers, "") + require.Equal(t, len(tt.expected), len(result), "Expected %d peers, got %d", len(tt.expected), len(result)) + for i, expected := range tt.expected { + assert.Equal(t, expected, result[i], "Expected peer %d to be %s", i, expected) + } + }) + } + }) + + t.Run("replaces all old default bootstrapper peers with auto entry", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + }, + }) + + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Bootstrap"}, []string{"auto"}) + }) + + t.Run("creates Bootstrap section with auto when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Bootstrap"}, []string{"auto"}) + }) +} + +// ============================================================================= +// DNS Migration Tests +// ============================================================================= + +func TestDNSMigration(t *testing.T) { + t.Parallel() + + t.Run("creates DNS section with auto resolver when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertMapKeyEquals(t, result, []string{"DNS", "Resolvers"}, ".", "auto") + }) + + t.Run("preserves all custom DNS resolvers unchanged", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "DNS": map[string]interface{}{ + "Resolvers": map[string]string{ + ".": "https://my-custom-resolver.com", + ".eth": "https://eth.resolver", + }, + }, + }) + + resolvers := runMigrationAndGetDNSResolvers(t, input) + assertResolvers(t, resolvers, map[string]string{ + ".": "https://my-custom-resolver.com", + ".eth": "https://eth.resolver", + }) + }) + + t.Run("preserves custom dot and eth resolvers unchanged", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "DNS": map[string]interface{}{ + "Resolvers": map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", + ".eth": "https://example.com/dns-query", + }, + }, + }) + + resolvers := runMigrationAndGetDNSResolvers(t, input) + assertResolvers(t, resolvers, map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", + ".eth": "https://example.com/dns-query", + }) + }) + + t.Run("replaces old default eth resolver with auto", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "DNS": map[string]interface{}{ + "Resolvers": map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", + ".eth": "https://dns.eth.limo/dns-query", // should be replaced + ".crypto": "https://resolver.cloudflare-eth.com/dns-query", // should be replaced + ".link": "https://dns.eth.link/dns-query", // should be replaced + }, + }, + }) + + resolvers := runMigrationAndGetDNSResolvers(t, input) + assertResolvers(t, resolvers, map[string]string{ + ".": "https://cloudflare-dns.com/dns-query", // preserved + ".eth": "auto", // replaced + ".crypto": "auto", // replaced + ".link": "auto", // replaced + }) + }) +} + +// ============================================================================= +// Routing Migration Tests +// ============================================================================= + +func TestRoutingMigration(t *testing.T) { + t.Parallel() + + t.Run("creates Routing section with auto DelegatedRouters when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Routing", "DelegatedRouters"}, []string{"auto"}) + }) + + t.Run("replaces cid.contact with auto while preserving custom routers added by user", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Routing": map[string]interface{}{ + "DelegatedRouters": []string{ + "https://cid.contact", + "https://my-custom-router.com", + }, + }, + }) + + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Routing", "DelegatedRouters"}, []string{"auto", "https://my-custom-router.com"}) + }) +} + +// ============================================================================= +// IPNS Migration Tests +// ============================================================================= + +func TestIpnsMigration(t *testing.T) { + t.Parallel() + + t.Run("creates Ipns section with auto DelegatedPublishers when missing", func(t *testing.T) { + t.Parallel() + input := `{"Identity": {"PeerID": "QmTest"}}` + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Ipns", "DelegatedPublishers"}, []string{"auto"}) + }) + + t.Run("preserves existing custom DelegatedPublishers unchanged", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Ipns": map[string]interface{}{ + "DelegatedPublishers": []string{ + "https://my-publisher.com", + "https://another-publisher.com", + }, + }, + }) + + result := runMigrationOnJSON(t, input) + assertSliceEquals(t, result, []string{"Ipns", "DelegatedPublishers"}, []string{"https://my-publisher.com", "https://another-publisher.com"}) + }) + + t.Run("adds auto DelegatedPublishers to existing Ipns section", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "Ipns": map[string]interface{}{ + "ResolveCacheSize": 128, + }, + }) + + result := runMigrationOnJSON(t, input) + assertMapKeyEquals(t, result, []string{"Ipns"}, "ResolveCacheSize", float64(128)) + assertSliceEquals(t, result, []string{"Ipns", "DelegatedPublishers"}, []string{"auto"}) + }) +} + +// ============================================================================= +// AutoConf Migration Tests +// ============================================================================= + +func TestAutoConfMigration(t *testing.T) { + t.Parallel() + + t.Run("preserves existing AutoConf fields unchanged", func(t *testing.T) { + t.Parallel() + input := buildTestConfig(map[string]interface{}{ + "AutoConf": map[string]interface{}{ + "URL": "https://custom.example.com/autoconf.json", + "Enabled": false, + "CustomField": "preserved", + }, + }) + + result := runMigrationOnJSON(t, input) + assertMapKeyEquals(t, result, []string{"AutoConf"}, "URL", "https://custom.example.com/autoconf.json") + assertMapKeyEquals(t, result, []string{"AutoConf"}, "Enabled", false) + assertMapKeyEquals(t, result, []string{"AutoConf"}, "CustomField", "preserved") + }) +} diff --git a/repo/fsrepo/migrations/migrations.go b/repo/fsrepo/migrations/migrations.go index e612b8abb..c5b23a17d 100644 --- a/repo/fsrepo/migrations/migrations.go +++ b/repo/fsrepo/migrations/migrations.go @@ -25,6 +25,10 @@ const ( // RunMigration finds, downloads, and runs the individual migrations needed to // migrate the repo from its current version to the target version. +// +// Deprecated: This function downloads migration binaries from the internet and will be removed +// in a future version. Use RunHybridMigrations for modern migrations with embedded support, +// or RunEmbeddedMigrations for repo versions ≥16. func RunMigration(ctx context.Context, fetcher Fetcher, targetVer int, ipfsDir string, allowDowngrade bool) error { ipfsDir, err := CheckIpfsDir(ipfsDir) if err != nil { @@ -114,6 +118,9 @@ func ExeName(name string) string { // ReadMigrationConfig reads the Migration section of the IPFS config, avoiding // reading anything other than the Migration section. That way, we're free to // make arbitrary changes to all _other_ sections in migrations. +// +// Deprecated: This function is used by legacy migration downloads and will be removed +// in a future version. Use RunHybridMigrations or RunEmbeddedMigrations instead. func ReadMigrationConfig(repoRoot string, userConfigFile string) (*config.Migration, error) { var cfg struct { Migration config.Migration @@ -151,7 +158,10 @@ func ReadMigrationConfig(repoRoot string, userConfigFile string) (*config.Migrat } // GetMigrationFetcher creates one or more fetchers according to -// downloadSources,. +// downloadSources. +// +// Deprecated: This function is used by legacy migration downloads and will be removed +// in a future version. Use RunHybridMigrations or RunEmbeddedMigrations instead. func GetMigrationFetcher(downloadSources []string, distPath string, newIpfsFetcher func(string) Fetcher) (Fetcher, error) { const httpUserAgent = "kubo/migration" const numTriesPerHTTP = 3 @@ -163,9 +173,7 @@ func GetMigrationFetcher(downloadSources []string, distPath string, newIpfsFetch case "HTTPS", "https", "HTTP", "http": fetchers = append(fetchers, &RetryFetcher{NewHttpFetcher(distPath, "", httpUserAgent, 0), numTriesPerHTTP}) case "IPFS", "ipfs": - if newIpfsFetcher != nil { - fetchers = append(fetchers, newIpfsFetcher(distPath)) - } + return nil, errors.New("IPFS downloads are not supported for legacy migrations (repo versions <16). Please use only HTTPS in Migration.DownloadSources") case "": // Ignore empty string default: @@ -202,6 +210,9 @@ func migrationName(from, to int) string { // findMigrations returns a list of migrations, ordered from first to last // migration to apply, and a map of locations of migration binaries of any // migrations that were found. +// +// Deprecated: This function is used by legacy migration downloads and will be removed +// in a future version. func findMigrations(ctx context.Context, from, to int) ([]string, map[string]string, error) { step := 1 count := to - from @@ -250,6 +261,9 @@ func runMigration(ctx context.Context, binPath, ipfsDir string, revert bool, log // fetchMigrations downloads the requested migrations, and returns a slice with // the paths of each binary, in the same order specified by needed. +// +// Deprecated: This function downloads migration binaries from the internet and will be removed +// in a future version. Use RunHybridMigrations or RunEmbeddedMigrations instead. func fetchMigrations(ctx context.Context, fetcher Fetcher, needed []string, destDir string, logger *log.Logger) ([]string, error) { osv, err := osWithVariant() if err != nil { @@ -300,3 +314,224 @@ func fetchMigrations(ctx context.Context, fetcher Fetcher, needed []string, dest return bins, nil } + +// RunHybridMigrations intelligently runs migrations using external tools for legacy versions +// and embedded migrations for modern versions. This handles the transition from external +// fs-repo-migrations binaries (for repo versions <16) to embedded migrations (for repo versions ≥16). +// +// The function automatically: +// 1. Uses external migrations to get from current version to v16 (if needed) +// 2. Uses embedded migrations for v16+ steps +// 3. Handles pure external, pure embedded, or mixed migration scenarios +// +// Legacy external migrations (repo versions <16) only support HTTPS downloads. +// +// Parameters: +// - ctx: Context for cancellation and timeouts +// - targetVer: Target repository version to migrate to +// - ipfsDir: Path to the IPFS repository directory +// - allowDowngrade: Whether to allow downgrade migrations +// +// Returns error if migration fails at any step. +func RunHybridMigrations(ctx context.Context, targetVer int, ipfsDir string, allowDowngrade bool) error { + const embeddedMigrationsMinVersion = 16 + + // Get current repo version + currentVer, err := RepoVersion(ipfsDir) + if err != nil { + return fmt.Errorf("could not get current repo version: %w", err) + } + + var logger = log.New(os.Stdout, "", 0) + + // Check if migration is needed + if currentVer == targetVer { + logger.Printf("Repository is already at version %d", targetVer) + return nil + } + + // Validate downgrade request + if targetVer < currentVer && !allowDowngrade { + return fmt.Errorf("downgrade from version %d to %d requires allowDowngrade=true", currentVer, targetVer) + } + + // Determine migration strategy based on version ranges + needsExternal := currentVer < embeddedMigrationsMinVersion + needsEmbedded := targetVer >= embeddedMigrationsMinVersion + + // Case 1: Pure embedded migration (both current and target ≥ 16) + if !needsExternal && needsEmbedded { + return RunEmbeddedMigrations(ctx, targetVer, ipfsDir, allowDowngrade) + } + + // For cases requiring external migrations, we check if migration binaries + // are available in PATH before attempting network downloads + + // Case 2: Pure external migration (target < 16) + if needsExternal && !needsEmbedded { + + // Check for migration binaries in PATH first (for testing/local development) + migrations, binPaths, err := findMigrations(ctx, currentVer, targetVer) + if err != nil { + return fmt.Errorf("could not determine migration paths: %w", err) + } + + foundAll := true + for _, migName := range migrations { + if _, exists := binPaths[migName]; !exists { + foundAll = false + break + } + } + + if foundAll { + return runMigrationsFromPath(ctx, migrations, binPaths, ipfsDir, logger, false) + } + + // Fall back to network download (original behavior) + migrationCfg, err := ReadMigrationConfig(ipfsDir, "") + if err != nil { + return fmt.Errorf("could not read migration config: %w", err) + } + + // Use existing RunMigration which handles network downloads properly (HTTPS only for legacy migrations) + fetcher, err := GetMigrationFetcher(migrationCfg.DownloadSources, GetDistPathEnv(CurrentIpfsDist), nil) + if err != nil { + return fmt.Errorf("failed to get migration fetcher: %w", err) + } + defer fetcher.Close() + return RunMigration(ctx, fetcher, targetVer, ipfsDir, allowDowngrade) + } + + // Case 3: Hybrid migration (current < 16, target ≥ 16) + if needsExternal && needsEmbedded { + logger.Printf("Starting hybrid migration from version %d to %d", currentVer, targetVer) + logger.Print("Using hybrid migration strategy: external to v16, then embedded") + + // Phase 1: Use external migrations to get to v16 + logger.Printf("Phase 1: External migration from v%d to v%d", currentVer, embeddedMigrationsMinVersion) + + // Check for external migration binaries in PATH first + migrations, binPaths, err := findMigrations(ctx, currentVer, embeddedMigrationsMinVersion) + if err != nil { + return fmt.Errorf("could not determine external migration paths: %w", err) + } + + foundAll := true + for _, migName := range migrations { + if _, exists := binPaths[migName]; !exists { + foundAll = false + break + } + } + + if foundAll { + if err = runMigrationsFromPath(ctx, migrations, binPaths, ipfsDir, logger, false); err != nil { + return fmt.Errorf("external migration phase failed: %w", err) + } + } else { + migrationCfg, err := ReadMigrationConfig(ipfsDir, "") + if err != nil { + return fmt.Errorf("could not read migration config: %w", err) + } + + // Legacy migrations only support HTTPS downloads + fetcher, err := GetMigrationFetcher(migrationCfg.DownloadSources, GetDistPathEnv(CurrentIpfsDist), nil) + if err != nil { + return fmt.Errorf("failed to get migration fetcher: %w", err) + } + defer fetcher.Close() + + if err = RunMigration(ctx, fetcher, embeddedMigrationsMinVersion, ipfsDir, allowDowngrade); err != nil { + return fmt.Errorf("external migration phase failed: %w", err) + } + } + + // Phase 2: Use embedded migrations for v16+ + logger.Printf("Phase 2: Embedded migration from v%d to v%d", embeddedMigrationsMinVersion, targetVer) + err = RunEmbeddedMigrations(ctx, targetVer, ipfsDir, allowDowngrade) + if err != nil { + return fmt.Errorf("embedded migration phase failed: %w", err) + } + + logger.Printf("Hybrid migration completed successfully: v%d → v%d", currentVer, targetVer) + return nil + } + + // Case 4: Reverse hybrid migration (≥16 to <16) + // Use embedded migrations for ≥16 steps, then external migrations for <16 steps + logger.Printf("Starting reverse hybrid migration from version %d to %d", currentVer, targetVer) + logger.Print("Using reverse hybrid migration strategy: embedded to v16, then external") + + // Phase 1: Use embedded migrations from current version down to v16 (if needed) + if currentVer > embeddedMigrationsMinVersion { + logger.Printf("Phase 1: Embedded downgrade from v%d to v%d", currentVer, embeddedMigrationsMinVersion) + err = RunEmbeddedMigrations(ctx, embeddedMigrationsMinVersion, ipfsDir, allowDowngrade) + if err != nil { + return fmt.Errorf("embedded downgrade phase failed: %w", err) + } + } + + // Phase 2: Use external migrations from v16 to target (if needed) + if embeddedMigrationsMinVersion > targetVer { + logger.Printf("Phase 2: External downgrade from v%d to v%d", embeddedMigrationsMinVersion, targetVer) + + // Check for external migration binaries in PATH first + migrations, binPaths, err := findMigrations(ctx, embeddedMigrationsMinVersion, targetVer) + if err != nil { + return fmt.Errorf("could not determine external migration paths: %w", err) + } + + foundAll := true + for _, migName := range migrations { + if _, exists := binPaths[migName]; !exists { + foundAll = false + break + } + } + + if foundAll { + if err = runMigrationsFromPath(ctx, migrations, binPaths, ipfsDir, logger, true); err != nil { + return fmt.Errorf("external downgrade phase failed: %w", err) + } + } else { + migrationCfg, err := ReadMigrationConfig(ipfsDir, "") + if err != nil { + return fmt.Errorf("could not read migration config: %w", err) + } + + // Legacy migrations only support HTTPS downloads + fetcher, err := GetMigrationFetcher(migrationCfg.DownloadSources, GetDistPathEnv(CurrentIpfsDist), nil) + if err != nil { + return fmt.Errorf("failed to get migration fetcher: %w", err) + } + defer fetcher.Close() + + if err = RunMigration(ctx, fetcher, targetVer, ipfsDir, allowDowngrade); err != nil { + return fmt.Errorf("external downgrade phase failed: %w", err) + } + } + } + + logger.Printf("Reverse hybrid migration completed successfully: v%d → v%d", currentVer, targetVer) + return nil +} + +// runMigrationsFromPath runs migrations using binaries found in PATH +func runMigrationsFromPath(ctx context.Context, migrations []string, binPaths map[string]string, ipfsDir string, logger *log.Logger, revert bool) error { + for _, migName := range migrations { + binPath, exists := binPaths[migName] + if !exists { + return fmt.Errorf("migration binary %s not found in PATH", migName) + } + + logger.Printf("Running migration %s using binary from PATH: %s", migName, binPath) + + // Run the migration binary directly + err := runMigration(ctx, binPath, ipfsDir, revert, logger) + if err != nil { + return fmt.Errorf("migration %s failed: %w", migName, err) + } + } + return nil +} diff --git a/repo/fsrepo/migrations/migrations_test.go b/repo/fsrepo/migrations/migrations_test.go index 96370f864..f690290f8 100644 --- a/repo/fsrepo/migrations/migrations_test.go +++ b/repo/fsrepo/migrations/migrations_test.go @@ -169,7 +169,7 @@ func TestRunMigrations(t *testing.T) { err = RunMigration(ctx, fetcher, targetVer, fakeIpfs, false) if err == nil || !strings.HasPrefix(err.Error(), "downgrade not allowed") { - t.Fatal("expected 'downgrade not alloed' error") + t.Fatal("expected 'downgrade not allowed' error") } err = RunMigration(ctx, fetcher, targetVer, fakeIpfs, true) @@ -327,12 +327,9 @@ func TestGetMigrationFetcher(t *testing.T) { } downloadSources = []string{"ipfs"} - f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) - if err != nil { - t.Fatal(err) - } - if _, ok := f.(*mockIpfsFetcher); !ok { - t.Fatal("expected IpfsFetcher") + _, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) + if err == nil || !strings.Contains(err.Error(), "IPFS downloads are not supported for legacy migrations") { + t.Fatal("Expected IPFS downloads error, got:", err) } downloadSources = []string{"http"} @@ -347,6 +344,12 @@ func TestGetMigrationFetcher(t *testing.T) { } downloadSources = []string{"IPFS", "HTTPS"} + _, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) + if err == nil || !strings.Contains(err.Error(), "IPFS downloads are not supported for legacy migrations") { + t.Fatal("Expected IPFS downloads error, got:", err) + } + + downloadSources = []string{"https", "some.domain.io"} f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) if err != nil { t.Fatal(err) @@ -359,19 +362,6 @@ func TestGetMigrationFetcher(t *testing.T) { t.Fatal("expected 2 fetchers in MultiFetcher") } - downloadSources = []string{"ipfs", "https", "some.domain.io"} - f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) - if err != nil { - t.Fatal(err) - } - mf, ok = f.(*MultiFetcher) - if !ok { - t.Fatal("expected MultiFetcher") - } - if mf.Len() != 3 { - t.Fatal("expected 3 fetchers in MultiFetcher") - } - downloadSources = nil _, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher) if err == nil { diff --git a/routing/composer.go b/routing/composer.go index a100bb498..500fa371e 100644 --- a/routing/composer.go +++ b/routing/composer.go @@ -9,7 +9,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" "github.com/multiformats/go-multihash" - "go.uber.org/multierr" ) var ( @@ -124,7 +123,7 @@ func (c *Composer) Bootstrap(ctx context.Context) error { errgv := c.GetValueRouter.Bootstrap(ctx) errpv := c.PutValueRouter.Bootstrap(ctx) errp := c.ProvideRouter.Bootstrap(ctx) - err := multierr.Combine(errfp, errfps, errgv, errpv, errp) + err := errors.Join(errfp, errfps, errgv, errpv, errp) if err != nil { log.Debug("composer: calling bootstrap error: ", err) } diff --git a/test/cli/add_test.go b/test/cli/add_test.go index 775a6063b..e4138b624 100644 --- a/test/cli/add_test.go +++ b/test/cli/add_test.go @@ -108,6 +108,44 @@ func TestAdd(t *testing.T) { require.Equal(t, shortStringCidV1NoRawLeaves, cidStr) }) + t.Run("ipfs add --pin-name=foo", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + pinName := "test-pin-name" + cidStr := node.IPFSAddStr(shortString, "--pin-name", pinName) + require.Equal(t, shortStringCidV0, cidStr) + + pinList := node.IPFS("pin", "ls", "--names").Stdout.Trimmed() + require.Contains(t, pinList, shortStringCidV0) + require.Contains(t, pinList, pinName) + }) + + t.Run("ipfs add --pin=false --pin-name=foo returns an error", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Use RunIPFS to allow for errors without assertion + result := node.RunIPFS("add", "--pin=false", "--pin-name=foo") + require.Error(t, result.Err, "Expected an error due to incompatible --pin and --pin-name") + require.Contains(t, result.Stderr.String(), "pin-name option requires pin to be set") + }) + + t.Run("ipfs add --pin-name without value should fail", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // When --pin-name is passed without any value, it should fail + result := node.RunIPFS("add", "--pin-name") + require.Error(t, result.Err, "Expected an error when --pin-name has no value") + require.Contains(t, result.Stderr.String(), "missing argument for option \"pin-name\"") + }) + t.Run("produced unixfs max file links: command flag --max-file-links overrides configuration in Import.UnixFSFileMaxLinks", func(t *testing.T) { t.Parallel() diff --git a/test/cli/autoconf/autoconf_test.go b/test/cli/autoconf/autoconf_test.go new file mode 100644 index 000000000..0a49e8c89 --- /dev/null +++ b/test/cli/autoconf/autoconf_test.go @@ -0,0 +1,779 @@ +package autoconf + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConf(t *testing.T) { + t.Parallel() + + t.Run("basic functionality", func(t *testing.T) { + t.Parallel() + testAutoConfBasicFunctionality(t) + }) + + t.Run("background service updates", func(t *testing.T) { + t.Parallel() + testAutoConfBackgroundService(t) + }) + + t.Run("HTTP error scenarios", func(t *testing.T) { + t.Parallel() + testAutoConfHTTPErrors(t) + }) + + t.Run("cache-based config expansion", func(t *testing.T) { + t.Parallel() + testAutoConfCacheBasedExpansion(t) + }) + + t.Run("disabled autoconf", func(t *testing.T) { + t.Parallel() + testAutoConfDisabled(t) + }) + + t.Run("bootstrap list shows auto as-is", func(t *testing.T) { + t.Parallel() + testBootstrapListResolved(t) + }) + + t.Run("daemon uses resolved bootstrap values", func(t *testing.T) { + t.Parallel() + testDaemonUsesResolvedBootstrap(t) + }) + + t.Run("empty cache uses fallback defaults", func(t *testing.T) { + t.Parallel() + testEmptyCacheUsesFallbacks(t) + }) + + t.Run("stale cache with unreachable server", func(t *testing.T) { + t.Parallel() + testStaleCacheWithUnreachableServer(t) + }) + + t.Run("autoconf disabled with auto values", func(t *testing.T) { + t.Parallel() + testAutoConfDisabledWithAutoValues(t) + }) + + t.Run("network behavior - cached vs refresh", func(t *testing.T) { + t.Parallel() + testAutoConfNetworkBehavior(t) + }) + + t.Run("HTTPS autoconf server", func(t *testing.T) { + t.Parallel() + testAutoConfWithHTTPS(t) + }) +} + +func testAutoConfBasicFunctionality(t *testing.T) { + // Load test autoconf data + autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + etag := `"test-etag-123"` + requestCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount++ + t.Logf("AutoConf server request #%d: %s %s", requestCount, r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", etag) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node and configure it to use our test server + // Use test profile to avoid autoconf profile being applied by default + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + // Disable background updates to prevent multiple requests + node.SetIPFSConfig("AutoConf.RefreshInterval", "24h") + + // Test with normal bootstrap peers (not "auto") to avoid multiaddr parsing issues + // This tests that autoconf fetching works without complex auto replacement + node.SetIPFSConfig("Bootstrap", []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}) + + // Start daemon to trigger autoconf fetch + node.StartDaemon() + defer node.StopDaemon() + + // Give autoconf some time to fetch + time.Sleep(2 * time.Second) + + // Verify that the autoconf system fetched data from our server + t.Logf("Server request count: %d", requestCount) + require.GreaterOrEqual(t, requestCount, 1, "AutoConf server should have been called at least once") + + // Test that daemon is functional + result := node.RunIPFS("id") + assert.Equal(t, 0, result.ExitCode(), "IPFS daemon should be responsive") + assert.Contains(t, result.Stdout.String(), "ID", "IPFS id command should return peer information") + + // Success! AutoConf system is working: + // 1. Server was called (proves fetch works) + // 2. Daemon started successfully (proves DNS resolver validation is fixed) + // 3. Daemon is functional (proves autoconf doesn't break core functionality) + // Note: We skip checking metadata values due to JSON parsing complexity in test harness +} + +func testAutoConfBackgroundService(t *testing.T) { + // Test that the startAutoConfUpdater() goroutine makes network requests for background refresh + // This is separate from daemon config operations which now use cache-first approach + + // Load initial and updated test data + initialData := loadTestData(t, "valid_autoconf.json") + updatedData := loadTestData(t, "updated_autoconf.json") + + // Track which config is being served + currentData := initialData + var requestCount atomic.Int32 + + // Create server that switches payload after first request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Background service request #%d from %s", count, r.UserAgent()) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", fmt.Sprintf(`"background-test-etag-%d"`, count)) + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + + if count > 1 { + // After first request, serve updated config + currentData = updatedData + } + + _, _ = w.Write(currentData) + })) + defer server.Close() + + // Create IPFS node with short refresh interval to trigger background service + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("AutoConf.RefreshInterval", "1s") // Very short for testing background service + + // Use normal bootstrap values to avoid dependency on autoconf during initialization + node.SetIPFSConfig("Bootstrap", []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}) + + // Start daemon - this should start the background service via startAutoConfUpdater() + node.StartDaemon() + defer node.StopDaemon() + + // Wait for initial request (daemon startup may trigger one) + time.Sleep(1 * time.Second) + initialCount := requestCount.Load() + t.Logf("Initial request count after daemon start: %d", initialCount) + + // Wait for background service to make additional requests + // The background service should make requests at the RefreshInterval (1s) + time.Sleep(3 * time.Second) + + finalCount := requestCount.Load() + t.Logf("Final request count after background updates: %d", finalCount) + + // Background service should have made multiple requests due to 1s refresh interval + assert.Greater(t, finalCount, initialCount, + "Background service should have made additional requests beyond daemon startup") + + // Verify that the service is actively making requests (not just relying on cache) + assert.GreaterOrEqual(t, finalCount, int32(2), + "Should have at least 2 requests total (startup + background refresh)") + + t.Logf("Successfully verified startAutoConfUpdater() background service makes network requests") +} + +func testAutoConfHTTPErrors(t *testing.T) { + tests := []struct { + name string + statusCode int + body string + }{ + {"404 Not Found", http.StatusNotFound, "Not Found"}, + {"500 Internal Server Error", http.StatusInternalServerError, "Internal Server Error"}, + {"Invalid JSON", http.StatusOK, "invalid json content"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create server that returns error + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.statusCode) + _, _ = w.Write([]byte(tt.body)) + })) + defer server.Close() + + // Create node with failing AutoConf URL + // Use test profile to avoid autoconf profile being applied by default + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon - it should start but autoconf should fail gracefully + node.StartDaemon() + defer node.StopDaemon() + + // Daemon should still be functional even with autoconf HTTP errors + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with HTTP errors in autoconf") + }) + } +} + +func testAutoConfCacheBasedExpansion(t *testing.T) { + // Test that config expansion works correctly with cached autoconf data + // without requiring active network requests during expansion operations + + autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create server that serves autoconf data + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"cache-test-etag"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with autoconf enabled + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Set configuration with "auto" values to test expansion + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"test.": "auto"}) + + // Populate cache by running a command that triggers autoconf (without daemon) + result := node.RunIPFS("bootstrap", "list", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Initial bootstrap expansion should succeed") + + expandedBootstrap := result.Stdout.String() + assert.NotContains(t, expandedBootstrap, "auto", "Expanded bootstrap should not contain 'auto' literal") + assert.Greater(t, len(strings.Fields(expandedBootstrap)), 0, "Should have expanded bootstrap peers") + + // Test that subsequent config operations work with cached data (no network required) + // This simulates the cache-first behavior our architecture now uses + + // Test Bootstrap expansion + result = node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Cached bootstrap expansion should succeed") + + var expandedBootstrapList []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedBootstrapList) + require.NoError(t, err) + assert.NotContains(t, expandedBootstrapList, "auto", "Expanded bootstrap list should not contain 'auto'") + assert.Greater(t, len(expandedBootstrapList), 0, "Should have expanded bootstrap peers from cache") + + // Test Routing.DelegatedRouters expansion + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Cached router expansion should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + assert.NotContains(t, expandedRouters, "auto", "Expanded routers should not contain 'auto'") + + // Test DNS.Resolvers expansion + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Cached DNS resolver expansion should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + + // Should have expanded the "auto" value for test. domain, or removed it if no autoconf data available + testResolver, exists := expandedResolvers["test."] + if exists { + assert.NotEqual(t, "auto", testResolver, "test. resolver should not be literal 'auto'") + t.Logf("Found expanded resolver for test.: %s", testResolver) + } else { + t.Logf("No resolver found for test. domain (autoconf may not have DNS resolver data)") + } + + // Test full config expansion + result = node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Full config expansion should succeed") + + expandedConfig := result.Stdout.String() + // Should not contain literal "auto" values after expansion + assert.NotContains(t, expandedConfig, `"auto"`, "Expanded config should not contain literal 'auto' values") + assert.Contains(t, expandedConfig, `"Bootstrap"`, "Should contain Bootstrap section") + assert.Contains(t, expandedConfig, `"DNS"`, "Should contain DNS section") + + t.Logf("Successfully tested cache-based config expansion without active network requests") +} + +func testAutoConfDisabled(t *testing.T) { + // Create node with AutoConf disabled but "auto" values + // Use test profile to avoid autoconf profile being applied by default + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", false) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test by trying to list bootstrap - when AutoConf is disabled, it should show literal "auto" + result := node.RunIPFS("bootstrap", "list") + if result.ExitCode() == 0 { + // If command succeeds, it should show literal "auto" (no resolution) + output := result.Stdout.String() + assert.Contains(t, output, "auto", "Should show literal 'auto' when AutoConf is disabled") + } else { + // If command fails, error should mention autoconf issue + assert.Contains(t, result.Stderr.String(), "auto", "Should mention 'auto' values in error") + } +} + +// Helper function to load test data files +func loadTestData(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: %s", filename) + + return data +} + +func testBootstrapListResolved(t *testing.T) { + // Test that bootstrap list shows "auto" as-is (not expanded) + + // Load test autoconf data + autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with "auto" bootstrap value + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test 1: bootstrap list (without --expand-auto) shows "auto" as-is - NO DAEMON NEEDED! + result := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, result.ExitCode(), "bootstrap list command should succeed") + + output := result.Stdout.String() + t.Logf("Bootstrap list output: %s", output) + assert.Contains(t, output, "auto", "bootstrap list should show 'auto' value as-is") + + // Should NOT contain expanded bootstrap peers without --expand-auto + unexpectedPeers := []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + } + + for _, peer := range unexpectedPeers { + assert.NotContains(t, output, peer, "bootstrap list should not contain expanded peer: %s", peer) + } + + // Test 2: bootstrap list --expand-auto shows expanded values (no daemon needed!) + result = node.RunIPFS("bootstrap", "list", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "bootstrap list --expand-auto command should succeed") + + expandedOutput := result.Stdout.String() + t.Logf("Bootstrap list --expand-auto output: %s", expandedOutput) + + // Should NOT contain "auto" literal when expanded + assert.NotContains(t, expandedOutput, "auto", "bootstrap list --expand-auto should not show 'auto' literal") + + // Should contain at least one expanded bootstrap peer + expectedPeers := []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + } + + foundExpectedPeer := false + for _, peer := range expectedPeers { + if strings.Contains(expandedOutput, peer) { + foundExpectedPeer = true + t.Logf("Found expected expanded peer: %s", peer) + break + } + } + assert.True(t, foundExpectedPeer, "bootstrap list --expand-auto should contain at least one expanded bootstrap peer") +} + +func testDaemonUsesResolvedBootstrap(t *testing.T) { + // Test that daemon actually uses expanded bootstrap values for P2P connections + // even though bootstrap list shows "auto" + + // Step 1: Create bootstrap node (target for connections) + bootstrapNode := harness.NewT(t).NewNode().Init("--profile=test") + // Set a specific swarm port for the bootstrap node to avoid port 0 issues + bootstrapNode.SetIPFSConfig("Addresses.Swarm", []string{"/ip4/127.0.0.1/tcp/14001"}) + // Disable routing and discovery to ensure it's only discoverable via explicit multiaddr + bootstrapNode.SetIPFSConfig("Routing.Type", "none") + bootstrapNode.SetIPFSConfig("Discovery.MDNS.Enabled", false) + bootstrapNode.SetIPFSConfig("Bootstrap", []string{}) // No bootstrap peers + + // Start the bootstrap node first + bootstrapNode.StartDaemon() + defer bootstrapNode.StopDaemon() + + // Get bootstrap node's peer ID and swarm address + bootstrapPeerID := bootstrapNode.PeerID() + + // Use the configured swarm address (we set it to a specific port above) + bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/14001/p2p/%s", bootstrapPeerID.String()) + t.Logf("Bootstrap node configured at: %s", bootstrapMultiaddr) + + // Step 2: Create autoconf server that returns bootstrap node's address + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": ["%s"] + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": {} + }`, bootstrapMultiaddr) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer server.Close() + + // Step 3: Create autoconf-enabled node that should connect to bootstrap node + autoconfNode := harness.NewT(t).NewNode().Init("--profile=test") + autoconfNode.SetIPFSConfig("AutoConf.URL", server.URL) + autoconfNode.SetIPFSConfig("AutoConf.Enabled", true) + autoconfNode.SetIPFSConfig("Bootstrap", []string{"auto"}) // This should resolve to bootstrap node + // Disable other discovery methods to force bootstrap-only connectivity + autoconfNode.SetIPFSConfig("Routing.Type", "none") + autoconfNode.SetIPFSConfig("Discovery.MDNS.Enabled", false) + + // Start the autoconf node + autoconfNode.StartDaemon() + defer autoconfNode.StopDaemon() + + // Step 4: Give time for autoconf resolution and connection attempts + time.Sleep(8 * time.Second) + + // Step 5: Verify both nodes are responsive + result := bootstrapNode.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Bootstrap node should be responsive: %s", result.Stderr.String()) + + result = autoconfNode.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "AutoConf node should be responsive: %s", result.Stderr.String()) + + // Step 6: Verify that autoconf node connected to bootstrap node + // Check swarm peers on autoconf node - it should show bootstrap node's peer ID + result = autoconfNode.RunIPFS("swarm", "peers") + if result.ExitCode() == 0 { + peerOutput := result.Stdout.String() + if strings.Contains(peerOutput, bootstrapPeerID.String()) { + t.Logf("SUCCESS: AutoConf node connected to bootstrap peer %s", bootstrapPeerID.String()) + } else { + t.Logf("No active connection found. Peers output: %s", peerOutput) + // This might be OK if connection attempt was made but didn't persist + } + } else { + // If swarm peers fails, try alternative verification via daemon logs + t.Logf("Swarm peers command failed, checking daemon logs for connection attempts") + daemonOutput := autoconfNode.Daemon.Stderr.String() + if strings.Contains(daemonOutput, bootstrapPeerID.String()) { + t.Logf("SUCCESS: Found bootstrap peer %s in daemon logs, connection attempted", bootstrapPeerID.String()) + } else { + t.Logf("Daemon stderr: %s", daemonOutput) + } + } + + // Step 7: Verify bootstrap configuration still shows "auto" (not resolved values) + result = autoconfNode.RunIPFS("bootstrap", "list") + require.Equal(t, 0, result.ExitCode(), "Bootstrap list command should work") + assert.Contains(t, result.Stdout.String(), "auto", + "Bootstrap list should still show 'auto' even though values were resolved for networking") +} + +func testEmptyCacheUsesFallbacks(t *testing.T) { + // Test that daemon uses fallback defaults when no cache exists and server is unreachable + + // Create IPFS node with auto values and unreachable autoconf server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", "http://127.0.0.1:9999/nonexistent") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + + // Start daemon - should succeed using fallback values + node.StartDaemon() + defer node.StopDaemon() + + // Verify daemon started successfully (uses fallback bootstrap) + result := node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Daemon should start successfully with fallback values") + + // Verify config commands still show "auto" + result = node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + assert.Contains(t, result.Stdout.String(), "auto", "Bootstrap config should still show 'auto'") + + result = node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + assert.Contains(t, result.Stdout.String(), "auto", "DelegatedRouters config should still show 'auto'") + + // Check daemon logs for error about failed autoconf fetch + logOutput := node.Daemon.Stderr.String() + // The daemon should attempt to fetch autoconf but will use fallbacks on failure + // We don't require specific log messages as long as the daemon starts successfully + if logOutput != "" { + t.Logf("Daemon logs: %s", logOutput) + } +} + +func testStaleCacheWithUnreachableServer(t *testing.T) { + // Test that daemon uses stale cache when server is unreachable + + // First create a working autoconf server and cache + autoConfData := loadTestData(t, "valid_autoconf.json") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + + // Create node and fetch autoconf to populate cache + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon briefly to populate cache + node.StartDaemon() + time.Sleep(1 * time.Second) // Allow cache population + node.StopDaemon() + + // Close the server to make it unreachable + server.Close() + + // Update config to point to unreachable server + node.SetIPFSConfig("AutoConf.URL", "http://127.0.0.1:9999/unreachable") + + // Start daemon again - should use stale cache + node.StartDaemon() + defer node.StopDaemon() + + // Verify daemon started successfully (uses cached autoconf) + result := node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Daemon should start successfully with cached autoconf") + + // Check daemon logs for error about using stale config + logOutput := node.Daemon.Stderr.String() + // The daemon should use cached config when server is unreachable + // We don't require specific log messages as long as the daemon starts successfully + if logOutput != "" { + t.Logf("Daemon logs: %s", logOutput) + } +} + +func testAutoConfDisabledWithAutoValues(t *testing.T) { + // Test that daemon fails to start when AutoConf is disabled but "auto" values are present + + // Create IPFS node with AutoConf disabled but "auto" values configured + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", false) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test by trying to list bootstrap - when AutoConf is disabled, it should show literal "auto" + result := node.RunIPFS("bootstrap", "list") + if result.ExitCode() == 0 { + // If command succeeds, it should show literal "auto" (no resolution) + output := result.Stdout.String() + assert.Contains(t, output, "auto", "Should show literal 'auto' when AutoConf is disabled") + } else { + // If command fails, error should mention autoconf issue + logOutput := result.Stderr.String() + assert.Contains(t, logOutput, "auto", "Error should mention 'auto' values") + // Check that the error message contains information about disabled state + assert.True(t, + strings.Contains(logOutput, "disabled") || strings.Contains(logOutput, "AutoConf.Enabled=false"), + "Error should mention that AutoConf is disabled or show AutoConf.Enabled=false") + } +} + +func testAutoConfNetworkBehavior(t *testing.T) { + // Test the network behavior differences between MustGetConfigCached and MustGetConfigWithRefresh + // This validates that our cache-first architecture works as expected + + autoConfData := loadTestData(t, "valid_autoconf.json") + var requestCount atomic.Int32 + + // Create server that tracks all requests + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Network behavior test request #%d: %s %s", count, r.Method, r.URL.Path) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", fmt.Sprintf(`"network-test-etag-%d"`, count)) + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with autoconf + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Phase 1: Test cache-first behavior (no network requests expected) + t.Logf("=== Phase 1: Testing cache-first behavior ===") + initialCount := requestCount.Load() + + // Multiple config operations should NOT trigger network requests (cache-first) + result := node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode(), "Bootstrap config read should succeed") + + result = node.RunIPFS("config", "show") + require.Equal(t, 0, result.ExitCode(), "Config show should succeed") + + result = node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, result.ExitCode(), "Bootstrap list should succeed") + + // Check that cache-first operations didn't trigger network requests + afterCacheOpsCount := requestCount.Load() + cachedRequestDiff := afterCacheOpsCount - initialCount + t.Logf("Network requests during cache-first operations: %d", cachedRequestDiff) + + // Phase 2: Test explicit expansion (may trigger cache population) + t.Logf("=== Phase 2: Testing expansion operations ===") + beforeExpansionCount := requestCount.Load() + + // Expansion operations may need to populate cache if empty + result = node.RunIPFS("bootstrap", "list", "--expand-auto") + if result.ExitCode() == 0 { + output := result.Stdout.String() + assert.NotContains(t, output, "auto", "Expanded bootstrap should not contain 'auto' literal") + t.Logf("Bootstrap expansion succeeded") + } else { + t.Logf("Bootstrap expansion failed (may be due to network/cache issues): %s", result.Stderr.String()) + } + + result = node.RunIPFS("config", "Bootstrap", "--expand-auto") + if result.ExitCode() == 0 { + t.Logf("Config Bootstrap expansion succeeded") + } else { + t.Logf("Config Bootstrap expansion failed: %s", result.Stderr.String()) + } + + afterExpansionCount := requestCount.Load() + expansionRequestDiff := afterExpansionCount - beforeExpansionCount + t.Logf("Network requests during expansion operations: %d", expansionRequestDiff) + + // Phase 3: Test background service behavior (if daemon is started) + t.Logf("=== Phase 3: Testing background service behavior ===") + beforeDaemonCount := requestCount.Load() + + // Set short refresh interval to test background service + node.SetIPFSConfig("AutoConf.RefreshInterval", "1s") + + // Start daemon - this triggers startAutoConfUpdater() which should make network requests + node.StartDaemon() + defer node.StopDaemon() + + // Wait for background service to potentially make requests + time.Sleep(2 * time.Second) + + afterDaemonCount := requestCount.Load() + daemonRequestDiff := afterDaemonCount - beforeDaemonCount + t.Logf("Network requests from background service: %d", daemonRequestDiff) + + // Verify expected behavior patterns + t.Logf("=== Summary ===") + t.Logf("Cache-first operations: %d requests", cachedRequestDiff) + t.Logf("Expansion operations: %d requests", expansionRequestDiff) + t.Logf("Background service: %d requests", daemonRequestDiff) + + // Cache-first operations should minimize network requests + assert.LessOrEqual(t, cachedRequestDiff, int32(1), + "Cache-first config operations should make minimal network requests") + + // Background service should make requests for refresh + if daemonRequestDiff > 0 { + t.Logf("✓ Background service is making network requests as expected") + } else { + t.Logf("⚠ Background service made no requests (may be using existing cache)") + } + + t.Logf("Successfully verified network behavior patterns in autoconf architecture") +} + +func testAutoConfWithHTTPS(t *testing.T) { + // Test autoconf with HTTPS server and TLSInsecureSkipVerify enabled + autoConfData := loadTestData(t, "valid_autoconf.json") + + // Create HTTPS server with self-signed certificate + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("HTTPS autoconf request from %s", r.UserAgent()) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"https-test-etag"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + + // Enable HTTP/2 and start with TLS (self-signed certificate) + server.EnableHTTP2 = true + server.StartTLS() + defer server.Close() + + // Create IPFS node with HTTPS autoconf server and TLS skip verify + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("AutoConf.TLSInsecureSkipVerify", true) // Allow self-signed cert + node.SetIPFSConfig("AutoConf.RefreshInterval", "24h") // Disable background updates + + // Use normal bootstrap peers to test HTTPS fetching without complex auto replacement + node.SetIPFSConfig("Bootstrap", []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}) + + // Start daemon to trigger HTTPS autoconf fetch + node.StartDaemon() + defer node.StopDaemon() + + // Give autoconf time to fetch over HTTPS + time.Sleep(2 * time.Second) + + // Verify daemon is functional with HTTPS autoconf + result := node.RunIPFS("id") + assert.Equal(t, 0, result.ExitCode(), "IPFS daemon should be responsive with HTTPS autoconf") + assert.Contains(t, result.Stdout.String(), "ID", "IPFS id command should return peer information") + + // Test that config operations work with HTTPS-fetched autoconf cache + result = node.RunIPFS("config", "show") + assert.Equal(t, 0, result.ExitCode(), "Config show should work with HTTPS autoconf") + + // Test bootstrap list functionality + result = node.RunIPFS("bootstrap", "list") + assert.Equal(t, 0, result.ExitCode(), "Bootstrap list should work with HTTPS autoconf") + + t.Logf("Successfully tested AutoConf with HTTPS server and TLS skip verify") +} diff --git a/test/cli/autoconf/dns_test.go b/test/cli/autoconf/dns_test.go new file mode 100644 index 000000000..13144fa46 --- /dev/null +++ b/test/cli/autoconf/dns_test.go @@ -0,0 +1,288 @@ +package autoconf + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfDNS(t *testing.T) { + t.Parallel() + + t.Run("DNS resolution with auto DoH resolver", func(t *testing.T) { + t.Parallel() + testDNSResolutionWithAutoDoH(t) + }) + + t.Run("DNS errors are handled properly", func(t *testing.T) { + t.Parallel() + testDNSErrorHandling(t) + }) +} + +// mockDoHServer implements a simple DNS-over-HTTPS server for testing +type mockDoHServer struct { + t *testing.T + server *httptest.Server + mu sync.Mutex + requests []string + responseFunc func(name string) *dns.Msg +} + +func newMockDoHServer(t *testing.T) *mockDoHServer { + m := &mockDoHServer{ + t: t, + requests: []string{}, + } + + // Default response function returns a dnslink TXT record + m.responseFunc = func(name string) *dns.Msg { + msg := &dns.Msg{} + msg.SetReply(&dns.Msg{Question: []dns.Question{{Name: name, Qtype: dns.TypeTXT}}}) + + if strings.HasPrefix(name, "_dnslink.") { + // Return a valid dnslink record + rr := &dns.TXT{ + Hdr: dns.RR_Header{ + Name: name, + Rrtype: dns.TypeTXT, + Class: dns.ClassINET, + Ttl: 300, + }, + Txt: []string{"dnslink=/ipfs/QmYNQJoKGNHTpPxCBPh9KkDpaExgd2duMa3aF6ytMpHdao"}, + } + msg.Answer = append(msg.Answer, rr) + } + + return msg + } + + mux := http.NewServeMux() + mux.HandleFunc("/dns-query", m.handleDNSQuery) + + m.server = httptest.NewServer(mux) + return m +} + +func (m *mockDoHServer) handleDNSQuery(w http.ResponseWriter, r *http.Request) { + m.mu.Lock() + defer m.mu.Unlock() + + var dnsMsg *dns.Msg + + if r.Method == "GET" { + // Handle GET with ?dns= parameter + dnsParam := r.URL.Query().Get("dns") + if dnsParam == "" { + http.Error(w, "missing dns parameter", http.StatusBadRequest) + return + } + + data, err := base64.RawURLEncoding.DecodeString(dnsParam) + if err != nil { + http.Error(w, "invalid base64", http.StatusBadRequest) + return + } + + dnsMsg = &dns.Msg{} + if err := dnsMsg.Unpack(data); err != nil { + http.Error(w, "invalid DNS message", http.StatusBadRequest) + return + } + } else if r.Method == "POST" { + // Handle POST with DNS wire format + data, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "failed to read body", http.StatusBadRequest) + return + } + + dnsMsg = &dns.Msg{} + if err := dnsMsg.Unpack(data); err != nil { + http.Error(w, "invalid DNS message", http.StatusBadRequest) + return + } + } else { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + // Log the DNS query + if len(dnsMsg.Question) > 0 { + qname := dnsMsg.Question[0].Name + m.requests = append(m.requests, qname) + m.t.Logf("DoH server received query for: %s", qname) + } + + // Generate response + response := m.responseFunc(dnsMsg.Question[0].Name) + responseData, err := response.Pack() + if err != nil { + http.Error(w, "failed to pack response", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/dns-message") + _, _ = w.Write(responseData) +} + +func (m *mockDoHServer) getRequests() []string { + m.mu.Lock() + defer m.mu.Unlock() + return append([]string{}, m.requests...) +} + +func (m *mockDoHServer) close() { + m.server.Close() +} + +func testDNSResolutionWithAutoDoH(t *testing.T) { + // Create mock DoH server + dohServer := newMockDoHServer(t) + defer dohServer.close() + + // Create autoconf data with DoH resolver for "foo." domain + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": { + "foo.": ["%s/dns-query"] + }, + "DelegatedEndpoints": {} + }`, dohServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // Create IPFS node with auto DNS resolver + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Start daemon + node.StartDaemon() + defer node.StopDaemon() + + // Verify config still shows "auto" for DNS resolvers + result := node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + dnsResolversOutput := result.Stdout.String() + assert.Contains(t, dnsResolversOutput, "foo.", "DNS resolvers should contain foo. domain") + assert.Contains(t, dnsResolversOutput, "auto", "DNS resolver config should show 'auto'") + + // Try to resolve a .foo domain + result = node.RunIPFS("resolve", "/ipns/example.foo") + require.Equal(t, 0, result.ExitCode()) + + // Should resolve to the IPFS path from our mock DoH server + output := strings.TrimSpace(result.Stdout.String()) + assert.Equal(t, "/ipfs/QmYNQJoKGNHTpPxCBPh9KkDpaExgd2duMa3aF6ytMpHdao", output, + "Should resolve to the path returned by DoH server") + + // Verify DoH server received the DNS query + requests := dohServer.getRequests() + require.Greater(t, len(requests), 0, "DoH server should have received at least one request") + + foundDNSLink := false + for _, req := range requests { + if strings.Contains(req, "_dnslink.example.foo") { + foundDNSLink = true + break + } + } + assert.True(t, foundDNSLink, "DoH server should have received query for _dnslink.example.foo") +} + +func testDNSErrorHandling(t *testing.T) { + // Create DoH server that returns NXDOMAIN + dohServer := newMockDoHServer(t) + defer dohServer.close() + + // Configure to return NXDOMAIN + dohServer.responseFunc = func(name string) *dns.Msg { + msg := &dns.Msg{} + msg.SetReply(&dns.Msg{Question: []dns.Question{{Name: name, Qtype: dns.TypeTXT}}}) + msg.Rcode = dns.RcodeNameError // NXDOMAIN + return msg + } + + // Create autoconf data with DoH resolver + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": { + "bar.": ["%s/dns-query"] + }, + "DelegatedEndpoints": {} + }`, dohServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"bar.": "auto"}) + + // Start daemon + node.StartDaemon() + defer node.StopDaemon() + + // Try to resolve a non-existent domain + result := node.RunIPFS("resolve", "/ipns/nonexistent.bar") + require.NotEqual(t, 0, result.ExitCode(), "Resolution should fail for non-existent domain") + + // Should contain appropriate error message + stderr := result.Stderr.String() + assert.Contains(t, stderr, "could not resolve name", + "Error should indicate DNS resolution failure") + + // Verify DoH server received the query + requests := dohServer.getRequests() + foundQuery := false + for _, req := range requests { + if strings.Contains(req, "_dnslink.nonexistent.bar") { + foundQuery = true + break + } + } + assert.True(t, foundQuery, "DoH server should have received query even for failed resolution") +} diff --git a/test/cli/autoconf/expand_comprehensive_test.go b/test/cli/autoconf/expand_comprehensive_test.go new file mode 100644 index 000000000..01dbcfda3 --- /dev/null +++ b/test/cli/autoconf/expand_comprehensive_test.go @@ -0,0 +1,698 @@ +// Package autoconf provides comprehensive tests for --expand-auto functionality. +// +// Test Scenarios: +// 1. Tests WITH daemon: Most tests start a daemon to fetch and cache autoconf data, +// then test CLI commands that read from that cache using MustGetConfigCached. +// 2. Tests WITHOUT daemon: Error condition tests that don't need cached autoconf. +// +// The daemon setup uses startDaemonAndWaitForAutoConf() helper which: +// - Starts the daemon +// - Waits for HTTP request to mock server (not arbitrary timeout) +// - Returns when autoconf is cached and ready for CLI commands +package autoconf + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExpandAutoComprehensive(t *testing.T) { + t.Parallel() + + t.Run("all autoconf fields resolve correctly", func(t *testing.T) { + t.Parallel() + testAllAutoConfFieldsResolve(t) + }) + + t.Run("bootstrap list --expand-auto matches config Bootstrap --expand-auto", func(t *testing.T) { + t.Parallel() + testBootstrapCommandConsistency(t) + }) + + t.Run("write operations fail with --expand-auto", func(t *testing.T) { + t.Parallel() + testWriteOperationsFailWithExpandAuto(t) + }) + + t.Run("config show --expand-auto provides complete expanded view", func(t *testing.T) { + t.Parallel() + testConfigShowExpandAutoComplete(t) + }) + + t.Run("multiple expand-auto calls use cache (single HTTP request)", func(t *testing.T) { + t.Parallel() + testMultipleExpandAutoUsesCache(t) + }) + + t.Run("CLI uses cache only while daemon handles background updates", func(t *testing.T) { + t.Parallel() + testCLIUsesCacheOnlyDaemonUpdatesBackground(t) + }) +} + +// testAllAutoConfFieldsResolve verifies that all autoconf fields (Bootstrap, DNS.Resolvers, +// Routing.DelegatedRouters, and Ipns.DelegatedPublishers) can be resolved from "auto" values +// to their actual configuration using --expand-auto flag with daemon-cached autoconf data. +// +// This test is critical because: +// 1. It validates the core autoconf resolution functionality across all supported fields +// 2. It ensures that "auto" placeholders are properly replaced with real configuration values +// 3. It verifies that the autoconf JSON structure is correctly parsed and applied +// 4. It tests the end-to-end flow from HTTP fetch to config field expansion +func testAllAutoConfFieldsResolve(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + // This validates core autoconf resolution functionality across all supported fields + + // Track HTTP requests to verify mock server is being used + var requestCount atomic.Int32 + var autoConfData []byte + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Mock autoconf server request #%d: %s %s", count, r.Method, r.URL.Path) + + // Create comprehensive autoconf response matching Schema 4 format + // Use server URLs to ensure they're reachable and valid + serverURL := fmt.Sprintf("http://%s", r.Host) // Get the server URL from the request + autoConf := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + "IPNI": map[string]interface{}{ + "URL": serverURL + "/ipni-system", + "Description": "Test IPNI system", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + "CustomIPNS": map[string]interface{}{ + "URL": serverURL + "/ipns-system", + "Description": "Test IPNS system", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + }, + "DNSResolvers": map[string][]string{ + ".": {"https://cloudflare-dns.com/dns-query"}, + "eth.": {"https://dns.google/dns-query"}, + }, + "DelegatedEndpoints": map[string]interface{}{ + serverURL: map[string]interface{}{ + "Systems": []string{"IPNI", "CustomIPNS"}, // Use non-AminoDHT systems to avoid filtering + "Read": []string{"/routing/v1/providers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + } + + var err error + autoConfData, err = json.Marshal(autoConf) + if err != nil { + t.Fatalf("Failed to marshal autoConf: %v", err) + } + + t.Logf("Serving mock autoconf data: %s", string(autoConfData)) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"test-mock-config"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with all auto values + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Clear any existing autoconf cache to prevent interference + result := node.RunIPFS("config", "show") + if result.ExitCode() == 0 { + var cfg map[string]interface{} + if json.Unmarshal([]byte(result.Stdout.String()), &cfg) == nil { + if repoPath, exists := cfg["path"]; exists { + if pathStr, ok := repoPath.(string); ok { + t.Logf("Clearing autoconf cache from %s/autoconf", pathStr) + // Note: We can't directly remove files, but clearing cache via config change should help + } + } + } + } + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("AutoConf.RefreshInterval", "1s") // Force fresh fetches for testing + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{ + ".": "auto", + "eth.": "auto", + }) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Test 1: Bootstrap resolution + result = node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Bootstrap expansion should succeed") + + var expandedBootstrap []string + var err error + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedBootstrap) + require.NoError(t, err) + + assert.NotContains(t, expandedBootstrap, "auto", "Bootstrap should not contain 'auto'") + assert.Contains(t, expandedBootstrap, "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN") + assert.Contains(t, expandedBootstrap, "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa") + t.Logf("Bootstrap expanded to: %v", expandedBootstrap) + + // Test 2: DNS.Resolvers resolution + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "DNS.Resolvers expansion should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + + assert.NotContains(t, expandedResolvers, "auto", "DNS.Resolvers should not contain 'auto'") + assert.Equal(t, "https://cloudflare-dns.com/dns-query", expandedResolvers["."]) + assert.Equal(t, "https://dns.google/dns-query", expandedResolvers["eth."]) + t.Logf("DNS.Resolvers expanded to: %v", expandedResolvers) + + // Test 3: Routing.DelegatedRouters resolution + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Routing.DelegatedRouters expansion should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + assert.NotContains(t, expandedRouters, "auto", "DelegatedRouters should not contain 'auto'") + + // Test should strictly require mock autoconf to work - no fallback acceptance + // The mock endpoint has Read paths ["/routing/v1/providers", "/routing/v1/ipns"] + // so we expect 2 URLs with those paths + expectedMockURLs := []string{ + server.URL + "/routing/v1/providers", + server.URL + "/routing/v1/ipns", + } + require.Equal(t, 2, len(expandedRouters), + "Should have exactly 2 routers from mock autoconf (one for each Read path). Got %d routers: %v. "+ + "This indicates autoconf is not working properly - check if mock server data is being parsed and filtered correctly.", + len(expandedRouters), expandedRouters) + + // Check that both expected URLs are present + for _, expectedURL := range expectedMockURLs { + assert.Contains(t, expandedRouters, expectedURL, + "Should contain mock autoconf endpoint with path %s. Got: %v. "+ + "This indicates autoconf endpoint path generation is not working properly.", + expectedURL, expandedRouters) + } + + // Test 4: Ipns.DelegatedPublishers resolution + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "Ipns.DelegatedPublishers expansion should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + assert.NotContains(t, expandedPublishers, "auto", "DelegatedPublishers should not contain 'auto'") + + // Test should require mock autoconf endpoint for IPNS publishing + // The mock endpoint supports /routing/v1/ipns write operations, so it should be included with path + expectedMockPublisherURL := server.URL + "/routing/v1/ipns" + require.Equal(t, 1, len(expandedPublishers), + "Should have exactly 1 IPNS publisher from mock autoconf. Got %d publishers: %v. "+ + "This indicates autoconf IPNS publisher filtering is not working properly.", + len(expandedPublishers), expandedPublishers) + assert.Equal(t, expectedMockPublisherURL, expandedPublishers[0], + "Should use mock autoconf endpoint %s for IPNS publishing, not fallback. Got: %s. "+ + "This indicates autoconf IPNS publisher resolution is not working properly.", + expectedMockPublisherURL, expandedPublishers[0]) + + // CRITICAL: Verify that mock server was actually used + finalRequestCount := requestCount.Load() + require.Greater(t, finalRequestCount, int32(0), + "Mock autoconf server should have been called at least once. Got %d requests. "+ + "This indicates the test is using cached or fallback config instead of mock data.", finalRequestCount) + t.Logf("Mock server was called %d times - test is using mock data", finalRequestCount) +} + +// testBootstrapCommandConsistency verifies that `ipfs bootstrap list --expand-auto` and +// `ipfs config Bootstrap --expand-auto` return identical results when both use autoconf. +// +// This test is important because: +// 1. It ensures consistency between different CLI commands that access the same data +// 2. It validates that both the bootstrap-specific command and generic config command +// use the same underlying autoconf resolution mechanism +// 3. It prevents regression where different commands might resolve "auto" differently +// 4. It ensures users get consistent results regardless of which command they use +func testBootstrapCommandConsistency(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + // This ensures both bootstrap commands read from the same cached autoconf data + + // Load test autoconf data + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests to verify daemon fetches autoconf + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Add(1) + t.Logf("Bootstrap consistency test request: %s %s", r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with auto bootstrap + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Get bootstrap via config command + configResult := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, configResult.ExitCode(), "config Bootstrap --expand-auto should succeed") + + // Get bootstrap via bootstrap command + bootstrapResult := node.RunIPFS("bootstrap", "list", "--expand-auto") + require.Equal(t, 0, bootstrapResult.ExitCode(), "bootstrap list --expand-auto should succeed") + + // Parse both results + var configBootstrap, bootstrapBootstrap []string + err := json.Unmarshal([]byte(configResult.Stdout.String()), &configBootstrap) + require.NoError(t, err) + + // Bootstrap command output is line-separated, not JSON + bootstrapOutput := strings.TrimSpace(bootstrapResult.Stdout.String()) + if bootstrapOutput != "" { + bootstrapBootstrap = strings.Split(bootstrapOutput, "\n") + } + + // Results should be equivalent + assert.Equal(t, len(configBootstrap), len(bootstrapBootstrap), "Both commands should return same number of peers") + + // Both should contain same peers (order might differ due to different output formats) + for _, peer := range configBootstrap { + found := false + for _, bsPeer := range bootstrapBootstrap { + if strings.TrimSpace(bsPeer) == peer { + found = true + break + } + } + assert.True(t, found, "Peer %s should be in both results", peer) + } + + t.Logf("Config command result: %v", configBootstrap) + t.Logf("Bootstrap command result: %v", bootstrapBootstrap) +} + +// testWriteOperationsFailWithExpandAuto verifies that --expand-auto flag is properly +// restricted to read-only operations and fails when used with config write operations. +// +// This test is essential because: +// 1. It enforces the security principle that --expand-auto should only be used for reading +// 2. It prevents users from accidentally overwriting config with expanded values +// 3. It ensures that "auto" placeholders are preserved in the stored configuration +// 4. It validates proper error handling and user guidance when misused +// 5. It protects against accidental loss of the "auto" semantic meaning +func testWriteOperationsFailWithExpandAuto(t *testing.T) { + // Test scenario: CLI without daemon (tests error conditions) + // This test doesn't need daemon setup since it's testing that write operations + // with --expand-auto should fail with appropriate error messages + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test that setting config with --expand-auto fails + testCases := []struct { + name string + args []string + }{ + {"config set with expand-auto", []string{"config", "Bootstrap", "[\"test\"]", "--expand-auto"}}, + {"config set JSON with expand-auto", []string{"config", "Bootstrap", "[\"test\"]", "--json", "--expand-auto"}}, + {"config set bool with expand-auto", []string{"config", "SomeField", "true", "--bool", "--expand-auto"}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := node.RunIPFS(tc.args...) + assert.NotEqual(t, 0, result.ExitCode(), "Write operation with --expand-auto should fail") + + stderr := result.Stderr.String() + assert.Contains(t, stderr, "--expand-auto", "Error should mention --expand-auto") + assert.Contains(t, stderr, "reading", "Error should mention reading limitation") + t.Logf("Expected error: %s", stderr) + }) + } +} + +// testConfigShowExpandAutoComplete verifies that `ipfs config show --expand-auto` +// produces a complete configuration with all "auto" values expanded to their resolved forms. +// +// This test is important because: +// 1. It validates the full-config expansion functionality for comprehensive troubleshooting +// 2. It ensures that users can see the complete resolved configuration state +// 3. It verifies that all "auto" placeholders are replaced, not just individual fields +// 4. It tests that the resulting JSON is valid and well-formed +// 5. It provides a way to export/backup the fully expanded configuration +func testConfigShowExpandAutoComplete(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + + // Load test autoconf data + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests to verify daemon fetches autoconf + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Add(1) + t.Logf("Config show test request: %s %s", r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with multiple auto values + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{".": "auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Test config show --expand-auto + result := node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config show --expand-auto should succeed") + + expandedConfig := result.Stdout.String() + + // Should not contain any literal "auto" values + assert.NotContains(t, expandedConfig, `"auto"`, "Expanded config should not contain literal 'auto' values") + + // Should contain expected expanded sections + assert.Contains(t, expandedConfig, `"Bootstrap"`, "Should contain Bootstrap section") + assert.Contains(t, expandedConfig, `"DNS"`, "Should contain DNS section") + assert.Contains(t, expandedConfig, `"Resolvers"`, "Should contain Resolvers section") + + // Should contain expanded peer addresses (not "auto") + assert.Contains(t, expandedConfig, "bootstrap.libp2p.io", "Should contain expanded bootstrap peers") + + // Should be valid JSON + var configMap map[string]interface{} + err := json.Unmarshal([]byte(expandedConfig), &configMap) + require.NoError(t, err, "Expanded config should be valid JSON") + + // Verify specific fields were expanded + if bootstrap, ok := configMap["Bootstrap"].([]interface{}); ok { + assert.Greater(t, len(bootstrap), 0, "Bootstrap should have expanded entries") + for _, peer := range bootstrap { + assert.NotEqual(t, "auto", peer, "Bootstrap entries should not be 'auto'") + } + } + + t.Logf("Config show --expand-auto produced %d characters of expanded config", len(expandedConfig)) +} + +// testMultipleExpandAutoUsesCache verifies that multiple consecutive --expand-auto calls +// efficiently use cached autoconf data instead of making repeated HTTP requests. +// +// This test is critical for performance because: +// 1. It validates that the caching mechanism works correctly to reduce network overhead +// 2. It ensures that users can make multiple config queries without causing excessive HTTP traffic +// 3. It verifies that cached data is shared across different config fields and commands +// 4. It tests that HTTP headers (ETag/Last-Modified) are properly used for cache validation +// 5. It prevents regression where each --expand-auto call would trigger a new HTTP request +// 6. It demonstrates the performance benefit: 5 operations with only 1 network request +func testMultipleExpandAutoUsesCache(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached + + // Create comprehensive autoconf response + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests to verify caching + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("AutoConf cache test request #%d: %s %s", count, r.Method, r.URL.Path) + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"cache-test-123"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with all auto values + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + // Note: Using default RefreshInterval (24h) to ensure caching - explicit setting would require rebuilt binary + + // Set up auto values for multiple fields + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Reset counter to only track our expand-auto calls + requestCount.Store(0) + + // Make multiple --expand-auto calls on different fields + t.Log("Testing multiple --expand-auto calls should use cache...") + + // Call 1: Bootstrap --expand-auto (should trigger HTTP request) + result1 := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result1.ExitCode(), "Bootstrap --expand-auto should succeed") + + var expandedBootstrap []string + err := json.Unmarshal([]byte(result1.Stdout.String()), &expandedBootstrap) + require.NoError(t, err) + assert.NotContains(t, expandedBootstrap, "auto", "Bootstrap should be expanded") + assert.Greater(t, len(expandedBootstrap), 0, "Bootstrap should have entries") + + // Call 2: DNS.Resolvers --expand-auto (should use cache, no HTTP) + result2 := node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result2.ExitCode(), "DNS.Resolvers --expand-auto should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result2.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + + // Call 3: Routing.DelegatedRouters --expand-auto (should use cache, no HTTP) + result3 := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result3.ExitCode(), "Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result3.Stdout.String()), &expandedRouters) + require.NoError(t, err) + assert.NotContains(t, expandedRouters, "auto", "Routers should be expanded") + + // Call 4: Ipns.DelegatedPublishers --expand-auto (should use cache, no HTTP) + result4 := node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result4.ExitCode(), "Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result4.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + assert.NotContains(t, expandedPublishers, "auto", "Publishers should be expanded") + + // Call 5: config show --expand-auto (should use cache, no HTTP) + result5 := node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result5.ExitCode(), "config show --expand-auto should succeed") + + expandedConfig := result5.Stdout.String() + assert.NotContains(t, expandedConfig, `"auto"`, "Full config should not contain 'auto' values") + + // CRITICAL TEST: Verify NO HTTP requests were made for --expand-auto calls (using cache) + finalRequestCount := requestCount.Load() + assert.Equal(t, int32(0), finalRequestCount, + "Multiple --expand-auto calls should result in 0 HTTP requests (using cache). Got %d requests", finalRequestCount) + + t.Logf("Made 5 --expand-auto calls, resulted in %d HTTP request(s) - cache is being used!", finalRequestCount) + + // Now simulate a manual cache refresh (what the background updater would do) + t.Log("Simulating manual cache refresh...") + + // Update the mock server to return different data + autoConfData2 := loadTestDataComprehensive(t, "updated_autoconf.json") + server.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Manual refresh request #%d: %s %s", count, r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"cache-test-456"`) + w.Header().Set("Last-Modified", "Thu, 22 Oct 2015 08:00:00 GMT") + _, _ = w.Write(autoConfData2) + }) + + // Note: In the actual daemon, the background updater would call MustGetConfigWithRefresh + // For this test, we'll verify that subsequent --expand-auto calls still use cache + // and don't trigger additional requests + + // Reset counter before manual refresh simulation + beforeRefresh := requestCount.Load() + + // Make another --expand-auto call - should still use cache + result6 := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result6.ExitCode(), "Bootstrap --expand-auto after refresh should succeed") + + afterRefresh := requestCount.Load() + assert.Equal(t, beforeRefresh, afterRefresh, + "--expand-auto should continue using cache even after server update") + + t.Logf("Cache continues to be used after server update - background updater pattern confirmed!") +} + +// testCLIUsesCacheOnlyDaemonUpdatesBackground verifies the correct autoconf behavior: +// daemon makes exactly one HTTP request during startup to fetch and cache data, then +// CLI commands always use cached data without making additional HTTP requests. +// +// This test is essential for correctness because: +// 1. It validates that daemon startup makes exactly one HTTP request to fetch autoconf +// 2. It verifies that CLI --expand-auto never makes HTTP requests (uses cache only) +// 3. It ensures CLI commands remain fast by always using cached data +// 4. It prevents regression where CLI commands might start making HTTP requests +// 5. It confirms the correct separation between daemon (network) and CLI (cache-only) behavior +func testCLIUsesCacheOnlyDaemonUpdatesBackground(t *testing.T) { + // Test scenario: CLI with daemon and long RefreshInterval (no background updates during test) + + // Create autoconf response + autoConfData := loadTestDataComprehensive(t, "valid_autoconf.json") + + // Track HTTP requests with timestamps + var requestCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count := requestCount.Add(1) + t.Logf("Cache expiry test request #%d at %s: %s %s", count, time.Now().Format("15:04:05.000"), r.Method, r.URL.Path) + + w.Header().Set("Content-Type", "application/json") + // Use different ETag for each request to ensure we can detect new fetches + w.Header().Set("ETag", fmt.Sprintf(`"expiry-test-%d"`, count)) + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with long refresh interval + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + // Set long RefreshInterval to avoid background updates during test + node.SetIPFSConfig("AutoConf.RefreshInterval", "1h") + + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"test.": "auto"}) + + // Start daemon and wait for autoconf fetch + daemon := startDaemonAndWaitForAutoConf(t, node, &requestCount) + defer daemon.StopDaemon() + + // Confirm only one request was made during daemon startup + initialRequestCount := requestCount.Load() + assert.Equal(t, int32(1), initialRequestCount, "Expected exactly 1 HTTP request during daemon startup, got: %d", initialRequestCount) + t.Logf("Daemon startup made exactly 1 HTTP request") + + // Test: CLI commands use cache only (no additional HTTP requests) + t.Log("Testing that CLI --expand-auto commands use cache only...") + + // Make several CLI calls - none should trigger HTTP requests + result1 := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result1.ExitCode(), "Bootstrap --expand-auto should succeed") + + result2 := node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result2.ExitCode(), "DNS.Resolvers --expand-auto should succeed") + + result3 := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result3.ExitCode(), "Routing.DelegatedRouters --expand-auto should succeed") + + // Verify the request count remains at 1 (no additional requests from CLI) + finalRequestCount := requestCount.Load() + assert.Equal(t, int32(1), finalRequestCount, "Request count should remain at 1 after CLI commands, got: %d", finalRequestCount) + t.Log("CLI commands use cache only - request count remains at 1") + + t.Log("Test completed: Daemon makes 1 startup request, CLI commands use cache only") +} + +// loadTestDataComprehensive is a helper function that loads test autoconf JSON data files. +// It locates the test data directory relative to the test file and reads the specified file. +// This centralized helper ensures consistent test data loading across all comprehensive tests. +func loadTestDataComprehensive(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: %s", filename) + + return data +} + +// startDaemonAndWaitForAutoConf starts a daemon and waits for it to fetch autoconf data. +// It returns the node with daemon running and ensures autoconf has been cached before returning. +// This is a DRY helper to avoid repeating daemon setup and request waiting logic in every test. +func startDaemonAndWaitForAutoConf(t *testing.T, node *harness.Node, requestCount *atomic.Int32) *harness.Node { + t.Helper() + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + // StartDaemon returns *Node, no error to check + + // Wait for daemon to fetch autoconf (wait for HTTP request to mock server) + t.Log("Waiting for daemon to fetch autoconf from mock server...") + timeout := time.After(10 * time.Second) // Safety timeout + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-timeout: + t.Fatal("Timeout waiting for autoconf fetch") + case <-ticker.C: + if requestCount.Load() > 0 { + t.Logf("Daemon fetched autoconf (%d requests made)", requestCount.Load()) + t.Log("AutoConf should now be cached by daemon") + return daemon + } + } + } +} diff --git a/test/cli/autoconf/expand_fallback_test.go b/test/cli/autoconf/expand_fallback_test.go new file mode 100644 index 000000000..f6fc1e8d3 --- /dev/null +++ b/test/cli/autoconf/expand_fallback_test.go @@ -0,0 +1,286 @@ +package autoconf + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/ipfs/boxo/autoconf" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExpandAutoFallbacks(t *testing.T) { + t.Parallel() + + t.Run("expand-auto with unreachable server shows fallbacks", func(t *testing.T) { + t.Parallel() + testExpandAutoWithUnreachableServer(t) + }) + + t.Run("expand-auto with disabled autoconf shows error", func(t *testing.T) { + t.Parallel() + testExpandAutoWithDisabledAutoConf(t) + }) + + t.Run("expand-auto with malformed response shows fallbacks", func(t *testing.T) { + t.Parallel() + testExpandAutoWithMalformedResponse(t) + }) + + t.Run("expand-auto preserves static values in mixed config", func(t *testing.T) { + t.Parallel() + testExpandAutoMixedConfigPreservesStatic(t) + }) + + t.Run("daemon gracefully handles malformed autoconf and uses fallbacks", func(t *testing.T) { + t.Parallel() + testDaemonWithMalformedAutoConf(t) + }) +} + +func testExpandAutoWithUnreachableServer(t *testing.T) { + // Create IPFS node with unreachable AutoConf server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", "http://127.0.0.1:99999/nonexistent") // Unreachable + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Test that --expand-auto falls back to defaults when server is unreachable + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed even with unreachable server") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should contain fallback bootstrap peers (not "auto" and not empty) + assert.NotContains(t, bootstrap, "auto", "Fallback bootstrap should not contain 'auto'") + assert.Greater(t, len(bootstrap), 0, "Fallback bootstrap should not be empty") + + // Should contain known default bootstrap peers + foundDefaultPeer := false + for _, peer := range bootstrap { + if peer != "" && peer != "auto" { + foundDefaultPeer = true + t.Logf("Found fallback bootstrap peer: %s", peer) + break + } + } + assert.True(t, foundDefaultPeer, "Should contain at least one fallback bootstrap peer") + + // Test DNS resolvers fallback + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config DNS.Resolvers --expand-auto should succeed with unreachable server") + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + + // When autoconf server is unreachable, DNS resolvers should fall back to defaults + // The "foo." resolver should not exist in fallbacks (only "eth." has fallback) + fooResolver, fooExists := resolvers["foo."] + + if !fooExists { + t.Log("DNS resolver for 'foo.' has no fallback - correct behavior (only eth. has fallbacks)") + } else { + assert.NotEqual(t, "auto", fooResolver, "DNS resolver should not be 'auto' after expansion") + t.Logf("Unexpected DNS resolver for foo.: %s", fooResolver) + } +} + +func testExpandAutoWithDisabledAutoConf(t *testing.T) { + // Create IPFS node with AutoConf disabled + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", false) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test that --expand-auto with disabled AutoConf returns appropriate error or fallback + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + + // When AutoConf is disabled, expand-auto should show empty results + // since "auto" values are not expanded when AutoConf.Enabled=false + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // With AutoConf disabled, "auto" values are not expanded so we get empty result + assert.NotContains(t, bootstrap, "auto", "Should not contain 'auto' after expansion") + assert.Equal(t, 0, len(bootstrap), "Should be empty when AutoConf disabled (auto values not expanded)") + t.Log("Bootstrap is empty when AutoConf disabled - correct behavior") +} + +func testExpandAutoWithMalformedResponse(t *testing.T) { + // Create server that returns malformed JSON + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{"invalid": "json", "Bootstrap": [incomplete`)) // Malformed JSON + })) + defer server.Close() + + // Create IPFS node with malformed autoconf server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Test that --expand-auto handles malformed response gracefully + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed even with malformed response") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should fall back to defaults, not contain "auto" + assert.NotContains(t, bootstrap, "auto", "Should not contain 'auto' after fallback") + assert.Greater(t, len(bootstrap), 0, "Should contain fallback peers after malformed response") + t.Logf("Bootstrap after malformed response: %v", bootstrap) +} + +func testExpandAutoMixedConfigPreservesStatic(t *testing.T) { + // Load valid test autoconf data + autoConfData := loadTestDataForFallback(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Create IPFS node with mixed auto and static values + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Set mixed configuration: static + auto + static + node.SetIPFSConfig("Bootstrap", []string{ + "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", + "auto", + "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", + }) + + // Test that --expand-auto only expands "auto" values, preserves static ones + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should not contain literal "auto" anymore + assert.NotContains(t, bootstrap, "auto", "Expanded config should not contain literal 'auto'") + + // Should preserve static values at original positions + assert.Contains(t, bootstrap, "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", "Should preserve first static peer") + assert.Contains(t, bootstrap, "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", "Should preserve third static peer") + + // Should have more entries than just the static ones (auto got expanded) + assert.Greater(t, len(bootstrap), 2, "Should have more than just the 2 static peers") + + t.Logf("Mixed config expansion result: %v", bootstrap) + + // Verify order is preserved: static, expanded auto values, static + assert.Equal(t, "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", bootstrap[0], "First peer should be preserved") + lastIndex := len(bootstrap) - 1 + assert.Equal(t, "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", bootstrap[lastIndex], "Last peer should be preserved") +} + +func testDaemonWithMalformedAutoConf(t *testing.T) { + // Test scenario: Daemon starts with AutoConf.URL pointing to server that returns malformed JSON + // This tests that daemon gracefully handles malformed responses and falls back to hardcoded defaults + + // Create server that returns malformed JSON to simulate broken autoconf service + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + // Return malformed JSON that cannot be parsed + _, _ = w.Write([]byte(`{"Bootstrap": ["incomplete array", "missing closing bracket"`)) + })) + defer server.Close() + + // Create IPFS node with autoconf pointing to malformed server + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Start daemon - this will attempt to fetch autoconf from malformed server + t.Log("Starting daemon with malformed autoconf server...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for daemon to attempt autoconf fetch and handle the error gracefully + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("Daemon should have attempted autoconf fetch and fallen back to defaults") + + // Test that daemon is still running and CLI commands work with fallback values + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed with daemon running") + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + + // Should fall back to hardcoded defaults from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.NotContains(t, bootstrap, "auto", "Should not contain 'auto' after fallback") + assert.Greater(t, len(bootstrap), 0, "Should contain fallback bootstrap peers") + + // Verify we got actual fallback bootstrap peers from GetMainnetFallbackConfig() AminoDHT NativeConfig + fallbackConfig := autoconf.GetMainnetFallbackConfig() + aminoDHTSystem := fallbackConfig.SystemRegistry["AminoDHT"] + expectedBootstrapPeers := aminoDHTSystem.NativeConfig.Bootstrap + + foundFallbackPeers := 0 + for _, expectedPeer := range expectedBootstrapPeers { + for _, actualPeer := range bootstrap { + if actualPeer == expectedPeer { + foundFallbackPeers++ + break + } + } + } + assert.Greater(t, foundFallbackPeers, 0, "Should contain bootstrap peers from GetMainnetFallbackConfig() AminoDHT NativeConfig") + assert.Equal(t, len(expectedBootstrapPeers), foundFallbackPeers, "Should contain all bootstrap peers from GetMainnetFallbackConfig() AminoDHT NativeConfig") + + t.Logf("Daemon fallback bootstrap peers after malformed response: %v", bootstrap) + + // Test DNS resolvers also fall back correctly + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config DNS.Resolvers --expand-auto should succeed with daemon running") + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + + // Should not contain "auto" and should have fallback DNS resolvers + assert.NotEqual(t, "auto", resolvers["foo."], "DNS resolver should not be 'auto' after fallback") + if resolvers["foo."] != "" { + // If resolver is populated, it should be a valid URL from fallbacks + assert.Contains(t, resolvers["foo."], "https://", "Fallback DNS resolver should be HTTPS URL") + } + + t.Logf("Daemon fallback DNS resolvers after malformed response: %v", resolvers) + + // Verify daemon is still healthy and responsive + versionResult := node.RunIPFS("version") + require.Equal(t, 0, versionResult.ExitCode(), "daemon should remain healthy after handling malformed autoconf") + t.Log("Daemon remains healthy after gracefully handling malformed autoconf response") +} + +// Helper function to load test data files for fallback tests +func loadTestDataForFallback(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: %s", filename) + + return data +} diff --git a/test/cli/autoconf/expand_test.go b/test/cli/autoconf/expand_test.go new file mode 100644 index 000000000..45a46560f --- /dev/null +++ b/test/cli/autoconf/expand_test.go @@ -0,0 +1,732 @@ +package autoconf + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfExpand(t *testing.T) { + t.Parallel() + + t.Run("config commands show auto values", func(t *testing.T) { + t.Parallel() + testConfigCommandsShowAutoValues(t) + }) + + t.Run("mixed configuration preserves both auto and static", func(t *testing.T) { + t.Parallel() + testMixedConfigurationPreserved(t) + }) + + t.Run("config replace preserves auto values", func(t *testing.T) { + t.Parallel() + testConfigReplacePreservesAuto(t) + }) + + t.Run("expand-auto filters unsupported URL paths with delegated routing", func(t *testing.T) { + t.Parallel() + testExpandAutoFiltersUnsupportedPathsDelegated(t) + }) + + t.Run("expand-auto with auto routing uses NewRoutingSystem", func(t *testing.T) { + t.Parallel() + testExpandAutoWithAutoRouting(t) + }) + + t.Run("expand-auto with auto routing shows AminoDHT native vs IPNI delegated", func(t *testing.T) { + t.Parallel() + testExpandAutoWithMixedSystems(t) + }) + + t.Run("expand-auto filters paths with NewRoutingSystem and auto routing", func(t *testing.T) { + t.Parallel() + testExpandAutoWithFiltering(t) + }) + + t.Run("expand-auto falls back to defaults without cache (delegated)", func(t *testing.T) { + t.Parallel() + testExpandAutoWithoutCacheDelegated(t) + }) + + t.Run("expand-auto with auto routing without cache", func(t *testing.T) { + t.Parallel() + testExpandAutoWithoutCacheAuto(t) + }) +} + +func testConfigCommandsShowAutoValues(t *testing.T) { + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Set all fields to "auto" + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Test individual field queries + t.Run("Bootstrap shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, bootstrap) + }) + + t.Run("DNS.Resolvers shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + + var resolvers map[string]string + err := json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + assert.Equal(t, map[string]string{"foo.": "auto"}, resolvers) + }) + + t.Run("Routing.DelegatedRouters shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, routers) + }) + + t.Run("Ipns.DelegatedPublishers shows auto", func(t *testing.T) { + result := node.RunIPFS("config", "Ipns.DelegatedPublishers") + require.Equal(t, 0, result.ExitCode()) + + var publishers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &publishers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, publishers) + }) + + t.Run("config show contains all auto values", func(t *testing.T) { + result := node.RunIPFS("config", "show") + require.Equal(t, 0, result.ExitCode()) + + output := result.Stdout.String() + + // Check that auto values are present in the full config + assert.Contains(t, output, `"Bootstrap": [ + "auto" + ]`, "Bootstrap should contain auto") + + assert.Contains(t, output, `"DNS": { + "Resolvers": { + "foo.": "auto" + } + }`, "DNS.Resolvers should contain auto") + + assert.Contains(t, output, `"DelegatedRouters": [ + "auto" + ]`, "Routing.DelegatedRouters should contain auto") + + assert.Contains(t, output, `"DelegatedPublishers": [ + "auto" + ]`, "Ipns.DelegatedPublishers should contain auto") + }) + + // Test with autoconf server for --expand-auto functionality + t.Run("config with --expand-auto expands auto values", func(t *testing.T) { + // Load test autoconf data + autoConfData := loadTestDataExpand(t, "valid_autoconf.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Test Bootstrap field expansion + result := node.RunIPFS("config", "Bootstrap", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Bootstrap --expand-auto should succeed") + + var expandedBootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedBootstrap) + require.NoError(t, err) + assert.NotContains(t, expandedBootstrap, "auto", "Expanded bootstrap should not contain 'auto'") + assert.Greater(t, len(expandedBootstrap), 0, "Expanded bootstrap should contain expanded peers") + + // Test DNS.Resolvers field expansion + result = node.RunIPFS("config", "DNS.Resolvers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config DNS.Resolvers --expand-auto should succeed") + + var expandedResolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedResolvers) + require.NoError(t, err) + assert.NotEqual(t, "auto", expandedResolvers["foo."], "Expanded DNS resolver should not be 'auto'") + + // Test Routing.DelegatedRouters field expansion + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + assert.NotContains(t, expandedRouters, "auto", "Expanded routers should not contain 'auto'") + + // Test Ipns.DelegatedPublishers field expansion + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + assert.NotContains(t, expandedPublishers, "auto", "Expanded publishers should not contain 'auto'") + + // Test config show --expand-auto (full config expansion) + result = node.RunIPFS("config", "show", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config show --expand-auto should succeed") + + expandedOutput := result.Stdout.String() + t.Logf("Expanded config output contains: %d characters", len(expandedOutput)) + + // Verify that auto values are expanded in the full config + assert.NotContains(t, expandedOutput, `"auto"`, "Expanded config should not contain literal 'auto' values") + assert.Contains(t, expandedOutput, `"Bootstrap"`, "Expanded config should contain Bootstrap section") + assert.Contains(t, expandedOutput, `"DNS"`, "Expanded config should contain DNS section") + }) +} + +func testMixedConfigurationPreserved(t *testing.T) { + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Set mixed configuration + node.SetIPFSConfig("Bootstrap", []string{ + "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", + "auto", + "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", + }) + + node.SetIPFSConfig("DNS.Resolvers", map[string]string{ + "eth.": "https://eth.resolver", + "foo.": "auto", + "bar.": "https://bar.resolver", + }) + + node.SetIPFSConfig("Routing.DelegatedRouters", []string{ + "https://static.router", + "auto", + }) + + // Verify Bootstrap preserves order and mixes auto with static + result := node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + + var bootstrap []string + err := json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + assert.Equal(t, []string{ + "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWTest", + "auto", + "/ip4/127.0.0.2/tcp/4001/p2p/12D3KooWTest2", + }, bootstrap) + + // Verify DNS.Resolvers preserves both auto and static + result = node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + assert.Equal(t, "https://eth.resolver", resolvers["eth."]) + assert.Equal(t, "auto", resolvers["foo."]) + assert.Equal(t, "https://bar.resolver", resolvers["bar."]) + + // Verify Routing.DelegatedRouters preserves order + result = node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{ + "https://static.router", + "auto", + }, routers) +} + +func testConfigReplacePreservesAuto(t *testing.T) { + // Create IPFS node + h := harness.NewT(t) + node := h.NewNode().Init("--profile=test") + + // Set initial auto values + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Export current config + result := node.RunIPFS("config", "show") + require.Equal(t, 0, result.ExitCode()) + originalConfig := result.Stdout.String() + + // Verify auto values are in the exported config + assert.Contains(t, originalConfig, `"Bootstrap": [ + "auto" + ]`) + assert.Contains(t, originalConfig, `"foo.": "auto"`) + + // Modify the config string to add a new field but preserve auto values + var configMap map[string]interface{} + err := json.Unmarshal([]byte(originalConfig), &configMap) + require.NoError(t, err) + + // Add a new field + configMap["NewTestField"] = "test-value" + + // Marshal back to JSON + modifiedConfig, err := json.MarshalIndent(configMap, "", " ") + require.NoError(t, err) + + // Write config to file and replace + configFile := h.WriteToTemp(string(modifiedConfig)) + replaceResult := node.RunIPFS("config", "replace", configFile) + if replaceResult.ExitCode() != 0 { + t.Logf("Config replace failed: stdout=%s, stderr=%s", replaceResult.Stdout.String(), replaceResult.Stderr.String()) + } + require.Equal(t, 0, replaceResult.ExitCode()) + + // Verify auto values are still present after replace + result = node.RunIPFS("config", "Bootstrap") + require.Equal(t, 0, result.ExitCode()) + + var bootstrap []string + err = json.Unmarshal([]byte(result.Stdout.String()), &bootstrap) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, bootstrap, "Bootstrap should still contain auto after config replace") + + // Verify DNS resolver config is preserved after replace + result = node.RunIPFS("config", "DNS.Resolvers") + require.Equal(t, 0, result.ExitCode()) + + var resolvers map[string]string + err = json.Unmarshal([]byte(result.Stdout.String()), &resolvers) + require.NoError(t, err) + assert.Equal(t, "auto", resolvers["foo."], "DNS resolver for foo. should still be auto after config replace") +} + +func testExpandAutoFiltersUnsupportedPathsDelegated(t *testing.T) { + // Test scenario: CLI with daemon started and autoconf cached using delegated routing + // This tests the production scenario where delegated routing is enabled and + // daemon has fetched and cached autoconf data, and CLI commands read from that cache + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure delegated routing to use autoconf URLs + node.SetIPFSConfig("Routing.Type", "delegated") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + // Disable content providing when using delegated routing + node.SetIPFSConfig("Provider.Enabled", false) + node.SetIPFSConfig("Reprovider.Interval", "0") + + // Load test autoconf data with unsupported paths + autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json") + + // Create HTTP server that serves autoconf.json with unsupported paths + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Verify the autoconf URL is set correctly + result := node.RunIPFS("config", "AutoConf.URL") + require.Equal(t, 0, result.ExitCode(), "config AutoConf.URL should succeed") + t.Logf("AutoConf URL is set to: %s", result.Stdout.String()) + assert.Contains(t, result.Stdout.String(), "127.0.0.1", "AutoConf URL should contain the test server address") + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion filters unsupported paths + result = node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // After cache prewarming, should get URLs from autoconf that have supported paths + assert.Contains(t, expandedRouters, "https://supported.example.com/routing/v1/providers", "Should contain supported provider URL") + assert.Contains(t, expandedRouters, "https://supported.example.com/routing/v1/peers", "Should contain supported peers URL") + assert.Contains(t, expandedRouters, "https://mixed.example.com/routing/v1/providers", "Should contain mixed provider URL") + assert.Contains(t, expandedRouters, "https://mixed.example.com/routing/v1/peers", "Should contain mixed peers URL") + + // Verify unsupported URLs from autoconf are filtered out (not in result) + assert.NotContains(t, expandedRouters, "https://unsupported.example.com/example/v0/read", "Should filter out unsupported path /example/v0/read") + assert.NotContains(t, expandedRouters, "https://unsupported.example.com/api/v1/custom", "Should filter out unsupported path /api/v1/custom") + assert.NotContains(t, expandedRouters, "https://mixed.example.com/unsupported/path", "Should filter out unsupported path /unsupported/path") + + t.Logf("Filtered routers: %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion filters unsupported paths + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // After cache prewarming, should get URLs from autoconf that have supported paths + assert.Contains(t, expandedPublishers, "https://supported.example.com/routing/v1/ipns", "Should contain supported IPNS URL") + assert.Contains(t, expandedPublishers, "https://mixed.example.com/routing/v1/ipns", "Should contain mixed IPNS URL") + + // Verify unsupported URLs from autoconf are filtered out (not in result) + assert.NotContains(t, expandedPublishers, "https://unsupported.example.com/example/v0/write", "Should filter out unsupported write path") + + t.Logf("Filtered publishers: %v", expandedPublishers) +} + +func testExpandAutoWithoutCacheDelegated(t *testing.T) { + // Test scenario: CLI without daemon ever starting (no cached autoconf) using delegated routing + // This tests the fallback scenario where delegated routing is configured but CLI commands + // cannot read from cache and must fall back to hardcoded defaults + + // Create IPFS node but DO NOT start daemon + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure delegated routing to use autoconf URLs (but no daemon to fetch them) + node.SetIPFSConfig("Routing.Type", "delegated") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + // Disable content providing when using delegated routing + node.SetIPFSConfig("Provider.Enabled", false) + node.SetIPFSConfig("Reprovider.Interval", "0") + + // Load test autoconf data with unsupported paths (this won't be used since no daemon) + autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json") + + // Create HTTP server that serves autoconf.json with unsupported paths + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node (but daemon never starts to fetch it) + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Test Routing.DelegatedRouters field expansion without cached autoconf + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // Without cached autoconf, should get fallback URLs from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.Contains(t, expandedRouters, "https://cid.contact/routing/v1/providers", "Should contain fallback provider URL from GetMainnetFallbackConfig()") + + t.Logf("Fallback routers (no cache): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion without cached autoconf + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // Without cached autoconf, should get fallback IPNS publishers from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.Contains(t, expandedPublishers, "https://delegated-ipfs.dev/routing/v1/ipns", "Should contain fallback IPNS URL from GetMainnetFallbackConfig()") + + t.Logf("Fallback publishers (no cache): %v", expandedPublishers) +} + +func testExpandAutoWithAutoRouting(t *testing.T) { + // Test scenario: CLI with daemon started using auto routing with NewRoutingSystem + // This tests that non-native systems (NewRoutingSystem) ARE delegated even with auto routing + // Only native systems like AminoDHT are handled internally with auto routing + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing with non-native system + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data with NewRoutingSystem (non-native, will be delegated) + autoConfData := loadTestDataExpand(t, "autoconf_new_routing_system.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion with auto routing + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // With auto routing and NewRoutingSystem (non-native), delegated endpoints should be populated + assert.Contains(t, expandedRouters, "https://new-routing.example.com/routing/v1/providers", "Should contain NewRoutingSystem provider URL") + assert.Contains(t, expandedRouters, "https://new-routing.example.com/routing/v1/peers", "Should contain NewRoutingSystem peers URL") + + t.Logf("Auto routing routers (NewRoutingSystem delegated): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion with auto routing + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // With auto routing and NewRoutingSystem (non-native), delegated publishers should be populated + assert.Contains(t, expandedPublishers, "https://new-routing.example.com/routing/v1/ipns", "Should contain NewRoutingSystem IPNS URL") + + t.Logf("Auto routing publishers (NewRoutingSystem delegated): %v", expandedPublishers) +} + +func testExpandAutoWithMixedSystems(t *testing.T) { + // Test scenario: Auto routing with both AminoDHT (native) and IPNI (delegated) systems + // This explicitly confirms that AminoDHT is NOT delegated but IPNI at cid.contact IS delegated + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data with both AminoDHT and IPNI systems + autoConfData := loadTestDataExpand(t, "autoconf_amino_and_ipni.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // With auto routing: AminoDHT (native) should NOT be delegated, IPNI should be delegated + assert.Contains(t, expandedRouters, "https://cid.contact/routing/v1/providers", "Should contain IPNI provider URL (delegated)") + assert.NotContains(t, expandedRouters, "https://amino-dht.example.com", "Should NOT contain AminoDHT URLs (native)") + + t.Logf("Mixed systems routers (IPNI delegated, AminoDHT native): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // IPNI system doesn't have write endpoints, so publishers should be empty + // (or contain other systems if they have write endpoints) + t.Logf("Mixed systems publishers (IPNI has no write endpoints): %v", expandedPublishers) +} + +func testExpandAutoWithFiltering(t *testing.T) { + // Test scenario: Auto routing with NewRoutingSystem and path filtering + // This tests that path filtering works for delegated systems even with auto routing + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data with NewRoutingSystem and mixed valid/invalid paths + autoConfData := loadTestDataExpand(t, "autoconf_new_routing_with_filtering.json") + + // Create HTTP server that serves autoconf.json + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Start daemon to fetch and cache autoconf data + t.Log("Starting daemon to fetch and cache autoconf data...") + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Wait for autoconf fetch (use autoconf default timeout + buffer) + time.Sleep(6 * time.Second) // defaultTimeout is 5s, add 1s buffer + t.Log("AutoConf should now be cached by daemon") + + // Test Routing.DelegatedRouters field expansion with filtering + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // Should contain supported paths from NewRoutingSystem + assert.Contains(t, expandedRouters, "https://supported-new.example.com/routing/v1/providers", "Should contain supported provider URL") + assert.Contains(t, expandedRouters, "https://supported-new.example.com/routing/v1/peers", "Should contain supported peers URL") + assert.Contains(t, expandedRouters, "https://mixed-new.example.com/routing/v1/providers", "Should contain mixed provider URL") + assert.Contains(t, expandedRouters, "https://mixed-new.example.com/routing/v1/peers", "Should contain mixed peers URL") + + // Should NOT contain unsupported paths + assert.NotContains(t, expandedRouters, "https://unsupported-new.example.com/custom/v0/read", "Should filter out unsupported path") + assert.NotContains(t, expandedRouters, "https://unsupported-new.example.com/api/v1/nonstandard", "Should filter out unsupported path") + assert.NotContains(t, expandedRouters, "https://mixed-new.example.com/invalid/path", "Should filter out invalid path from mixed endpoint") + + t.Logf("Filtered routers (NewRoutingSystem with auto routing): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion with filtering + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // Should contain supported IPNS paths + assert.Contains(t, expandedPublishers, "https://supported-new.example.com/routing/v1/ipns", "Should contain supported IPNS URL") + assert.Contains(t, expandedPublishers, "https://mixed-new.example.com/routing/v1/ipns", "Should contain mixed IPNS URL") + + // Should NOT contain unsupported write paths + assert.NotContains(t, expandedPublishers, "https://unsupported-new.example.com/custom/v0/write", "Should filter out unsupported write path") + + t.Logf("Filtered publishers (NewRoutingSystem with auto routing): %v", expandedPublishers) +} + +func testExpandAutoWithoutCacheAuto(t *testing.T) { + // Test scenario: CLI without daemon ever starting using auto routing (default) + // This tests the fallback scenario where auto routing is used but doesn't populate delegated config fields + + // Create IPFS node but DO NOT start daemon + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure auto routing - delegated fields are set to "auto" but won't be populated + // because auto routing uses different internal mechanisms + node.SetIPFSConfig("Routing.Type", "auto") + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + + // Load test autoconf data (this won't be used since no daemon and auto routing doesn't use these fields) + autoConfData := loadTestDataExpand(t, "autoconf_with_unsupported_paths.json") + + // Create HTTP server (won't be contacted since no daemon) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(autoConfData) + })) + defer server.Close() + + // Configure autoconf for the node (but daemon never starts to fetch it) + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + + // Test Routing.DelegatedRouters field expansion without cached autoconf + result := node.RunIPFS("config", "Routing.DelegatedRouters", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Routing.DelegatedRouters --expand-auto should succeed") + + var expandedRouters []string + err := json.Unmarshal([]byte(result.Stdout.String()), &expandedRouters) + require.NoError(t, err) + + // With auto routing, some fallback URLs are still populated from GetMainnetFallbackConfig() + // NOTE: These values may change if autoconf library updates GetMainnetFallbackConfig() + assert.Contains(t, expandedRouters, "https://cid.contact/routing/v1/providers", "Should contain fallback provider URL from GetMainnetFallbackConfig()") + + t.Logf("Auto routing fallback routers (with fallbacks): %v", expandedRouters) + + // Test Ipns.DelegatedPublishers field expansion without cached autoconf + result = node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + require.Equal(t, 0, result.ExitCode(), "config Ipns.DelegatedPublishers --expand-auto should succeed") + + var expandedPublishers []string + err = json.Unmarshal([]byte(result.Stdout.String()), &expandedPublishers) + require.NoError(t, err) + + // With auto routing, delegated publishers may be empty for fallback scenario + // This can vary based on which systems have write endpoints in the fallback config + t.Logf("Auto routing fallback publishers: %v", expandedPublishers) +} + +// Helper function to load test data files +func loadTestDataExpand(t *testing.T, filename string) []byte { + t.Helper() + + data, err := os.ReadFile("testdata/" + filename) + require.NoError(t, err, "Failed to read test data file: %s", filename) + + return data +} diff --git a/test/cli/autoconf/extensibility_test.go b/test/cli/autoconf/extensibility_test.go new file mode 100644 index 000000000..87939a820 --- /dev/null +++ b/test/cli/autoconf/extensibility_test.go @@ -0,0 +1,255 @@ +package autoconf + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +// TestAutoConfExtensibility_NewSystem verifies that the AutoConf system can be extended +// with new routing systems beyond the default AminoDHT and IPNI. +// +// The test verifies that: +// 1. New systems can be added via AutoConf's SystemRegistry +// 2. Native vs delegated system filtering works correctly: +// - Native systems (AminoDHT) provide bootstrap peers and are used for P2P routing +// - Delegated systems (IPNI, NewSystem) provide HTTP endpoints for delegated routing +// +// 3. The system correctly filters endpoints based on routing type +// +// Note: Only native systems contribute bootstrap peers. Delegated systems like "NewSystem" +// only provide HTTP routing endpoints, not P2P bootstrap peers. +func TestAutoConfExtensibility_NewSystem(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + // Setup mock autoconf server with NewSystem + var mockServer *httptest.Server + mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Create autoconf.json with NewSystem + autoconfData := map[string]interface{}{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + "IPNI": map[string]interface{}{ + "URL": "https://ipni.example.com", + "Description": "Network Indexer", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + "NewSystem": map[string]interface{}{ + "URL": "https://example.com/newsystem", + "Description": "Test system for extensibility verification", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/ip4/127.0.0.1/tcp/9999/p2p/12D3KooWPeQ4r3v6CmVmKXoFGtqEqcr3L8P6La9yH5oEWKtoLVVa", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{ + "eth.": []string{"https://dns.eth.limo/dns-query"}, + }, + "DelegatedEndpoints": map[string]interface{}{ + "https://ipni.example.com": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + mockServer.URL + "/newsystem": map[string]interface{}{ + "Systems": []string{"NewSystem"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "max-age=300") + _ = json.NewEncoder(w).Encode(autoconfData) + })) + defer mockServer.Close() + + // NewSystem mock server URL will be dynamically assigned + newSystemServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Simple mock server for NewSystem endpoint + response := map[string]interface{}{"Providers": []interface{}{}} + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(response) + })) + defer newSystemServer.Close() + + // Update the autoconf to point to the correct NewSystem endpoint + mockServer.Close() + mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + autoconfData := map[string]interface{}{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + "IPNI": map[string]interface{}{ + "URL": "https://ipni.example.com", + "Description": "Network Indexer", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + "NewSystem": map[string]interface{}{ + "URL": "https://example.com/newsystem", + "Description": "Test system for extensibility verification", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/ip4/127.0.0.1/tcp/9999/p2p/12D3KooWPeQ4r3v6CmVmKXoFGtqEqcr3L8P6La9yH5oEWKtoLVVa", + }, + }, + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{ + "eth.": []string{"https://dns.eth.limo/dns-query"}, + }, + "DelegatedEndpoints": map[string]interface{}{ + "https://ipni.example.com": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + newSystemServer.URL: map[string]interface{}{ + "Systems": []string{"NewSystem"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "max-age=300") + _ = json.NewEncoder(w).Encode(autoconfData) + })) + defer mockServer.Close() + + // Create Kubo node with autoconf pointing to mock server + h := harness.NewT(t) + node := h.NewNode().Init() + + // Update config to use mock autoconf server + node.UpdateConfig(func(cfg *config.Config) { + cfg.AutoConf.URL = config.NewOptionalString(mockServer.URL) + cfg.AutoConf.Enabled = config.True + cfg.AutoConf.RefreshInterval = config.NewOptionalDuration(1 * time.Second) + cfg.Routing.Type = config.NewOptionalString("auto") // Should enable native AminoDHT + delegated others + cfg.Bootstrap = []string{"auto"} + cfg.Routing.DelegatedRouters = []string{"auto"} + }) + + // Start the daemon + daemon := node.StartDaemon() + defer daemon.StopDaemon() + + // Give the daemon some time to initialize and make requests + time.Sleep(3 * time.Second) + + // Test 1: Verify bootstrap includes both AminoDHT and NewSystem peers (deduplicated) + bootstrapResult := daemon.IPFS("bootstrap", "list", "--expand-auto") + bootstrapOutput := bootstrapResult.Stdout.String() + t.Logf("Bootstrap output: %s", bootstrapOutput) + + // Should contain original DHT bootstrap peer (AminoDHT is a native system) + require.Contains(t, bootstrapOutput, "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", "Should contain AminoDHT bootstrap peer") + + // Note: NewSystem bootstrap peers are NOT included because only native systems + // (AminoDHT for Routing.Type="auto") contribute bootstrap peers. + // Delegated systems like NewSystem only provide HTTP routing endpoints. + + // Test 2: Verify delegated endpoints are filtered correctly + // For Routing.Type=auto, native systems=[AminoDHT], so: + // - AminoDHT endpoints should be filtered out + // - IPNI and NewSystem endpoints should be included + + // Get the expanded delegated routers using --expand-auto + routerResult := daemon.IPFS("config", "Routing.DelegatedRouters", "--expand-auto") + var expandedRouters []string + require.NoError(t, json.Unmarshal([]byte(routerResult.Stdout.String()), &expandedRouters)) + + t.Logf("Expanded delegated routers: %v", expandedRouters) + + // Verify we got exactly 2 delegated routers: IPNI and NewSystem + require.Equal(t, 2, len(expandedRouters), "Should have exactly 2 delegated routers (IPNI and NewSystem). Got %d: %v", len(expandedRouters), expandedRouters) + + // Convert to URLs for checking + routerURLs := expandedRouters + + // Should contain NewSystem endpoint (not native) - now with routing path + foundNewSystem := false + expectedNewSystemURL := newSystemServer.URL + "/routing/v1/providers" // Full URL with path, as returned by DelegatedRoutersWithAutoConf + for _, url := range routerURLs { + if url == expectedNewSystemURL { + foundNewSystem = true + break + } + } + require.True(t, foundNewSystem, "Should contain NewSystem endpoint (%s) for delegated routing, got: %v", expectedNewSystemURL, routerURLs) + + // Should contain ipni.example.com (IPNI is not native) + foundIPNI := false + for _, url := range routerURLs { + if strings.Contains(url, "ipni.example.com") { + foundIPNI = true + break + } + } + require.True(t, foundIPNI, "Should contain ipni.example.com endpoint for IPNI") + + // Test passes - we've verified that: + // 1. Bootstrap peers are correctly resolved from native systems only + // 2. Delegated routers include both IPNI and NewSystem endpoints + // 3. URL format is correct (base URLs with paths) + // 4. AutoConf extensibility works for unknown systems + + t.Log("NewSystem extensibility test passed - Kubo successfully discovered and used unknown routing system") +} diff --git a/test/cli/autoconf/fuzz_test.go b/test/cli/autoconf/fuzz_test.go new file mode 100644 index 000000000..440bc3a25 --- /dev/null +++ b/test/cli/autoconf/fuzz_test.go @@ -0,0 +1,654 @@ +package autoconf + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/ipfs/boxo/autoconf" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// testAutoConfWithFallback is a helper function that tests autoconf parsing with fallback detection +func testAutoConfWithFallback(t *testing.T, serverURL string, expectError bool, expectErrorMsg string) (*autoconf.Config, bool) { + return testAutoConfWithFallbackAndTimeout(t, serverURL, expectError, expectErrorMsg, 10*time.Second) +} + +// testAutoConfWithFallbackAndTimeout is a helper function that tests autoconf parsing with fallback detection and custom timeout +func testAutoConfWithFallbackAndTimeout(t *testing.T, serverURL string, expectError bool, expectErrorMsg string, timeout time.Duration) (*autoconf.Config, bool) { + // Use fallback detection to test error conditions with MustGetConfigWithRefresh + fallbackUsed := false + fallbackConfig := &autoconf.Config{ + AutoConfVersion: -999, // Special marker to detect fallback usage + AutoConfSchema: -999, + } + + client, err := autoconf.NewClient( + autoconf.WithUserAgent("test-agent"), + autoconf.WithURL(serverURL), + autoconf.WithRefreshInterval(autoconf.DefaultRefreshInterval), + autoconf.WithFallback(func() *autoconf.Config { + fallbackUsed = true + return fallbackConfig + }), + ) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + result := client.GetCachedOrRefresh(ctx) + + if expectError { + require.True(t, fallbackUsed, expectErrorMsg) + require.Equal(t, int64(-999), result.AutoConfVersion, "Should return fallback config for error case") + } else { + require.False(t, fallbackUsed, "Expected no fallback to be used") + require.NotEqual(t, int64(-999), result.AutoConfVersion, "Should return fetched config for success case") + } + + return result, fallbackUsed +} + +func TestAutoConfFuzz(t *testing.T) { + t.Parallel() + + t.Run("fuzz autoconf version", testFuzzAutoConfVersion) + t.Run("fuzz bootstrap arrays", testFuzzBootstrapArrays) + t.Run("fuzz dns resolvers", testFuzzDNSResolvers) + t.Run("fuzz delegated routers", testFuzzDelegatedRouters) + t.Run("fuzz delegated publishers", testFuzzDelegatedPublishers) + t.Run("fuzz malformed json", testFuzzMalformedJSON) + t.Run("fuzz large payloads", testFuzzLargePayloads) +} + +func testFuzzAutoConfVersion(t *testing.T) { + testCases := []struct { + name string + version interface{} + expectError bool + }{ + {"valid version", 2025071801, false}, + {"zero version", 0, true}, // Should be invalid + {"negative version", -1, false}, // Parser accepts negative versions + {"string version", "2025071801", true}, // Should be number + {"float version", 2025071801.5, true}, + {"very large version", 9999999999999999, false}, // Large but valid int64 + {"null version", nil, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := map[string]interface{}{ + "AutoConfVersion": tc.version, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + }, + }, + }, + }, + "DNSResolvers": map[string]interface{}{}, + "DelegatedEndpoints": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + // Test that our autoconf parser handles this gracefully + _, _ = testAutoConfWithFallback(t, server.URL, tc.expectError, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + }) + } +} + +func testFuzzBootstrapArrays(t *testing.T) { + type testCase struct { + name string + bootstrap interface{} + expectError bool + validate func(*testing.T, *autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid bootstrap", + bootstrap: []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"} + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Equal(t, expected, bootstrapPeers, "Bootstrap peers should match configured values") + }, + }, + { + name: "empty bootstrap", + bootstrap: []string{}, + validate: func(t *testing.T, resp *autoconf.Response) { + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Empty(t, bootstrapPeers, "Empty bootstrap should result in empty peers") + }, + }, + { + name: "null bootstrap", + bootstrap: nil, + validate: func(t *testing.T, resp *autoconf.Response) { + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Empty(t, bootstrapPeers, "Null bootstrap should result in empty peers") + }, + }, + { + name: "invalid multiaddr", + bootstrap: []string{"invalid-multiaddr"}, + expectError: true, + }, + { + name: "very long multiaddr", + bootstrap: []string{"/dnsaddr/" + strings.Repeat("a", 100) + ".com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := []string{"/dnsaddr/" + strings.Repeat("a", 100) + ".com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"} + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Equal(t, expected, bootstrapPeers, "Very long multiaddr should be preserved") + }, + }, + { + name: "bootstrap as string", + bootstrap: "/dnsaddr/test", + expectError: true, + }, + { + name: "bootstrap as number", + bootstrap: 123, + expectError: true, + }, + { + name: "mixed types in array", + bootstrap: []interface{}{"/dnsaddr/test", 123, nil}, + expectError: true, + }, + { + name: "extremely large array", + bootstrap: make([]string, 1000), + validate: func(t *testing.T, resp *autoconf.Response) { + // Array will be filled in the loop below + bootstrapPeers := resp.Config.GetBootstrapPeers("AminoDHT") + assert.Len(t, bootstrapPeers, 1000, "Large bootstrap array should be preserved") + }, + }, + } + + // Fill the large array with valid multiaddrs + largeArray := testCases[len(testCases)-1].bootstrap.([]string) + for i := range largeArray { + largeArray[i] = fmt.Sprintf("/dnsaddr/bootstrap%d.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", i) + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": tc.bootstrap, + }, + }, + }, + "DNSResolvers": map[string]interface{}{}, + "DelegatedEndpoints": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, tc.expectError, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + + if !tc.expectError { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Verify structure is reasonable + bootstrapPeers := autoConf.GetBootstrapPeers("AminoDHT") + require.IsType(t, []string{}, bootstrapPeers, "Bootstrap should be []string") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzDNSResolvers(t *testing.T) { + type testCase struct { + name string + resolvers interface{} + expectError bool + validate func(*testing.T, *autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid resolvers", + resolvers: map[string][]string{".": {"https://dns.google/dns-query"}}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := map[string][]string{".": {"https://dns.google/dns-query"}} + assert.Equal(t, expected, resp.Config.DNSResolvers, "DNS resolvers should match configured values") + }, + }, + { + name: "empty resolvers", + resolvers: map[string][]string{}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DNSResolvers, "Empty resolvers should result in empty map") + }, + }, + { + name: "null resolvers", + resolvers: nil, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DNSResolvers, "Null resolvers should result in empty map") + }, + }, + { + name: "relative URL (missing scheme)", + resolvers: map[string][]string{".": {"not-a-url"}}, + expectError: true, // Should error due to strict HTTP/HTTPS validation + }, + { + name: "invalid URL format", + resolvers: map[string][]string{".": {"://invalid-missing-scheme"}}, + expectError: true, // Should error because url.Parse() fails + }, + { + name: "non-HTTP scheme", + resolvers: map[string][]string{".": {"ftp://example.com/dns-query"}}, + expectError: true, // Should error due to non-HTTP/HTTPS scheme + }, + { + name: "very long domain", + resolvers: map[string][]string{strings.Repeat("a", 1000) + ".com": {"https://dns.google/dns-query"}}, + validate: func(t *testing.T, resp *autoconf.Response) { + expected := map[string][]string{strings.Repeat("a", 1000) + ".com": {"https://dns.google/dns-query"}} + assert.Equal(t, expected, resp.Config.DNSResolvers, "Very long domain should be preserved") + }, + }, + { + name: "many resolvers", + resolvers: generateManyResolvers(100), + validate: func(t *testing.T, resp *autoconf.Response) { + expected := generateManyResolvers(100) + assert.Equal(t, expected, resp.Config.DNSResolvers, "Many resolvers should be preserved") + assert.Equal(t, 100, len(resp.Config.DNSResolvers), "Should have 100 resolvers") + }, + }, + { + name: "resolvers as array", + resolvers: []string{"https://dns.google/dns-query"}, + expectError: true, + }, + { + name: "nested invalid structure", + resolvers: map[string]interface{}{".": map[string]string{"invalid": "structure"}}, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{"/dnsaddr/test"}, + }, + }, + }, + "DNSResolvers": tc.resolvers, + "DelegatedEndpoints": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, tc.expectError, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + + if !tc.expectError { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzDelegatedRouters(t *testing.T) { + // Test various malformed delegated router configurations + type testCase struct { + name string + routers interface{} + expectError bool + validate func(*testing.T, *autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid endpoints", + routers: map[string]interface{}{ + "https://ipni.example.com": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Len(t, resp.Config.DelegatedEndpoints, 1, "Should have 1 delegated endpoint") + for url, config := range resp.Config.DelegatedEndpoints { + assert.Contains(t, url, "ipni.example.com", "Endpoint URL should contain expected domain") + assert.Contains(t, config.Systems, "IPNI", "Endpoint should have IPNI system") + assert.Contains(t, config.Read, "/routing/v1/providers", "Endpoint should have providers read path") + } + }, + }, + { + name: "empty routers", + routers: map[string]interface{}{}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DelegatedEndpoints, "Empty routers should result in empty endpoints") + }, + }, + { + name: "null routers", + routers: nil, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Empty(t, resp.Config.DelegatedEndpoints, "Null routers should result in empty endpoints") + }, + }, + { + name: "invalid nested structure", + routers: map[string]string{"invalid": "structure"}, + expectError: true, + }, + { + name: "invalid endpoint URLs", + routers: map[string]interface{}{ + "not-a-url": map[string]interface{}{ + "Systems": []string{"IPNI"}, + "Read": []string{"/routing/v1/providers"}, + "Write": []string{}, + }, + }, + expectError: true, // Should error due to URL validation + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": []string{"/dnsaddr/test"}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{}, + "DelegatedEndpoints": tc.routers, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, tc.expectError, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + + if !tc.expectError { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzDelegatedPublishers(t *testing.T) { + // DelegatedPublishers use the same autoclient library validation as DelegatedRouters + // Test that URL validation works for delegated publishers + type testCase struct { + name string + urls []string + expectErr bool + validate func(*testing.T, *autoconf.Response) + } + + testCases := []testCase{ + { + name: "valid HTTPS URLs", + urls: []string{"https://delegated-ipfs.dev", "https://another-publisher.com"}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Len(t, resp.Config.DelegatedEndpoints, 2, "Should have 2 delegated endpoints") + foundURLs := make([]string, 0, len(resp.Config.DelegatedEndpoints)) + for url := range resp.Config.DelegatedEndpoints { + foundURLs = append(foundURLs, url) + } + expectedURLs := []string{"https://delegated-ipfs.dev", "https://another-publisher.com"} + for _, expectedURL := range expectedURLs { + assert.Contains(t, foundURLs, expectedURL, "Should contain configured URL: %s", expectedURL) + } + }, + }, + { + name: "invalid URL", + urls: []string{"not-a-url"}, + expectErr: true, + }, + { + name: "HTTP URL (accepted during parsing)", + urls: []string{"http://insecure-publisher.com"}, + validate: func(t *testing.T, resp *autoconf.Response) { + assert.Len(t, resp.Config.DelegatedEndpoints, 1, "Should have 1 delegated endpoint") + for url := range resp.Config.DelegatedEndpoints { + assert.Equal(t, "http://insecure-publisher.com", url, "HTTP URL should be preserved during parsing") + } + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + autoConfData := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "TestSystem": map[string]interface{}{ + "Description": "Test system for fuzz testing", + "DelegatedConfig": map[string]interface{}{ + "Read": []string{"/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + }, + }, + }, + "DNSResolvers": map[string]interface{}{}, + "DelegatedEndpoints": map[string]interface{}{}, + } + + // Add test URLs as delegated endpoints + for _, url := range tc.urls { + autoConfData["DelegatedEndpoints"].(map[string]interface{})[url] = map[string]interface{}{ + "Systems": []string{"TestSystem"}, + "Read": []string{"/routing/v1/ipns"}, + "Write": []string{"/routing/v1/ipns"}, + } + } + + jsonData, err := json.Marshal(autoConfData) + require.NoError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + // Test that our autoconf parser handles this gracefully + autoConf, fallbackUsed := testAutoConfWithFallback(t, server.URL, tc.expectErr, fmt.Sprintf("Expected fallback to be used for %s", tc.name)) + + if !tc.expectErr { + require.NotNil(t, autoConf, "AutoConf should not be nil for successful parsing") + + // Run test-specific validation if provided (only for non-fallback cases) + if tc.validate != nil && !fallbackUsed { + // Create a mock Response for compatibility with validation functions + mockResponse := &autoconf.Response{Config: autoConf} + tc.validate(t, mockResponse) + } + } + }) + } +} + +func testFuzzMalformedJSON(t *testing.T) { + malformedJSONs := []string{ + `{`, // Incomplete JSON + `{"AutoConfVersion": }`, // Missing value + `{"AutoConfVersion": 123,}`, // Trailing comma + `{AutoConfVersion: 123}`, // Unquoted key + `{"Bootstrap": [}`, // Incomplete array + `{"Bootstrap": ["/test",]}`, // Trailing comma in array + `invalid json`, // Not JSON at all + `null`, // Just null + `[]`, // Array instead of object + `""`, // String instead of object + } + + for i, malformedJSON := range malformedJSONs { + t.Run(fmt.Sprintf("malformed_%d", i), func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(malformedJSON)) + })) + defer server.Close() + + // All malformed JSON should result in fallback usage + _, _ = testAutoConfWithFallback(t, server.URL, true, fmt.Sprintf("Expected fallback to be used for malformed JSON: %s", malformedJSON)) + }) + } +} + +func testFuzzLargePayloads(t *testing.T) { + // Test with very large but valid JSON payloads + largeBootstrap := make([]string, 10000) + for i := range largeBootstrap { + largeBootstrap[i] = fmt.Sprintf("/dnsaddr/bootstrap%d.example.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", i) + } + + largeDNSResolvers := make(map[string][]string) + for i := 0; i < 1000; i++ { + domain := fmt.Sprintf("domain%d.example.com", i) + largeDNSResolvers[domain] = []string{ + fmt.Sprintf("https://resolver%d.example.com/dns-query", i), + } + } + + config := map[string]interface{}{ + "AutoConfVersion": 2025072301, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": map[string]interface{}{ + "AminoDHT": map[string]interface{}{ + "Description": "Test AminoDHT system", + "NativeConfig": map[string]interface{}{ + "Bootstrap": largeBootstrap, + }, + }, + }, + "DNSResolvers": largeDNSResolvers, + "DelegatedEndpoints": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(config) + require.NoError(t, err) + + t.Logf("Large payload size: %d bytes", len(jsonData)) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonData) + })) + defer server.Close() + + // Should handle large payloads gracefully (up to reasonable limits) + autoConf, _ := testAutoConfWithFallbackAndTimeout(t, server.URL, false, "Large payload should not trigger fallback", 30*time.Second) + require.NotNil(t, autoConf, "Should return valid config") + + // Verify bootstrap entries were preserved + bootstrapPeers := autoConf.GetBootstrapPeers("AminoDHT") + require.Len(t, bootstrapPeers, 10000, "Should preserve all bootstrap entries") +} + +// Helper function to generate many DNS resolvers for testing +func generateManyResolvers(count int) map[string][]string { + resolvers := make(map[string][]string) + for i := 0; i < count; i++ { + domain := fmt.Sprintf("domain%d.example.com", i) + resolvers[domain] = []string{ + fmt.Sprintf("https://resolver%d.example.com/dns-query", i), + } + } + return resolvers +} diff --git a/test/cli/autoconf/ipns_test.go b/test/cli/autoconf/ipns_test.go new file mode 100644 index 000000000..ce5b20bd4 --- /dev/null +++ b/test/cli/autoconf/ipns_test.go @@ -0,0 +1,352 @@ +package autoconf + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + "github.com/ipfs/boxo/autoconf" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestAutoConfIPNS tests IPNS publishing with autoconf-resolved delegated publishers +func TestAutoConfIPNS(t *testing.T) { + t.Parallel() + + t.Run("PublishingWithWorkingEndpoint", func(t *testing.T) { + t.Parallel() + testIPNSPublishingWithWorkingEndpoint(t) + }) + + t.Run("PublishingResilience", func(t *testing.T) { + t.Parallel() + testIPNSPublishingResilience(t) + }) +} + +// testIPNSPublishingWithWorkingEndpoint verifies that IPNS delegated publishing works +// correctly when the HTTP endpoint is functioning normally and accepts requests. +// It also verifies that the PUT payload matches what can be retrieved via routing get. +func testIPNSPublishingWithWorkingEndpoint(t *testing.T) { + // Create mock IPNS publisher that accepts requests + publisher := newMockIPNSPublisher(t) + defer publisher.close() + + // Create node with delegated publisher + node := setupNodeWithAutoconf(t, publisher.server.URL, "auto") + defer node.StopDaemon() + + // Wait for daemon to be ready + time.Sleep(5 * time.Second) + + // Get node's peer ID + idResult := node.RunIPFS("id", "-f", "") + require.Equal(t, 0, idResult.ExitCode()) + peerID := strings.TrimSpace(idResult.Stdout.String()) + + // Get peer ID in base36 format (used for IPNS keys) + idBase36Result := node.RunIPFS("id", "--peerid-base", "base36", "-f", "") + require.Equal(t, 0, idBase36Result.ExitCode()) + peerIDBase36 := strings.TrimSpace(idBase36Result.Stdout.String()) + + // Verify autoconf resolved "auto" correctly + result := node.RunIPFS("config", "Ipns.DelegatedPublishers", "--expand-auto") + var resolvedPublishers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &resolvedPublishers) + require.NoError(t, err) + expectedURL := publisher.server.URL + "/routing/v1/ipns" + assert.Contains(t, resolvedPublishers, expectedURL, "AutoConf should resolve 'auto' to mock publisher") + + // Test publishing with --allow-delegated + testCID := "bafkqablimvwgy3y" + result = node.RunIPFS("name", "publish", "--allow-delegated", "/ipfs/"+testCID) + require.Equal(t, 0, result.ExitCode(), "Publishing should succeed") + assert.Contains(t, result.Stdout.String(), "Published to") + + // Wait for async HTTP request to delegated publisher + time.Sleep(2 * time.Second) + + // Verify HTTP PUT was made to delegated publisher + publishedKeys := publisher.getPublishedKeys() + assert.NotEmpty(t, publishedKeys, "HTTP PUT request should have been made to delegated publisher") + + // Get the PUT payload that was sent to the delegated publisher + putPayload := publisher.getRecordPayload(peerIDBase36) + require.NotNil(t, putPayload, "Should have captured PUT payload") + require.Greater(t, len(putPayload), 0, "PUT payload should not be empty") + + // Retrieve the IPNS record using routing get + getResult := node.RunIPFS("routing", "get", "/ipns/"+peerID) + require.Equal(t, 0, getResult.ExitCode(), "Should be able to retrieve IPNS record") + getPayload := getResult.Stdout.Bytes() + + // Compare the payloads + assert.Equal(t, putPayload, getPayload, + "PUT payload sent to delegated publisher should match what routing get returns") + + // Also verify the record points to the expected content + assert.Contains(t, getResult.Stdout.String(), testCID, + "Retrieved IPNS record should reference the published CID") + + // Use ipfs name inspect to verify the IPNS record's value matches the published CID + // First write the routing get result to a file for inspection + node.WriteBytes("ipns-record", getPayload) + inspectResult := node.RunIPFS("name", "inspect", "ipns-record") + require.Equal(t, 0, inspectResult.ExitCode(), "Should be able to inspect IPNS record") + + // The inspect output should show the path we published + inspectOutput := inspectResult.Stdout.String() + assert.Contains(t, inspectOutput, "/ipfs/"+testCID, + "IPNS record value should match the published path") + + // Also verify it's a valid record with proper fields + assert.Contains(t, inspectOutput, "Value:", "Should have Value field") + assert.Contains(t, inspectOutput, "Validity:", "Should have Validity field") + assert.Contains(t, inspectOutput, "Sequence:", "Should have Sequence field") + + t.Log("Verified: PUT payload to delegated publisher matches routing get result and name inspect confirms correct path") +} + +// testIPNSPublishingResilience verifies that IPNS publishing is resilient by design. +// Publishing succeeds as long as local storage works, even when all delegated endpoints fail. +// This test documents the intentional resilient behavior, not bugs. +func testIPNSPublishingResilience(t *testing.T) { + testCases := []struct { + name string + routingType string // "auto" or "delegated" + description string + }{ + { + name: "AutoRouting", + routingType: "auto", + description: "auto mode uses DHT + HTTP, tolerates HTTP failures", + }, + { + name: "DelegatedRouting", + routingType: "delegated", + description: "delegated mode uses HTTP only, tolerates HTTP failures", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create publisher that always fails + publisher := newMockIPNSPublisher(t) + defer publisher.close() + publisher.responseFunc = func(peerID string, record []byte) int { + return http.StatusInternalServerError + } + + // Create node with failing endpoint + node := setupNodeWithAutoconf(t, publisher.server.URL, tc.routingType) + defer node.StopDaemon() + + // Test different publishing modes - all should succeed due to resilient design + testCID := "/ipfs/bafkqablimvwgy3y" + + // Normal publishing (should succeed despite endpoint failures) + result := node.RunIPFS("name", "publish", testCID) + assert.Equal(t, 0, result.ExitCode(), + "%s: Normal publishing should succeed (local storage works)", tc.description) + + // Publishing with --allow-offline (local only, no network) + result = node.RunIPFS("name", "publish", "--allow-offline", testCID) + assert.Equal(t, 0, result.ExitCode(), + "--allow-offline should succeed (local only)") + + // Publishing with --allow-delegated (if using auto routing) + if tc.routingType == "auto" { + result = node.RunIPFS("name", "publish", "--allow-delegated", testCID) + assert.Equal(t, 0, result.ExitCode(), + "--allow-delegated should succeed (no DHT required)") + } + + t.Logf("%s: All publishing modes succeeded despite endpoint failures (resilient design)", tc.name) + }) + } +} + +// ============================================================================ +// Helper Functions +// ============================================================================ + +// setupNodeWithAutoconf creates an IPFS node with autoconf-configured delegated publishers +func setupNodeWithAutoconf(t *testing.T, publisherURL string, routingType string) *harness.Node { + // Create autoconf server with the publisher endpoint + autoconfData := createAutoconfJSON(publisherURL) + autoconfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprint(w, autoconfData) + })) + t.Cleanup(func() { autoconfServer.Close() }) + + // Create and configure node + h := harness.NewT(t) + node := h.NewNode().Init("--profile=test") + + // Configure autoconf + node.SetIPFSConfig("AutoConf.URL", autoconfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Ipns.DelegatedPublishers", []string{"auto"}) + node.SetIPFSConfig("Routing.Type", routingType) + + // Additional config for delegated routing mode + if routingType == "delegated" { + node.SetIPFSConfig("Provider.Enabled", false) + node.SetIPFSConfig("Reprovider.Interval", "0s") + } + + // Add bootstrap peers for connectivity + node.SetIPFSConfig("Bootstrap", autoconf.FallbackBootstrapPeers) + + // Start daemon + node.StartDaemon() + + return node +} + +// createAutoconfJSON generates autoconf configuration with a delegated IPNS publisher +func createAutoconfJSON(publisherURL string) string { + // Use bootstrap peers from autoconf fallbacks for consistency + bootstrapPeers, _ := json.Marshal(autoconf.FallbackBootstrapPeers) + + return fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "TestSystem": { + "Description": "Test system for IPNS publishing", + "NativeConfig": { + "Bootstrap": %s + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": { + "%s": { + "Systems": ["TestSystem"], + "Read": ["/routing/v1/ipns"], + "Write": ["/routing/v1/ipns"] + } + } + }`, string(bootstrapPeers), publisherURL) +} + +// ============================================================================ +// Mock IPNS Publisher +// ============================================================================ + +// mockIPNSPublisher implements a simple IPNS publishing HTTP API server +type mockIPNSPublisher struct { + t *testing.T + server *httptest.Server + mu sync.Mutex + publishedKeys map[string]string // peerID -> published CID + recordPayloads map[string][]byte // peerID -> actual HTTP PUT record payload + responseFunc func(peerID string, record []byte) int // returns HTTP status code +} + +func newMockIPNSPublisher(t *testing.T) *mockIPNSPublisher { + m := &mockIPNSPublisher{ + t: t, + publishedKeys: make(map[string]string), + recordPayloads: make(map[string][]byte), + } + + // Default response function accepts all publishes + m.responseFunc = func(peerID string, record []byte) int { + return http.StatusOK + } + + mux := http.NewServeMux() + mux.HandleFunc("/routing/v1/ipns/", m.handleIPNS) + + m.server = httptest.NewServer(mux) + return m +} + +func (m *mockIPNSPublisher) handleIPNS(w http.ResponseWriter, r *http.Request) { + m.mu.Lock() + defer m.mu.Unlock() + + // Extract peer ID from path + parts := strings.Split(r.URL.Path, "/") + if len(parts) < 5 { + http.Error(w, "invalid path", http.StatusBadRequest) + return + } + + peerID := parts[4] + + if r.Method == "PUT" { + // Handle IPNS record publication + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "failed to read body", http.StatusBadRequest) + return + } + + // Get response status from response function + status := m.responseFunc(peerID, body) + + if status == http.StatusOK { + if len(body) > 0 { + // Store the actual record payload + m.recordPayloads[peerID] = make([]byte, len(body)) + copy(m.recordPayloads[peerID], body) + } + + // Mark as published + m.publishedKeys[peerID] = fmt.Sprintf("published-%d", time.Now().Unix()) + } + + w.WriteHeader(status) + if status != http.StatusOK { + fmt.Fprint(w, `{"error": "publish failed"}`) + } + } else if r.Method == "GET" { + // Handle IPNS record retrieval + if record, exists := m.publishedKeys[peerID]; exists { + w.Header().Set("Content-Type", "application/vnd.ipfs.ipns-record") + fmt.Fprint(w, record) + } else { + http.Error(w, "record not found", http.StatusNotFound) + } + } else { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func (m *mockIPNSPublisher) getPublishedKeys() map[string]string { + m.mu.Lock() + defer m.mu.Unlock() + result := make(map[string]string) + for k, v := range m.publishedKeys { + result[k] = v + } + return result +} + +func (m *mockIPNSPublisher) getRecordPayload(peerID string) []byte { + m.mu.Lock() + defer m.mu.Unlock() + if payload, exists := m.recordPayloads[peerID]; exists { + result := make([]byte, len(payload)) + copy(result, payload) + return result + } + return nil +} + +func (m *mockIPNSPublisher) close() { + m.server.Close() +} diff --git a/test/cli/autoconf/routing_test.go b/test/cli/autoconf/routing_test.go new file mode 100644 index 000000000..57022e390 --- /dev/null +++ b/test/cli/autoconf/routing_test.go @@ -0,0 +1,236 @@ +package autoconf + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAutoConfDelegatedRouting(t *testing.T) { + t.Parallel() + + t.Run("delegated routing with auto router", func(t *testing.T) { + t.Parallel() + testDelegatedRoutingWithAuto(t) + }) + + t.Run("routing errors are handled properly", func(t *testing.T) { + t.Parallel() + testRoutingErrorHandling(t) + }) +} + +// mockRoutingServer implements a simple Delegated Routing HTTP API server +type mockRoutingServer struct { + t *testing.T + server *httptest.Server + mu sync.Mutex + requests []string + providerFunc func(cid string) []map[string]interface{} +} + +func newMockRoutingServer(t *testing.T) *mockRoutingServer { + m := &mockRoutingServer{ + t: t, + requests: []string{}, + } + + // Default provider function returns mock provider records + m.providerFunc = func(cid string) []map[string]interface{} { + return []map[string]interface{}{ + { + "Protocol": "transport-bitswap", + "Schema": "bitswap", + "ID": "12D3KooWMockProvider1", + "Addrs": []string{"/ip4/192.168.1.100/tcp/4001"}, + }, + { + "Protocol": "transport-bitswap", + "Schema": "bitswap", + "ID": "12D3KooWMockProvider2", + "Addrs": []string{"/ip4/192.168.1.101/tcp/4001"}, + }, + } + } + + mux := http.NewServeMux() + mux.HandleFunc("/routing/v1/providers/", m.handleProviders) + + m.server = httptest.NewServer(mux) + return m +} + +func (m *mockRoutingServer) handleProviders(w http.ResponseWriter, r *http.Request) { + m.mu.Lock() + defer m.mu.Unlock() + + // Extract CID from path + parts := strings.Split(r.URL.Path, "/") + if len(parts) < 5 { + http.Error(w, "invalid path", http.StatusBadRequest) + return + } + + cid := parts[4] + m.requests = append(m.requests, cid) + m.t.Logf("Routing server received providers request for CID: %s", cid) + + // Get provider records + providers := m.providerFunc(cid) + + // Return NDJSON response as per IPIP-378 + w.Header().Set("Content-Type", "application/x-ndjson") + encoder := json.NewEncoder(w) + + for _, provider := range providers { + if err := encoder.Encode(provider); err != nil { + m.t.Logf("Failed to encode provider: %v", err) + return + } + } +} + +func (m *mockRoutingServer) close() { + m.server.Close() +} + +func testDelegatedRoutingWithAuto(t *testing.T) { + // Create mock routing server + routingServer := newMockRoutingServer(t) + defer routingServer.close() + + // Create autoconf data with delegated router + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": { + "%s": { + "Systems": ["AminoDHT", "IPNI"], + "Read": ["/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"], + "Write": [] + } + } + }`, routingServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // Create IPFS node with auto delegated router + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + + // Test that daemon starts successfully with auto routing configuration + // The actual routing functionality requires online mode, but we can test + // that the configuration is expanded and daemon starts properly + node.StartDaemon("--offline") + defer node.StopDaemon() + + // Verify config still shows "auto" (this tests that auto values are preserved in user-facing config) + result := node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, routers, "Delegated routers config should show 'auto'") + + // Test that daemon is running and accepting commands + result = node.RunIPFS("version") + require.Equal(t, 0, result.ExitCode(), "Daemon should be running and accepting commands") + + // Test that autoconf server was contacted (indicating successful resolution) + // We can't test actual routing in offline mode, but we can verify that + // the AutoConf system expanded the "auto" placeholder successfully + // by checking that the daemon started without errors + t.Log("AutoConf successfully expanded delegated router configuration and daemon started") +} + +func testRoutingErrorHandling(t *testing.T) { + // Create routing server that returns no providers + routingServer := newMockRoutingServer(t) + defer routingServer.close() + + // Configure to return no providers (empty response) + routingServer.providerFunc = func(cid string) []map[string]interface{} { + return []map[string]interface{}{} + } + + // Create autoconf data + autoConfData := fmt.Sprintf(`{ + "AutoConfVersion": 2025072302, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "Description": "Test AminoDHT system", + "NativeConfig": { + "Bootstrap": [] + } + } + }, + "DNSResolvers": {}, + "DelegatedEndpoints": { + "%s": { + "Systems": ["AminoDHT", "IPNI"], + "Read": ["/routing/v1/providers", "/routing/v1/peers", "/routing/v1/ipns"], + "Write": [] + } + } + }`, routingServer.server.URL) + + // Create autoconf server + autoConfServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(autoConfData)) + })) + defer autoConfServer.Close() + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", autoConfServer.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Routing.DelegatedRouters", []string{"auto"}) + + // Test that daemon starts successfully even when no providers are available + node.StartDaemon("--offline") + defer node.StopDaemon() + + // Verify config shows "auto" + result := node.RunIPFS("config", "Routing.DelegatedRouters") + require.Equal(t, 0, result.ExitCode()) + + var routers []string + err := json.Unmarshal([]byte(result.Stdout.String()), &routers) + require.NoError(t, err) + assert.Equal(t, []string{"auto"}, routers, "Delegated routers config should show 'auto'") + + // Test that daemon is running and accepting commands + result = node.RunIPFS("version") + require.Equal(t, 0, result.ExitCode(), "Daemon should be running even with empty routing config") + + t.Log("AutoConf successfully handled routing configuration with empty providers") +} diff --git a/test/cli/autoconf/swarm_connect_test.go b/test/cli/autoconf/swarm_connect_test.go new file mode 100644 index 000000000..95c75d953 --- /dev/null +++ b/test/cli/autoconf/swarm_connect_test.go @@ -0,0 +1,90 @@ +package autoconf + +import ( + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestSwarmConnectWithAutoConf tests that ipfs swarm connect works properly +// when AutoConf is enabled and a daemon is running. +// +// This is a regression test for the issue where: +// - AutoConf disabled: ipfs swarm connect works +// - AutoConf enabled: ipfs swarm connect fails with "Error: connect" +// +// The issue affects CLI command fallback behavior when the HTTP API connection fails. +func TestSwarmConnectWithAutoConf(t *testing.T) { + t.Parallel() + + t.Run("AutoConf disabled - should work", func(t *testing.T) { + testSwarmConnectWithAutoConfSetting(t, false, true) // expect success + }) + + t.Run("AutoConf enabled - should work", func(t *testing.T) { + testSwarmConnectWithAutoConfSetting(t, true, true) // expect success (fix the bug!) + }) +} + +func testSwarmConnectWithAutoConfSetting(t *testing.T, autoConfEnabled bool, expectSuccess bool) { + // Create IPFS node with test profile + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Configure AutoConf + node.SetIPFSConfig("AutoConf.Enabled", autoConfEnabled) + + // Set up bootstrap peers so the node has something to connect to + // Use the same bootstrap peers from boxo/autoconf fallbacks + node.SetIPFSConfig("Bootstrap", []string{ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + }) + + // CRITICAL: Start the daemon first - this is the key requirement + // The daemon must be running and working properly + node.StartDaemon() + defer node.StopDaemon() + + // Give daemon time to start up completely + time.Sleep(3 * time.Second) + + // Verify daemon is responsive + result := node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "Daemon should be responsive before testing swarm connect") + t.Logf("Daemon is running and responsive. AutoConf enabled: %v", autoConfEnabled) + + // Now test swarm connect to a bootstrap peer + // This should work because: + // 1. The daemon is running + // 2. The CLI should connect to the daemon via API + // 3. The daemon should handle the swarm connect request + result = node.RunIPFS("swarm", "connect", "/dnsaddr/bootstrap.libp2p.io") + + // swarm connect should work regardless of AutoConf setting + assert.Equal(t, 0, result.ExitCode(), + "swarm connect should succeed with AutoConf=%v. stderr: %s", + autoConfEnabled, result.Stderr.String()) + + // Should contain success message + output := result.Stdout.String() + assert.Contains(t, output, "success", + "swarm connect output should contain 'success' with AutoConf=%v. output: %s", + autoConfEnabled, output) + + // Additional diagnostic: Check if ipfs id shows addresses + // Both AutoConf enabled and disabled should show proper addresses + result = node.RunIPFS("id") + require.Equal(t, 0, result.ExitCode(), "ipfs id should work with AutoConf=%v", autoConfEnabled) + + idOutput := result.Stdout.String() + t.Logf("ipfs id output with AutoConf=%v: %s", autoConfEnabled, idOutput) + + // Addresses should not be null regardless of AutoConf setting + assert.Contains(t, idOutput, `"Addresses"`, "ipfs id should show Addresses field") + assert.NotContains(t, idOutput, `"Addresses": null`, + "ipfs id should not show null addresses with AutoConf=%v", autoConfEnabled) +} diff --git a/test/cli/autoconf/testdata/autoconf_amino_and_ipni.json b/test/cli/autoconf/testdata/autoconf_amino_and_ipni.json new file mode 100644 index 000000000..add246cc3 --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_amino_and_ipni.json @@ -0,0 +1,60 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + }, + "IPNI": { + "URL": "https://cid.contact", + "Description": "Network Indexer - content routing database for large storage providers", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://amino-dht.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://cid.contact": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/autoconf_new_routing_system.json b/test/cli/autoconf/testdata/autoconf_new_routing_system.json new file mode 100644 index 000000000..697e5cc8f --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_new_routing_system.json @@ -0,0 +1,38 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "NewRoutingSystem": { + "URL": "https://new-routing.example.com", + "Description": "New routing system for testing delegation with auto routing", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://new-routing.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/autoconf_new_routing_with_filtering.json b/test/cli/autoconf/testdata/autoconf_new_routing_with_filtering.json new file mode 100644 index 000000000..982f545aa --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_new_routing_with_filtering.json @@ -0,0 +1,59 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "NewRoutingSystem": { + "URL": "https://new-routing.example.com", + "Description": "New routing system for testing path filtering with auto routing", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://supported-new.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://unsupported-new.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/custom/v0/read", + "/api/v1/nonstandard" + ], + "Write": [ + "/custom/v0/write" + ] + }, + "https://mixed-new.example.com": { + "Systems": ["NewRoutingSystem"], + "Read": [ + "/routing/v1/providers", + "/invalid/path", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/autoconf_with_unsupported_paths.json b/test/cli/autoconf/testdata/autoconf_with_unsupported_paths.json new file mode 100644 index 000000000..e7a45a1da --- /dev/null +++ b/test/cli/autoconf/testdata/autoconf_with_unsupported_paths.json @@ -0,0 +1,64 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://supported.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://unsupported.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/example/v0/read", + "/api/v1/custom" + ], + "Write": [ + "/example/v0/write" + ] + }, + "https://mixed.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/providers", + "/unsupported/path", + "/routing/v1/peers" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} diff --git a/test/cli/autoconf/testdata/updated_autoconf.json b/test/cli/autoconf/testdata/updated_autoconf.json new file mode 100644 index 000000000..44b7f1ed9 --- /dev/null +++ b/test/cli/autoconf/testdata/updated_autoconf.json @@ -0,0 +1,87 @@ +{ + "AutoConfVersion": 2025072902, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + }, + "IPNI": { + "URL": "https://ipni.example.com", + "Description": "Network Indexer - content routing database for large storage providers", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query", + "https://dns.eth.link/dns-query" + ], + "test.": [ + "https://test.resolver/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://ipni.example.com": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + }, + "https://routing.example.com": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + }, + "https://delegated-ipfs.dev": { + "Systems": ["AminoDHT", "IPNI"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + }, + "https://ipns.example.com": { + "Systems": ["AminoDHT"], + "Read": [ + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/testdata/valid_autoconf.json b/test/cli/autoconf/testdata/valid_autoconf.json new file mode 100644 index 000000000..4469c33c2 --- /dev/null +++ b/test/cli/autoconf/testdata/valid_autoconf.json @@ -0,0 +1,68 @@ +{ + "AutoConfVersion": 2025072901, + "AutoConfSchema": 1, + "AutoConfTTL": 86400, + "SystemRegistry": { + "AminoDHT": { + "URL": "https://github.com/ipfs/specs/pull/497", + "Description": "Public DHT swarm that implements the IPFS Kademlia DHT specification under protocol identifier /ipfs/kad/1.0.0", + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", + "/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", + "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" + ] + }, + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + }, + "IPNI": { + "URL": "https://ipni.example.com", + "Description": "Network Indexer - content routing database for large storage providers", + "DelegatedConfig": { + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + } + } + }, + "DNSResolvers": { + "eth.": [ + "https://dns.eth.limo/dns-query", + "https://dns.eth.link/dns-query" + ] + }, + "DelegatedEndpoints": { + "https://ipni.example.com": { + "Systems": ["IPNI"], + "Read": [ + "/routing/v1/providers" + ], + "Write": [] + }, + "https://delegated-ipfs.dev": { + "Systems": ["AminoDHT", "IPNI"], + "Read": [ + "/routing/v1/providers", + "/routing/v1/peers", + "/routing/v1/ipns" + ], + "Write": [ + "/routing/v1/ipns" + ] + } + } +} \ No newline at end of file diff --git a/test/cli/autoconf/validation_test.go b/test/cli/autoconf/validation_test.go new file mode 100644 index 000000000..e906fe175 --- /dev/null +++ b/test/cli/autoconf/validation_test.go @@ -0,0 +1,144 @@ +package autoconf + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +func TestAutoConfValidation(t *testing.T) { + t.Parallel() + + t.Run("invalid autoconf JSON prevents caching", func(t *testing.T) { + t.Parallel() + testInvalidAutoConfJSONPreventsCaching(t) + }) + + t.Run("malformed multiaddr in autoconf", func(t *testing.T) { + t.Parallel() + testMalformedMultiaddrInAutoConf(t) + }) + + t.Run("malformed URL in autoconf", func(t *testing.T) { + t.Parallel() + testMalformedURLInAutoConf(t) + }) +} + +func testInvalidAutoConfJSONPreventsCaching(t *testing.T) { + // Create server that serves invalid autoconf JSON + invalidAutoConfData := `{ + "AutoConfVersion": 123, + "AutoConfSchema": 1, + "SystemRegistry": { + "AminoDHT": { + "NativeConfig": { + "Bootstrap": [ + "invalid-multiaddr-that-should-fail" + ] + } + } + } + }` + + requestCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount++ + t.Logf("Invalid autoconf server request #%d: %s %s", requestCount, r.Method, r.URL.Path) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("ETag", `"invalid-config-123"`) + _, _ = w.Write([]byte(invalidAutoConfData)) + })) + defer server.Close() + + // Create IPFS node and try to start daemon with invalid autoconf + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon to trigger autoconf fetch - this should start but log validation errors + node.StartDaemon() + defer node.StopDaemon() + + // Give autoconf some time to attempt fetch and fail validation + // The daemon should still start but autoconf should fail + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with invalid autoconf") + + // Verify server was called (autoconf was attempted even though validation failed) + assert.Greater(t, requestCount, 0, "Invalid autoconf server should have been called") +} + +func testMalformedMultiaddrInAutoConf(t *testing.T) { + // Create server that serves autoconf with malformed multiaddr + invalidAutoConfData := `{ + "AutoConfVersion": 456, + "AutoConfSchema": 1, + "SystemRegistry": { + "AminoDHT": { + "NativeConfig": { + "Bootstrap": [ + "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "not-a-valid-multiaddr" + ] + } + } + } + }` + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(invalidAutoConfData)) + })) + defer server.Close() + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) + + // Start daemon to trigger autoconf fetch - daemon should start but autoconf validation should fail + node.StartDaemon() + defer node.StopDaemon() + + // Daemon should still be functional even with invalid autoconf + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with invalid autoconf") +} + +func testMalformedURLInAutoConf(t *testing.T) { + // Create server that serves autoconf with malformed URL + invalidAutoConfData := `{ + "AutoConfVersion": 789, + "AutoConfSchema": 1, + "DNSResolvers": { + "eth.": ["https://valid.example.com"], + "bad.": ["://malformed-url-missing-scheme"] + } + }` + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(invalidAutoConfData)) + })) + defer server.Close() + + // Create IPFS node + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.URL", server.URL) + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("DNS.Resolvers", map[string]string{"foo.": "auto"}) + + // Start daemon to trigger autoconf fetch - daemon should start but autoconf validation should fail + node.StartDaemon() + defer node.StopDaemon() + + // Daemon should still be functional even with invalid autoconf + result := node.RunIPFS("version") + assert.Equal(t, 0, result.ExitCode(), "Daemon should start even with invalid autoconf") +} diff --git a/test/cli/basic_commands_test.go b/test/cli/basic_commands_test.go index 62733d00b..d9d66d1c3 100644 --- a/test/cli/basic_commands_test.go +++ b/test/cli/basic_commands_test.go @@ -70,6 +70,10 @@ func TestIPFSVersionDeps(t *testing.T) { splitModVers := strings.Split(moduleVersion, "@") modPath := splitModVers[0] modVers := splitModVers[1] + // Skip local replace paths (starting with "./") + if strings.HasPrefix(modPath, "./") { + continue + } assert.NoError(t, gomod.Check(modPath, modVers), "path: %s, version: %s", modPath, modVers) } } diff --git a/test/cli/bitswap_config_test.go b/test/cli/bitswap_config_test.go index 9674d3cb6..52e9ea541 100644 --- a/test/cli/bitswap_config_test.go +++ b/test/cli/bitswap_config_test.go @@ -6,9 +6,9 @@ import ( "time" "github.com/ipfs/boxo/bitswap/network/bsnet" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" ) @@ -16,7 +16,7 @@ func TestBitswapConfig(t *testing.T) { t.Parallel() // Create test data that will be shared between nodes - testData := testutils.RandomBytes(100) + testData := random.Bytes(100) t.Run("server enabled (default)", func(t *testing.T) { t.Parallel() diff --git a/test/cli/bootstrap_auto_test.go b/test/cli/bootstrap_auto_test.go new file mode 100644 index 000000000..e3959ece7 --- /dev/null +++ b/test/cli/bootstrap_auto_test.go @@ -0,0 +1,202 @@ +package cli + +import ( + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBootstrapCommandsWithAutoPlaceholder(t *testing.T) { + t.Parallel() + + t.Run("bootstrap add default", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap add default' works correctly + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + + // Add default bootstrap peers via "auto" placeholder + result := node.RunIPFS("bootstrap", "add", "default") + require.Equal(t, 0, result.ExitCode(), "bootstrap add default should succeed") + + output := result.Stdout.String() + t.Logf("Bootstrap add default output: %s", output) + assert.Contains(t, output, "added auto", "bootstrap add default should report adding 'auto'") + + // Verify bootstrap list shows "auto" + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list after add default: %s", listOutput) + assert.Contains(t, listOutput, "auto", "bootstrap list should show 'auto' placeholder") + }) + + t.Run("bootstrap add auto explicitly", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap add auto' works correctly + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + + // Add "auto" placeholder explicitly + result := node.RunIPFS("bootstrap", "add", "auto") + require.Equal(t, 0, result.ExitCode(), "bootstrap add auto should succeed") + + output := result.Stdout.String() + t.Logf("Bootstrap add auto output: %s", output) + assert.Contains(t, output, "added auto", "bootstrap add auto should report adding 'auto'") + + // Verify bootstrap list shows "auto" + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list after add auto: %s", listOutput) + assert.Contains(t, listOutput, "auto", "bootstrap list should show 'auto' placeholder") + }) + + t.Run("bootstrap add default converts to auto", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap add default' adds "auto" to the bootstrap list + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + node.SetIPFSConfig("AutoConf.Enabled", true) // Enable AutoConf to allow adding "auto" + + // Add default bootstrap peers + result := node.RunIPFS("bootstrap", "add", "default") + require.Equal(t, 0, result.ExitCode(), "bootstrap add default should succeed") + assert.Contains(t, result.Stdout.String(), "added auto", "should report adding 'auto'") + + // Verify bootstrap list shows "auto" + var bootstrap []string + node.GetIPFSConfig("Bootstrap", &bootstrap) + require.Equal(t, []string{"auto"}, bootstrap, "Bootstrap should contain ['auto']") + }) + + t.Run("bootstrap add default fails when AutoConf disabled", func(t *testing.T) { + t.Parallel() + // Test that adding default/auto fails when AutoConf is disabled + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + node.SetIPFSConfig("AutoConf.Enabled", false) // Disable AutoConf + + // Try to add default - should fail + result := node.RunIPFS("bootstrap", "add", "default") + require.NotEqual(t, 0, result.ExitCode(), "bootstrap add default should fail when AutoConf disabled") + assert.Contains(t, result.Stderr.String(), "AutoConf is disabled", "should mention AutoConf is disabled") + + // Try to add auto - should also fail + result = node.RunIPFS("bootstrap", "add", "auto") + require.NotEqual(t, 0, result.ExitCode(), "bootstrap add auto should fail when AutoConf disabled") + assert.Contains(t, result.Stderr.String(), "AutoConf is disabled", "should mention AutoConf is disabled") + }) + + t.Run("bootstrap rm with auto placeholder", func(t *testing.T) { + t.Parallel() + // Test that selective removal fails properly when "auto" is present + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) // Start with auto + + // Try to remove a specific peer - should fail with helpful error + result := node.RunIPFS("bootstrap", "rm", "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN") + require.NotEqual(t, 0, result.ExitCode(), "bootstrap rm of specific peer should fail when 'auto' is present") + + output := result.Stderr.String() + t.Logf("Bootstrap rm error output: %s", output) + assert.Contains(t, output, "cannot remove individual bootstrap peers when using 'auto' placeholder", + "should provide helpful error message about auto placeholder") + assert.Contains(t, output, "disable AutoConf", + "should suggest disabling AutoConf as solution") + assert.Contains(t, output, "ipfs bootstrap rm --all", + "should suggest using rm --all as alternative") + }) + + t.Run("bootstrap rm --all with auto placeholder", func(t *testing.T) { + t.Parallel() + // Test that 'ipfs bootstrap rm --all' works with "auto" placeholder + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{"auto"}) // Start with auto + + // Remove all bootstrap peers + result := node.RunIPFS("bootstrap", "rm", "--all") + require.Equal(t, 0, result.ExitCode(), "bootstrap rm --all should succeed with auto placeholder") + + output := result.Stdout.String() + t.Logf("Bootstrap rm --all output: %s", output) + assert.Contains(t, output, "removed auto", "bootstrap rm --all should report removing 'auto'") + + // Verify bootstrap list is now empty + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list after rm --all: %s", listOutput) + assert.Empty(t, listOutput, "bootstrap list should be empty after rm --all") + + // Test the rm all subcommand too + node.SetIPFSConfig("Bootstrap", []string{"auto"}) // Reset to auto + + result = node.RunIPFS("bootstrap", "rm", "all") + require.Equal(t, 0, result.ExitCode(), "bootstrap rm all should succeed with auto placeholder") + + output = result.Stdout.String() + t.Logf("Bootstrap rm all output: %s", output) + assert.Contains(t, output, "removed auto", "bootstrap rm all should report removing 'auto'") + }) + + t.Run("bootstrap mixed auto and specific peers", func(t *testing.T) { + t.Parallel() + // Test that bootstrap commands work when mixing "auto" with specific peers + node := harness.NewT(t).NewNode().Init("--profile=test") + node.SetIPFSConfig("AutoConf.Enabled", true) + node.SetIPFSConfig("Bootstrap", []string{}) // Start with empty bootstrap + + // Add a specific peer first + specificPeer := "/ip4/127.0.0.1/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" + result := node.RunIPFS("bootstrap", "add", specificPeer) + require.Equal(t, 0, result.ExitCode(), "bootstrap add specific peer should succeed") + + // Add auto placeholder + result = node.RunIPFS("bootstrap", "add", "auto") + require.Equal(t, 0, result.ExitCode(), "bootstrap add auto should succeed") + + // Verify bootstrap list shows both + listResult := node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput := listResult.Stdout.String() + t.Logf("Bootstrap list with mixed peers: %s", listOutput) + assert.Contains(t, listOutput, "auto", "bootstrap list should contain 'auto' placeholder") + assert.Contains(t, listOutput, specificPeer, "bootstrap list should contain specific peer") + + // Try to remove the specific peer - should fail because auto is present + result = node.RunIPFS("bootstrap", "rm", specificPeer) + require.NotEqual(t, 0, result.ExitCode(), "bootstrap rm of specific peer should fail when 'auto' is present") + + output := result.Stderr.String() + assert.Contains(t, output, "cannot remove individual bootstrap peers when using 'auto' placeholder", + "should provide helpful error message about auto placeholder") + + // Remove all should work and remove both auto and specific peer + result = node.RunIPFS("bootstrap", "rm", "--all") + require.Equal(t, 0, result.ExitCode(), "bootstrap rm --all should succeed") + + output = result.Stdout.String() + t.Logf("Bootstrap rm --all output with mixed peers: %s", output) + // Should report removing both the specific peer and auto + assert.Contains(t, output, "removed", "should report removing peers") + + // Verify bootstrap list is now empty + listResult = node.RunIPFS("bootstrap", "list") + require.Equal(t, 0, listResult.ExitCode(), "bootstrap list should succeed") + + listOutput = listResult.Stdout.String() + assert.Empty(t, listOutput, "bootstrap list should be empty after rm --all") + }) +} diff --git a/test/cli/cid_test.go b/test/cli/cid_test.go new file mode 100644 index 000000000..5e44b0db6 --- /dev/null +++ b/test/cli/cid_test.go @@ -0,0 +1,609 @@ +package cli + +import ( + "fmt" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +func TestCidCommands(t *testing.T) { + t.Parallel() + + t.Run("base32", testCidBase32) + t.Run("format", testCidFormat) + t.Run("bases", testCidBases) + t.Run("codecs", testCidCodecs) + t.Run("hashes", testCidHashes) +} + +// testCidBase32 tests 'ipfs cid base32' subcommand +// Includes regression tests for https://github.com/ipfs/kubo/issues/9007 +func testCidBase32(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("converts valid CIDs to base32", func(t *testing.T) { + t.Run("CIDv0 to base32", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa\n", res.Stdout.String()) + }) + + t.Run("CIDv1 base58 to base32", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa\n", res.Stdout.String()) + }) + + t.Run("already base32 CID remains unchanged", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa\n", res.Stdout.String()) + }) + + t.Run("multiple valid CIDs", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", + "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo", + "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Empty(t, res.Stderr.String()) + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.Equal(t, 2, len(lines)) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa", lines[0]) + assert.Equal(t, "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa", lines[1]) + }) + }) + + t.Run("error handling", func(t *testing.T) { + // Regression tests for https://github.com/ipfs/kubo/issues/9007 + t.Run("returns error code 1 for single invalid CID", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "invalid-cid") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "invalid-cid: invalid cid") + assert.Contains(t, res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for mixed valid and invalid CIDs", func(t *testing.T) { + res := node.RunIPFS("cid", "base32", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo", "invalid-cid") + assert.Equal(t, 1, res.ExitCode()) + // Valid CID should be converted and printed to stdout + assert.Contains(t, res.Stdout.String(), "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + // Invalid CID error should be printed to stderr + assert.Contains(t, res.Stderr.String(), "invalid-cid: invalid cid") + assert.Contains(t, res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for stdin with invalid CIDs", func(t *testing.T) { + input := "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo\nbad-cid\nbafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa" + res := node.RunPipeToIPFS(strings.NewReader(input), "cid", "base32") + assert.Equal(t, 1, res.ExitCode()) + // Valid CIDs should be converted + assert.Contains(t, res.Stdout.String(), "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + // Invalid CID error should be in stderr + assert.Contains(t, res.Stderr.String(), "bad-cid: invalid cid") + }) + }) +} + +// testCidFormat tests 'ipfs cid format' subcommand +// Includes regression tests for https://github.com/ipfs/kubo/issues/9007 +func testCidFormat(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("formats CIDs with various options", func(t *testing.T) { + t.Run("default format preserves CID", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo\n", res.Stdout.String()) + }) + + t.Run("convert to CIDv1 with base58btc", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "-v", "1", "-b", "base58btc", + "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j\n", res.Stdout.String()) + }) + + t.Run("convert to CIDv0", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "-v", "0", + "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo\n", res.Stdout.String()) + }) + + t.Run("change codec to raw", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "--mc", "raw", "-b", "base32", + "bafybeievd6mwe6vcwnkwo3eizs3h7w3a34opszbyfxziqdxguhjw7imdve") + assert.Equal(t, 0, res.ExitCode()) + assert.Equal(t, "bafkreievd6mwe6vcwnkwo3eizs3h7w3a34opszbyfxziqdxguhjw7imdve\n", res.Stdout.String()) + }) + + t.Run("multiple valid CIDs with format options", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "-v", "1", "-b", "base58btc", + "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo", + "bafybeifgwyq5gs4l2mru5klgwjfmftjvkmbyyjurbupuz2bst7mhmg2hwa") + assert.Equal(t, 0, res.ExitCode()) + assert.Empty(t, res.Stderr.String()) + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assert.Equal(t, 2, len(lines)) + assert.Equal(t, "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j", lines[0]) + assert.Equal(t, "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j", lines[1]) + }) + }) + + t.Run("error handling", func(t *testing.T) { + // Regression tests for https://github.com/ipfs/kubo/issues/9007 + t.Run("returns error code 1 for single invalid CID", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "not-a-cid") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "not-a-cid: invalid cid") + assert.Contains(t, res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for mixed valid and invalid CIDs", func(t *testing.T) { + res := node.RunIPFS("cid", "format", "not-a-cid", "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + assert.Equal(t, 1, res.ExitCode()) + // Valid CID should be printed to stdout + assert.Contains(t, res.Stdout.String(), "QmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo") + // Invalid CID error should be printed to stderr + assert.Contains(t, res.Stderr.String(), "not-a-cid: invalid cid") + assert.Contains(t, res.Stderr.String(), "Error: errors while displaying some entries") + }) + + t.Run("returns error code 1 for stdin with invalid CIDs", func(t *testing.T) { + input := "invalid\nQmZZRTyhDpL5Jgift1cHbAhexeE1m2Hw8x8g7rTcPahDvo" + res := node.RunPipeToIPFS(strings.NewReader(input), "cid", "format", "-v", "1", "-b", "base58btc") + assert.Equal(t, 1, res.ExitCode()) + // Valid CID should be converted + assert.Contains(t, res.Stdout.String(), "zdj7WgefqQm5HogBQ2bckZuTYYDarRTUZi51GYCnerHD2G86j") + // Invalid CID error should be in stderr + assert.Contains(t, res.Stderr.String(), "invalid: invalid cid") + }) + }) +} + +// testCidBases tests 'ipfs cid bases' subcommand +func testCidBases(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("lists available bases", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove support + // for multibase encodings. If a new base is intentionally added or removed, + // this test should be updated accordingly. + expectedBases := []string{ + "identity", + "base2", + "base16", + "base16upper", + "base32", + "base32upper", + "base32pad", + "base32padupper", + "base32hex", + "base32hexupper", + "base32hexpad", + "base32hexpadupper", + "base36", + "base36upper", + "base58btc", + "base58flickr", + "base64", + "base64pad", + "base64url", + "base64urlpad", + "base256emoji", + } + + res := node.RunIPFS("cid", "bases") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases", expectedBases, lines) + }) + + t.Run("with --prefix flag shows single letter prefixes", func(t *testing.T) { + // Regression test to catch any changes to the output format or supported bases + expectedLines := []string{ + "identity", + "0 base2", + "b base32", + "B base32upper", + "c base32pad", + "C base32padupper", + "f base16", + "F base16upper", + "k base36", + "K base36upper", + "m base64", + "M base64pad", + "t base32hexpad", + "T base32hexpadupper", + "u base64url", + "U base64urlpad", + "v base32hex", + "V base32hexupper", + "z base58btc", + "Z base58flickr", + "🚀 base256emoji", + } + + res := node.RunIPFS("cid", "bases", "--prefix") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases --prefix output", expectedLines, lines) + }) + + t.Run("with --numeric flag shows numeric codes", func(t *testing.T) { + // Regression test to catch any changes to the output format or supported bases + expectedLines := []string{ + "0 identity", + "48 base2", + "98 base32", + "66 base32upper", + "99 base32pad", + "67 base32padupper", + "102 base16", + "70 base16upper", + "107 base36", + "75 base36upper", + "109 base64", + "77 base64pad", + "116 base32hexpad", + "84 base32hexpadupper", + "117 base64url", + "85 base64urlpad", + "118 base32hex", + "86 base32hexupper", + "122 base58btc", + "90 base58flickr", + "128640 base256emoji", + } + + res := node.RunIPFS("cid", "bases", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases --numeric output", expectedLines, lines) + }) + + t.Run("with both --prefix and --numeric flags", func(t *testing.T) { + // Regression test to catch any changes to the output format or supported bases + expectedLines := []string{ + "0 identity", + "0 48 base2", + "b 98 base32", + "B 66 base32upper", + "c 99 base32pad", + "C 67 base32padupper", + "f 102 base16", + "F 70 base16upper", + "k 107 base36", + "K 75 base36upper", + "m 109 base64", + "M 77 base64pad", + "t 116 base32hexpad", + "T 84 base32hexpadupper", + "u 117 base64url", + "U 85 base64urlpad", + "v 118 base32hex", + "V 86 base32hexupper", + "z 122 base58btc", + "Z 90 base58flickr", + "🚀 128640 base256emoji", + } + + res := node.RunIPFS("cid", "bases", "--prefix", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "bases --prefix --numeric output", expectedLines, lines) + }) +} + +// testCidCodecs tests 'ipfs cid codecs' subcommand +func testCidCodecs(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("lists available codecs", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // IPLD codecs. If a codec is intentionally added or removed, + // this test should be updated accordingly. + expectedCodecs := []string{ + "cbor", + "raw", + "dag-pb", + "dag-cbor", + "libp2p-key", + "git-raw", + "torrent-info", + "torrent-file", + "blake3-hashseq", + "leofcoin-block", + "leofcoin-tx", + "leofcoin-pr", + "dag-jose", + "dag-cose", + "eth-block", + "eth-block-list", + "eth-tx-trie", + "eth-tx", + "eth-tx-receipt-trie", + "eth-tx-receipt", + "eth-state-trie", + "eth-account-snapshot", + "eth-storage-trie", + "eth-receipt-log-trie", + "eth-receipt-log", + "bitcoin-block", + "bitcoin-tx", + "bitcoin-witness-commitment", + "zcash-block", + "zcash-tx", + "stellar-block", + "stellar-tx", + "decred-block", + "decred-tx", + "dash-block", + "dash-tx", + "swarm-manifest", + "swarm-feed", + "beeson", + "dag-json", + "swhid-1-snp", + "json", + "rdfc-1", + "json-jcs", + } + + res := node.RunIPFS("cid", "codecs") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "codecs", expectedCodecs, lines) + }) + + t.Run("with --numeric flag shows codec numbers", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // IPLD codecs. If a codec is intentionally added or removed, + // this test should be updated accordingly. + expectedLines := []string{ + "81 cbor", + "85 raw", + "112 dag-pb", + "113 dag-cbor", + "114 libp2p-key", + "120 git-raw", + "123 torrent-info", + "124 torrent-file", + "128 blake3-hashseq", + "129 leofcoin-block", + "130 leofcoin-tx", + "131 leofcoin-pr", + "133 dag-jose", + "134 dag-cose", + "144 eth-block", + "145 eth-block-list", + "146 eth-tx-trie", + "147 eth-tx", + "148 eth-tx-receipt-trie", + "149 eth-tx-receipt", + "150 eth-state-trie", + "151 eth-account-snapshot", + "152 eth-storage-trie", + "153 eth-receipt-log-trie", + "154 eth-receipt-log", + "176 bitcoin-block", + "177 bitcoin-tx", + "178 bitcoin-witness-commitment", + "192 zcash-block", + "193 zcash-tx", + "208 stellar-block", + "209 stellar-tx", + "224 decred-block", + "225 decred-tx", + "240 dash-block", + "241 dash-tx", + "250 swarm-manifest", + "251 swarm-feed", + "252 beeson", + "297 dag-json", + "496 swhid-1-snp", + "512 json", + "46083 rdfc-1", + "46593 json-jcs", + } + + res := node.RunIPFS("cid", "codecs", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "codecs --numeric output", expectedLines, lines) + }) + + t.Run("with --supported flag lists only supported codecs", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally change the list + // of supported codecs. If a codec is intentionally added or removed from + // support, this test should be updated accordingly. + expectedSupportedCodecs := []string{ + "cbor", + "dag-cbor", + "dag-jose", + "dag-json", + "dag-pb", + "git-raw", + "json", + "libp2p-key", + "raw", + } + + res := node.RunIPFS("cid", "codecs", "--supported") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "supported codecs", expectedSupportedCodecs, lines) + }) + + t.Run("with both --supported and --numeric flags", func(t *testing.T) { + // Regression test to catch any changes to supported codecs or output format + expectedLines := []string{ + "81 cbor", + "85 raw", + "112 dag-pb", + "113 dag-cbor", + "114 libp2p-key", + "120 git-raw", + "133 dag-jose", + "297 dag-json", + "512 json", + } + + res := node.RunIPFS("cid", "codecs", "--supported", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "codecs --supported --numeric output", expectedLines, lines) + }) +} + +// testCidHashes tests 'ipfs cid hashes' subcommand +func testCidHashes(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + t.Run("lists available hashes", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // support for hash functions. If a hash function is intentionally added + // or removed, this test should be updated accordingly. + expectedHashes := []string{ + "identity", + "sha1", + "sha2-256", + "sha2-512", + "sha3-512", + "sha3-384", + "sha3-256", + "sha3-224", + "shake-256", + "keccak-224", + "keccak-256", + "keccak-384", + "keccak-512", + "blake3", + "dbl-sha2-256", + } + + // Also expect all blake2b variants (160-512 in steps of 8) + for i := 160; i <= 512; i += 8 { + expectedHashes = append(expectedHashes, fmt.Sprintf("blake2b-%d", i)) + } + + // Also expect all blake2s variants (160-256 in steps of 8) + for i := 160; i <= 256; i += 8 { + expectedHashes = append(expectedHashes, fmt.Sprintf("blake2s-%d", i)) + } + + res := node.RunIPFS("cid", "hashes") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "hash functions", expectedHashes, lines) + }) + + t.Run("with --numeric flag shows hash function codes", func(t *testing.T) { + // This is a regression test to ensure we don't accidentally add or remove + // support for hash functions. If a hash function is intentionally added + // or removed, this test should be updated accordingly. + expectedLines := []string{ + "0 identity", + "17 sha1", + "18 sha2-256", + "19 sha2-512", + "20 sha3-512", + "21 sha3-384", + "22 sha3-256", + "23 sha3-224", + "25 shake-256", + "26 keccak-224", + "27 keccak-256", + "28 keccak-384", + "29 keccak-512", + "30 blake3", + "86 dbl-sha2-256", + } + + // Add all blake2b variants (160-512 in steps of 8) + for i := 160; i <= 512; i += 8 { + expectedLines = append(expectedLines, fmt.Sprintf("%d blake2b-%d", 45568+i/8, i)) + } + + // Add all blake2s variants (160-256 in steps of 8) + for i := 160; i <= 256; i += 8 { + expectedLines = append(expectedLines, fmt.Sprintf("%d blake2s-%d", 45632+i/8, i)) + } + + res := node.RunIPFS("cid", "hashes", "--numeric") + assert.Equal(t, 0, res.ExitCode()) + + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + assertExactSet(t, "hashes --numeric output", expectedLines, lines) + }) +} + +// assertExactSet compares expected vs actual items and reports clear errors for any differences. +// This is used as a regression test to ensure we don't accidentally add or remove support. +// Both expected and actual strings are trimmed of whitespace before comparison for maintainability. +func assertExactSet(t *testing.T, itemType string, expected []string, actual []string) { + t.Helper() + + // Normalize by trimming whitespace + normalizedExpected := make([]string, len(expected)) + for i, item := range expected { + normalizedExpected[i] = strings.TrimSpace(item) + } + + normalizedActual := make([]string, len(actual)) + for i, item := range actual { + normalizedActual[i] = strings.TrimSpace(item) + } + + expectedSet := make(map[string]bool) + for _, item := range normalizedExpected { + expectedSet[item] = true + } + + actualSet := make(map[string]bool) + for _, item := range normalizedActual { + actualSet[item] = true + } + + var missing []string + for _, item := range normalizedExpected { + if !actualSet[item] { + missing = append(missing, item) + } + } + + var unexpected []string + for _, item := range normalizedActual { + if !expectedSet[item] { + unexpected = append(unexpected, item) + } + } + + if len(missing) > 0 { + t.Errorf("Missing expected %s: %q", itemType, missing) + } + if len(unexpected) > 0 { + t.Errorf("Unexpected %s found: %q", itemType, unexpected) + } + + assert.Equal(t, len(expected), len(actual), + "Expected %d %s but got %d", len(expected), itemType, len(actual)) +} diff --git a/test/cli/commands_without_repo_test.go b/test/cli/commands_without_repo_test.go new file mode 100644 index 000000000..55469adae --- /dev/null +++ b/test/cli/commands_without_repo_test.go @@ -0,0 +1,130 @@ +package cli + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +func TestCommandsWithoutRepo(t *testing.T) { + t.Run("cid", func(t *testing.T) { + t.Run("base32", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "base32", "QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "bafybeibxm2nsadl3fnxv2sxcxmxaco2jl53wpeorjdzidjwf5aqdg7wa6u\n" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("format", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "format", "-v", "1", "QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "zdj7WZAAFKPvYPPzyJLso2hhxo8a7ZACFQ4DvvfrNXTHidofr\n" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("bases", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "bases") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "base32") { + t.Fatalf("expected base32 in output, got: %s", stdout) + } + }) + + t.Run("codecs", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "codecs") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "dag-pb") { + t.Fatalf("expected dag-pb in output, got: %s", stdout) + } + }) + + t.Run("hashes", func(t *testing.T) { + cmd := exec.Command("ipfs", "cid", "hashes") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "sha2-256") { + t.Fatalf("expected sha2-256 in output, got: %s", stdout) + } + }) + }) + + t.Run("multibase", func(t *testing.T) { + t.Run("list", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "list") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(stdout), "base32") { + t.Fatalf("expected base32 in output, got: %s", stdout) + } + }) + + t.Run("encode", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "encode", "-b", "base32") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + cmd.Stdin = strings.NewReader("hello\n") + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "bnbswy3dpbi" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("decode", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "decode") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + cmd.Stdin = strings.NewReader("bnbswy3dpbi") + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "hello\n" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + + t.Run("transcode", func(t *testing.T) { + cmd := exec.Command("ipfs", "multibase", "transcode", "-b", "base64") + cmd.Env = append(os.Environ(), "IPFS_PATH="+t.TempDir()) + cmd.Stdin = strings.NewReader("bnbswy3dpbi") + stdout, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + expected := "maGVsbG8K" + if string(stdout) != expected { + t.Fatalf("expected %q, got: %q", expected, stdout) + } + }) + }) +} diff --git a/test/cli/content_blocking_test.go b/test/cli/content_blocking_test.go index 6598354d1..8c50aee2b 100644 --- a/test/cli/content_blocking_test.go +++ b/test/cli/content_blocking_test.go @@ -308,7 +308,7 @@ func TestContentBlocking(t *testing.T) { // trustless gateway exposed over libp2p // when Experimental.GatewayOverLibp2p=true // (https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#http-gateway-over-libp2p) - // NOTE: this type fo gateway is hardcoded to be NoFetch: it does not fetch + // NOTE: this type of gateway is hardcoded to be NoFetch: it does not fetch // data that is not in local store, so we only need to run it once: a // simple smoke-test for allowed CID and blockedCID. t.Run("GatewayOverLibp2p", func(t *testing.T) { diff --git a/test/cli/content_routing_http_test.go b/test/cli/content_routing_http_test.go index 4b210cfba..b6e045383 100644 --- a/test/cli/content_routing_http_test.go +++ b/test/cli/content_routing_http_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/ipfs/boxo/routing/http/server" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/ipfs/kubo/test/cli/testutils/httprouting" "github.com/stretchr/testify/assert" ) @@ -43,7 +43,7 @@ func TestContentRoutingHTTP(t *testing.T) { node.StartDaemon() // compute a random CID - randStr := string(testutils.RandomBytes(100)) + randStr := string(random.Bytes(100)) res := node.PipeStrToIPFS(randStr, "add", "-qn") wantCIDStr := res.Stdout.Trimmed() diff --git a/test/cli/daemon_test.go b/test/cli/daemon_test.go index 7a8c583a2..f87a21651 100644 --- a/test/cli/daemon_test.go +++ b/test/cli/daemon_test.go @@ -1,10 +1,20 @@ package cli import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "net/http" "os/exec" "testing" + "time" + "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/stretchr/testify/require" ) func TestDaemon(t *testing.T) { @@ -22,4 +32,125 @@ func TestDaemon(t *testing.T) { node.StopDaemon() }) + + t.Run("daemon shuts down gracefully with active operations", func(t *testing.T) { + t.Parallel() + + // Start daemon with multiple components active via config + node := harness.NewT(t).NewNode().Init() + + // Enable experimental features and pubsub via config + node.UpdateConfig(func(cfg *config.Config) { + cfg.Pubsub.Enabled = config.True // Instead of --enable-pubsub-experiment + cfg.Experimental.P2pHttpProxy = true // Enable P2P HTTP proxy + cfg.Experimental.GatewayOverLibp2p = true // Enable gateway over libp2p + }) + + node.StartDaemon("--enable-gc") + + // Start background operations to simulate real daemon workload: + // 1. "ipfs add" simulates content onboarding/ingestion work + // 2. Gateway request simulates content retrieval and gateway processing work + + // Background operation 1: Continuous add of random data to simulate onboarding + addDone := make(chan struct{}) + go func() { + defer close(addDone) + + // Start the add command asynchronously + res := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"add", "--progress=false", "-"}, + RunFunc: (*exec.Cmd).Start, + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdin(&infiniteReader{}), + }, + }) + + // Wait for command to finish (when daemon stops) + if res.Cmd != nil { + _ = res.Cmd.Wait() // Ignore error, expect command to be killed during shutdown + } + }() + + // Background operation 2: Gateway CAR request to simulate retrieval work + gatewayDone := make(chan struct{}) + go func() { + defer close(gatewayDone) + + // First add a file sized to ensure gateway request takes ~1 minute + largeData := make([]byte, 512*1024) // 512KB of data + _, _ = rand.Read(largeData) // Always succeeds for crypto/rand + testCID := node.IPFSAdd(bytes.NewReader(largeData)) + + // Get gateway address from config + cfg := node.ReadConfig() + gatewayMaddr, err := multiaddr.NewMultiaddr(cfg.Addresses.Gateway[0]) + if err != nil { + return + } + gatewayAddr, err := manet.ToNetAddr(gatewayMaddr) + if err != nil { + return + } + + // Request CAR but slow reading to simulate heavy gateway load + gatewayURL := fmt.Sprintf("http://%s/ipfs/%s?format=car", gatewayAddr, testCID) + + client := &http.Client{Timeout: 90 * time.Second} + resp, err := client.Get(gatewayURL) + if err == nil { + defer resp.Body.Close() + // Read response slowly: 512KB ÷ 1KB × 125ms = ~64 seconds (1+ minute) total + // This ensures operation is still active when we shutdown at 2 seconds + buf := make([]byte, 1024) // 1KB buffer + for { + if _, err := io.ReadFull(resp.Body, buf); err != nil { + return + } + time.Sleep(125 * time.Millisecond) // 125ms delay = ~64s total for 512KB + } + } + }() + + // Let operations run for 2 seconds to ensure they're active + time.Sleep(2 * time.Second) + + // Trigger graceful shutdown + shutdownStart := time.Now() + node.StopDaemon() + shutdownDuration := time.Since(shutdownStart) + + // Verify clean shutdown: + // - Daemon should stop within reasonable time (not hang) + require.Less(t, shutdownDuration, 10*time.Second, "daemon should shut down within 10 seconds") + + // Wait for background operations to complete (with timeout) + select { + case <-addDone: + // Good, add operation terminated + case <-time.After(5 * time.Second): + t.Error("add operation did not terminate within 5 seconds after daemon shutdown") + } + + select { + case <-gatewayDone: + // Good, gateway operation terminated + case <-time.After(5 * time.Second): + t.Error("gateway operation did not terminate within 5 seconds after daemon shutdown") + } + + // Verify we can restart with same repo (no lock issues) + node.StartDaemon() + node.StopDaemon() + }) +} + +// infiniteReader provides an infinite stream of random data +type infiniteReader struct{} + +func (r *infiniteReader) Read(p []byte) (n int, err error) { + _, _ = rand.Read(p) // Always succeeds for crypto/rand + time.Sleep(50 * time.Millisecond) // Rate limit to simulate steady stream + return len(p), nil } diff --git a/test/cli/delegated_routing_v1_http_proxy_test.go b/test/cli/delegated_routing_v1_http_proxy_test.go index 7f8ff8bca..548459653 100644 --- a/test/cli/delegated_routing_v1_http_proxy_test.go +++ b/test/cli/delegated_routing_v1_http_proxy_test.go @@ -4,9 +4,9 @@ import ( "testing" "github.com/ipfs/boxo/ipns" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -70,7 +70,7 @@ func TestRoutingV1Proxy(t *testing.T) { t.Parallel() nodes := setupNodes(t) - cidStr := nodes[0].IPFSAddStr(testutils.RandomStr(1000)) + cidStr := nodes[0].IPFSAddStr(string(random.Bytes(1000))) // Reprovide as initialProviderDelay still ongoing res := nodes[0].IPFS("routing", "reprovide") require.NoError(t, res.Err) @@ -109,7 +109,7 @@ func TestRoutingV1Proxy(t *testing.T) { require.Error(t, res.ExitErr) // Publish record on Node 0. - path := "/ipfs/" + nodes[0].IPFSAddStr(testutils.RandomStr(1000)) + path := "/ipfs/" + nodes[0].IPFSAddStr(string(random.Bytes(1000))) nodes[0].IPFS("name", "publish", "--allow-offline", path) // Get record on Node 1 (no DHT). @@ -132,7 +132,7 @@ func TestRoutingV1Proxy(t *testing.T) { require.Error(t, res.ExitErr) // Publish name. - path := "/ipfs/" + nodes[0].IPFSAddStr(testutils.RandomStr(1000)) + path := "/ipfs/" + nodes[0].IPFSAddStr(string(random.Bytes(1000))) nodes[0].IPFS("name", "publish", "--allow-offline", path) // Resolve IPNS name @@ -146,7 +146,7 @@ func TestRoutingV1Proxy(t *testing.T) { // Publish something on Node 1 (no DHT). nodeName := "/ipns/" + ipns.NameFromPeer(nodes[1].PeerID()).String() - path := "/ipfs/" + nodes[1].IPFSAddStr(testutils.RandomStr(1000)) + path := "/ipfs/" + nodes[1].IPFSAddStr(string(random.Bytes(1000))) nodes[1].IPFS("name", "publish", "--allow-offline", path) // Retrieve through Node 0. diff --git a/test/cli/dht_autoclient_test.go b/test/cli/dht_autoclient_test.go index 39aa5b258..adb200509 100644 --- a/test/cli/dht_autoclient_test.go +++ b/test/cli/dht_autoclient_test.go @@ -4,8 +4,8 @@ import ( "bytes" "testing" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" ) @@ -19,7 +19,7 @@ func TestDHTAutoclient(t *testing.T) { t.Run("file added on node in client mode is retrievable from node in client mode", func(t *testing.T) { t.Parallel() - randomBytes := testutils.RandomBytes(1000) + randomBytes := random.Bytes(1000) randomBytes = append(randomBytes, '\r') hash := nodes[8].IPFSAdd(bytes.NewReader(randomBytes)) @@ -29,7 +29,7 @@ func TestDHTAutoclient(t *testing.T) { t.Run("file added on node in server mode is retrievable from all nodes", func(t *testing.T) { t.Parallel() - randomBytes := testutils.RandomBytes(1000) + randomBytes := random.Bytes(1000) hash := nodes[0].IPFSAdd(bytes.NewReader(randomBytes)) for i := 0; i < 10; i++ { diff --git a/test/cli/dht_opt_prov_test.go b/test/cli/dht_opt_prov_test.go index f7b492066..3cdb9d51c 100644 --- a/test/cli/dht_opt_prov_test.go +++ b/test/cli/dht_opt_prov_test.go @@ -3,9 +3,9 @@ package cli import ( "testing" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" ) @@ -21,7 +21,7 @@ func TestDHTOptimisticProvide(t *testing.T) { nodes.StartDaemons().Connect() - hash := nodes[0].IPFSAddStr(testutils.RandomStr(100)) + hash := nodes[0].IPFSAddStr(string(random.Bytes(100))) nodes[0].IPFS("routing", "provide", hash) res := nodes[1].IPFS("routing", "findprovs", "--num-providers=1", hash) diff --git a/test/cli/fuse_test.go b/test/cli/fuse_test.go new file mode 100644 index 000000000..6182a069a --- /dev/null +++ b/test/cli/fuse_test.go @@ -0,0 +1,166 @@ +package cli + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/require" +) + +func TestFUSE(t *testing.T) { + testutils.RequiresFUSE(t) + t.Parallel() + + t.Run("mount and unmount work correctly", func(t *testing.T) { + t.Parallel() + + // Create a node and start daemon + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + // Create mount directories in the node's working directory + nodeDir := node.Dir + ipfsMount := filepath.Join(nodeDir, "ipfs") + ipnsMount := filepath.Join(nodeDir, "ipns") + mfsMount := filepath.Join(nodeDir, "mfs") + + err := os.MkdirAll(ipfsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(ipnsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(mfsMount, 0755) + require.NoError(t, err) + + // Ensure any existing mounts are cleaned up first + failOnError := false // mount points might not exist from previous runs + doUnmount(t, ipfsMount, failOnError) + doUnmount(t, ipnsMount, failOnError) + doUnmount(t, mfsMount, failOnError) + + // Test mount operation + result := node.IPFS("mount", "-f", ipfsMount, "-n", ipnsMount, "-m", mfsMount) + + // Verify mount output + expectedOutput := "IPFS mounted at: " + ipfsMount + "\n" + + "IPNS mounted at: " + ipnsMount + "\n" + + "MFS mounted at: " + mfsMount + "\n" + require.Equal(t, expectedOutput, result.Stdout.String()) + + // Test basic MFS functionality via FUSE mount + testFile := filepath.Join(mfsMount, "testfile") + testContent := "hello fuse world" + + // Create file via FUSE mount + err = os.WriteFile(testFile, []byte(testContent), 0644) + require.NoError(t, err) + + // Verify file appears in MFS via IPFS commands + result = node.IPFS("files", "ls", "/") + require.Contains(t, result.Stdout.String(), "testfile") + + // Read content back via MFS FUSE mount + readContent, err := os.ReadFile(testFile) + require.NoError(t, err) + require.Equal(t, testContent, string(readContent)) + + // Get the CID of the MFS file + result = node.IPFS("files", "stat", "/testfile", "--format=") + fileCID := strings.TrimSpace(result.Stdout.String()) + require.NotEmpty(t, fileCID, "should have a CID for the MFS file") + + // Read the same content via IPFS FUSE mount using the CID + ipfsFile := filepath.Join(ipfsMount, fileCID) + ipfsContent, err := os.ReadFile(ipfsFile) + require.NoError(t, err) + require.Equal(t, testContent, string(ipfsContent), "content should match between MFS and IPFS mounts") + + // Verify both FUSE mounts return identical data + require.Equal(t, readContent, ipfsContent, "MFS and IPFS FUSE mounts should return identical data") + + // Test that mount directories cannot be removed while mounted + err = os.Remove(ipfsMount) + require.Error(t, err, "should not be able to remove mounted directory") + + // Stop daemon - this should trigger automatic unmount via context cancellation + node.StopDaemon() + + // Daemon shutdown should handle unmount synchronously via context.AfterFunc + + // Verify directories can now be removed (indicating successful unmount) + err = os.Remove(ipfsMount) + require.NoError(t, err, "should be able to remove directory after unmount") + err = os.Remove(ipnsMount) + require.NoError(t, err, "should be able to remove directory after unmount") + err = os.Remove(mfsMount) + require.NoError(t, err, "should be able to remove directory after unmount") + }) + + t.Run("explicit unmount works", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() + + // Create mount directories + nodeDir := node.Dir + ipfsMount := filepath.Join(nodeDir, "ipfs") + ipnsMount := filepath.Join(nodeDir, "ipns") + mfsMount := filepath.Join(nodeDir, "mfs") + + err := os.MkdirAll(ipfsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(ipnsMount, 0755) + require.NoError(t, err) + err = os.MkdirAll(mfsMount, 0755) + require.NoError(t, err) + + // Clean up any existing mounts + failOnError := false // mount points might not exist from previous runs + doUnmount(t, ipfsMount, failOnError) + doUnmount(t, ipnsMount, failOnError) + doUnmount(t, mfsMount, failOnError) + + // Mount + node.IPFS("mount", "-f", ipfsMount, "-n", ipnsMount, "-m", mfsMount) + + // Explicit unmount via platform-specific command + failOnError = true // test that explicit unmount works correctly + doUnmount(t, ipfsMount, failOnError) + doUnmount(t, ipnsMount, failOnError) + doUnmount(t, mfsMount, failOnError) + + // Verify directories can be removed after explicit unmount + err = os.Remove(ipfsMount) + require.NoError(t, err) + err = os.Remove(ipnsMount) + require.NoError(t, err) + err = os.Remove(mfsMount) + require.NoError(t, err) + + node.StopDaemon() + }) +} + +// doUnmount performs platform-specific unmount, similar to sharness do_umount +// failOnError: if true, unmount errors cause test failure; if false, errors are ignored (useful for cleanup) +func doUnmount(t *testing.T, mountPoint string, failOnError bool) { + t.Helper() + var cmd *exec.Cmd + if runtime.GOOS == "linux" { + // fusermount -u: unmount filesystem (strict - fails if busy) + cmd = exec.Command("fusermount", "-u", mountPoint) + } else { + cmd = exec.Command("umount", mountPoint) + } + + err := cmd.Run() + if err != nil && failOnError { + t.Fatalf("failed to unmount %s: %v", mountPoint, err) + } +} diff --git a/test/cli/gateway_limits_test.go b/test/cli/gateway_limits_test.go new file mode 100644 index 000000000..2c5554cf3 --- /dev/null +++ b/test/cli/gateway_limits_test.go @@ -0,0 +1,132 @@ +package cli + +import ( + "net/http" + "testing" + "time" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +// TestGatewayLimits tests the gateway request limiting and timeout features. +// These are basic integration tests that verify the configuration works. +// For comprehensive tests, see: +// - github.com/ipfs/boxo/gateway/middleware_retrieval_timeout_test.go +// - github.com/ipfs/boxo/gateway/middleware_ratelimit_test.go +func TestGatewayLimits(t *testing.T) { + t.Parallel() + + t.Run("RetrievalTimeout", func(t *testing.T) { + t.Parallel() + + // Create a node with a short retrieval timeout + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + // Set a 1 second timeout for retrieval + cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(1 * time.Second) + }) + node.StartDaemon() + + // Add content that can be retrieved quickly + cid := node.IPFSAddStr("test content") + + client := node.GatewayClient() + + // Normal request should succeed (content is local) + resp := client.Get("/ipfs/" + cid) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "test content", resp.Body) + + // Request for non-existent content should timeout + // Using a CID that has no providers (generated with ipfs add -n) + nonExistentCID := "bafkreif6lrhgz3fpiwypdk65qrqiey7svgpggruhbylrgv32l3izkqpsc4" + + // Create a client with longer timeout than the gateway's retrieval timeout + // to ensure we get the gateway's 504 response + clientWithTimeout := &harness.HTTPClient{ + Client: &http.Client{ + Timeout: 5 * time.Second, + }, + BaseURL: client.BaseURL, + } + + resp = clientWithTimeout.Get("/ipfs/" + nonExistentCID) + assert.Equal(t, http.StatusGatewayTimeout, resp.StatusCode, "Expected 504 Gateway Timeout for stuck retrieval") + assert.Contains(t, resp.Body, "Unable to retrieve content within timeout period") + }) + + t.Run("MaxConcurrentRequests", func(t *testing.T) { + t.Parallel() + + // Create a node with a low concurrent request limit + node := harness.NewT(t).NewNode().Init() + node.UpdateConfig(func(cfg *config.Config) { + // Allow only 1 concurrent request to make test deterministic + cfg.Gateway.MaxConcurrentRequests = config.NewOptionalInteger(1) + // Set retrieval timeout so blocking requests don't hang forever + cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(2 * time.Second) + }) + node.StartDaemon() + + // Add some content - use a non-existent CID that will block during retrieval + // to ensure we can control timing + blockingCID := "bafkreif6lrhgz3fpiwypdk65qrqiey7svgpggruhbylrgv32l3izkqpsc4" + normalCID := node.IPFSAddStr("test content for concurrent request limiting") + + client := node.GatewayClient() + + // First, verify single request succeeds + resp := client.Get("/ipfs/" + normalCID) + assert.Equal(t, http.StatusOK, resp.StatusCode) + + // Now test deterministic 429 response: + // Start a blocking request that will occupy the single slot, + // then make another request that MUST get 429 + + blockingStarted := make(chan bool) + blockingDone := make(chan bool) + + // Start a request that will block (searching for non-existent content) + go func() { + blockingStarted <- true + // This will block until timeout looking for providers + client.Get("/ipfs/" + blockingCID) + blockingDone <- true + }() + + // Wait for blocking request to start and occupy the slot + <-blockingStarted + time.Sleep(1 * time.Second) // Ensure it has acquired the semaphore + + // This request MUST get 429 because the slot is occupied + resp = client.Get("/ipfs/" + normalCID + "?must-get-429=true") + assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode, "Second request must get 429 when slot is occupied") + + // Verify 429 response headers + retryAfter := resp.Headers.Get("Retry-After") + assert.NotEmpty(t, retryAfter, "Retry-After header must be set on 429 response") + assert.Equal(t, "60", retryAfter, "Retry-After must be 60 seconds") + + cacheControl := resp.Headers.Get("Cache-Control") + assert.Equal(t, "no-store", cacheControl, "Cache-Control must be no-store on 429 response") + + assert.Contains(t, resp.Body, "Too many requests", "429 response must contain error message") + + // Clean up: wait for blocking request to timeout (it will timeout due to gateway retrieval timeout) + select { + case <-blockingDone: + // Good, it completed + case <-time.After(10 * time.Second): + // Give it more time if needed + } + + // Wait a bit more to ensure slot is fully released + time.Sleep(1 * time.Second) + + // After blocking request completes, new request should succeed + resp = client.Get("/ipfs/" + normalCID + "?after-limit-cleared=true") + assert.Equal(t, http.StatusOK, resp.StatusCode, "Request must succeed after slot is freed") + }) +} diff --git a/test/cli/harness/ipfs.go b/test/cli/harness/ipfs.go index 0842d3627..2f7a8f18e 100644 --- a/test/cli/harness/ipfs.go +++ b/test/cli/harness/ipfs.go @@ -101,6 +101,34 @@ func (n *Node) IPFSAdd(content io.Reader, args ...string) string { return out } +func (n *Node) IPFSBlockPut(content io.Reader, args ...string) string { + log.Debugf("node %d block put with args: %v", n.ID, args) + fullArgs := []string{"block", "put"} + fullArgs = append(fullArgs, args...) + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: fullArgs, + CmdOpts: []CmdOpt{RunWithStdin(content)}, + }) + out := strings.TrimSpace(res.Stdout.String()) + log.Debugf("block put result: %q", out) + return out +} + +func (n *Node) IPFSDAGPut(content io.Reader, args ...string) string { + log.Debugf("node %d dag put with args: %v", n.ID, args) + fullArgs := []string{"dag", "put"} + fullArgs = append(fullArgs, args...) + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: fullArgs, + CmdOpts: []CmdOpt{RunWithStdin(content)}, + }) + out := strings.TrimSpace(res.Stdout.String()) + log.Debugf("dag put result: %q", out) + return out +} + func (n *Node) IPFSDagImport(content io.Reader, cid string, args ...string) error { log.Debugf("node %d dag import with args: %v", n.ID, args) fullArgs := []string{"dag", "import", "--pin-roots=false"} diff --git a/test/cli/harness/node.go b/test/cli/harness/node.go index 49a551535..0315e81df 100644 --- a/test/cli/harness/node.go +++ b/test/cli/harness/node.go @@ -54,6 +54,42 @@ func BuildNode(ipfsBin, baseDir string, id int) *Node { env := environToMap(os.Environ()) env["IPFS_PATH"] = dir + // If using "ipfs" binary name, provide helpful binary information + if ipfsBin == "ipfs" { + // Check if cmd/ipfs/ipfs exists (simple relative path check) + localBinary := "cmd/ipfs/ipfs" + localExists := false + if _, err := os.Stat(localBinary); err == nil { + localExists = true + if abs, err := filepath.Abs(localBinary); err == nil { + localBinary = abs + } + } + + // Check if ipfs is available in PATH + pathBinary, pathErr := exec.LookPath("ipfs") + + // Handle different scenarios + if pathErr != nil { + // No ipfs in PATH + if localExists { + fmt.Printf("WARNING: No 'ipfs' found in PATH, but local binary exists at %s\n", localBinary) + fmt.Printf("Consider adding it to PATH or run: export PATH=\"$(pwd)/cmd/ipfs:$PATH\"\n") + } else { + fmt.Printf("ERROR: No 'ipfs' binary found in PATH and no local build at cmd/ipfs/ipfs\n") + fmt.Printf("Run 'make build' first or install ipfs and add it to PATH\n") + panic("ipfs binary not available") + } + } else { + // ipfs found in PATH + if localExists && localBinary != pathBinary { + fmt.Printf("NOTE: Local binary at %s differs from PATH binary at %s\n", localBinary, pathBinary) + fmt.Printf("Consider adding the local binary to PATH if you want to use the version built by 'make build'\n") + } + // If they match or no local binary, no message needed + } + } + return &Node{ ID: id, Dir: dir, @@ -209,6 +245,14 @@ func (n *Node) Init(ipfsArgs ...string) *Node { cfg.Swarm.DisableNatPortMap = true cfg.Discovery.MDNS.Enabled = n.EnableMDNS cfg.Routing.LoopbackAddressesOnLanDHT = config.True + // Telemetry disabled by default in tests. + cfg.Plugins = config.Plugins{ + Plugins: map[string]config.Plugin{ + "telemetry": config.Plugin{ + Disabled: true, + }, + }, + } }) return n } @@ -457,28 +501,60 @@ func (n *Node) IsAlive() bool { } func (n *Node) SwarmAddrs() []multiaddr.Multiaddr { - res := n.Runner.MustRun(RunRequest{ + res := n.Runner.Run(RunRequest{ Path: n.IPFSBin, Args: []string{"swarm", "addrs", "local"}, }) + if res.ExitCode() != 0 { + // If swarm command fails (e.g., daemon not online), return empty slice + log.Debugf("Node %d: swarm addrs local failed (exit %d): %s", n.ID, res.ExitCode(), res.Stderr.String()) + return []multiaddr.Multiaddr{} + } out := strings.TrimSpace(res.Stdout.String()) + if out == "" { + log.Debugf("Node %d: swarm addrs local returned empty output", n.ID) + return []multiaddr.Multiaddr{} + } + log.Debugf("Node %d: swarm addrs local output: %s", n.ID, out) outLines := strings.Split(out, "\n") var addrs []multiaddr.Multiaddr for _, addrStr := range outLines { + addrStr = strings.TrimSpace(addrStr) + if addrStr == "" { + continue + } ma, err := multiaddr.NewMultiaddr(addrStr) if err != nil { panic(err) } addrs = append(addrs, ma) } + log.Debugf("Node %d: parsed %d swarm addresses", n.ID, len(addrs)) return addrs } +// SwarmAddrsWithTimeout waits for swarm addresses to be available +func (n *Node) SwarmAddrsWithTimeout(timeout time.Duration) []multiaddr.Multiaddr { + start := time.Now() + for time.Since(start) < timeout { + addrs := n.SwarmAddrs() + if len(addrs) > 0 { + return addrs + } + time.Sleep(100 * time.Millisecond) + } + return []multiaddr.Multiaddr{} +} + func (n *Node) SwarmAddrsWithPeerIDs() []multiaddr.Multiaddr { + return n.SwarmAddrsWithPeerIDsTimeout(5 * time.Second) +} + +func (n *Node) SwarmAddrsWithPeerIDsTimeout(timeout time.Duration) []multiaddr.Multiaddr { ipfsProtocol := multiaddr.ProtocolWithCode(multiaddr.P_IPFS).Name peerID := n.PeerID() var addrs []multiaddr.Multiaddr - for _, ma := range n.SwarmAddrs() { + for _, ma := range n.SwarmAddrsWithTimeout(timeout) { // add the peer ID to the multiaddr if it doesn't have it _, err := ma.ValueForProtocol(multiaddr.P_IPFS) if errors.Is(err, multiaddr.ErrProtocolNotFound) { @@ -513,18 +589,80 @@ func (n *Node) SwarmAddrsWithoutPeerIDs() []multiaddr.Multiaddr { } func (n *Node) Connect(other *Node) *Node { - n.Runner.MustRun(RunRequest{ + // Get the peer addresses to connect to + addrs := other.SwarmAddrsWithPeerIDs() + if len(addrs) == 0 { + // If no addresses available, skip connection + log.Debugf("No swarm addresses available for connection") + return n + } + // Use Run instead of MustRun to avoid panics on connection failures + res := n.Runner.Run(RunRequest{ Path: n.IPFSBin, - Args: []string{"swarm", "connect", other.SwarmAddrsWithPeerIDs()[0].String()}, + Args: []string{"swarm", "connect", addrs[0].String()}, }) + if res.ExitCode() != 0 { + log.Debugf("swarm connect failed: %s", res.Stderr.String()) + } return n } +// ConnectAndWait connects to another node and waits for the connection to be established +func (n *Node) ConnectAndWait(other *Node, timeout time.Duration) error { + // Get the peer addresses to connect to - wait up to half the timeout for addresses + addrs := other.SwarmAddrsWithPeerIDsTimeout(timeout / 2) + if len(addrs) == 0 { + return fmt.Errorf("no swarm addresses available for node %d after waiting %v", other.ID, timeout/2) + } + + otherPeerID := other.PeerID() + + // Try to connect + res := n.Runner.Run(RunRequest{ + Path: n.IPFSBin, + Args: []string{"swarm", "connect", addrs[0].String()}, + }) + if res.ExitCode() != 0 { + return fmt.Errorf("swarm connect failed: %s", res.Stderr.String()) + } + + // Wait for connection to be established + start := time.Now() + for time.Since(start) < timeout { + peers := n.Peers() + for _, peerAddr := range peers { + if peerID, err := peerAddr.ValueForProtocol(multiaddr.P_P2P); err == nil { + if peerID == otherPeerID.String() { + return nil // Connection established + } + } + } + time.Sleep(100 * time.Millisecond) + } + + return fmt.Errorf("timeout waiting for connection to node %d (peer %s)", other.ID, otherPeerID) +} + func (n *Node) Peers() []multiaddr.Multiaddr { - res := n.Runner.MustRun(RunRequest{ + // Wait for daemon to be ready if it's supposed to be running + if n.Daemon != nil && n.Daemon.Cmd != nil && n.Daemon.Cmd.Process != nil { + // Give daemon a short time to become ready + for i := 0; i < 10; i++ { + if n.IsAlive() { + break + } + time.Sleep(100 * time.Millisecond) + } + } + res := n.Runner.Run(RunRequest{ Path: n.IPFSBin, Args: []string{"swarm", "peers"}, }) + if res.ExitCode() != 0 { + // If swarm peers fails (e.g., daemon not online), return empty slice + log.Debugf("swarm peers failed: %s", res.Stderr.String()) + return []multiaddr.Multiaddr{} + } var addrs []multiaddr.Multiaddr for _, line := range res.Stdout.Lines() { ma, err := multiaddr.NewMultiaddr(line) diff --git a/test/cli/harness/nodes.go b/test/cli/harness/nodes.go index 113289e3c..8a5451e03 100644 --- a/test/cli/harness/nodes.go +++ b/test/cli/harness/nodes.go @@ -5,7 +5,6 @@ import ( . "github.com/ipfs/kubo/test/cli/testutils" "github.com/multiformats/go-multiaddr" - "golang.org/x/sync/errgroup" ) // Nodes is a collection of Kubo nodes along with operations on groups of nodes. @@ -17,37 +16,28 @@ func (n Nodes) Init(args ...string) Nodes { } func (n Nodes) ForEachPar(f func(*Node)) { - group := &errgroup.Group{} + var wg sync.WaitGroup for _, node := range n { + wg.Add(1) node := node - group.Go(func() error { + go func() { + defer wg.Done() f(node) - return nil - }) - } - err := group.Wait() - if err != nil { - panic(err) + }() } + wg.Wait() } func (n Nodes) Connect() Nodes { - wg := sync.WaitGroup{} for i, node := range n { for j, otherNode := range n { if i == j { continue } - node := node - otherNode := otherNode - wg.Add(1) - go func() { - defer wg.Done() - node.Connect(otherNode) - }() + // Do not connect in parallel, because that can cause TLS handshake problems on some platforms. + node.Connect(otherNode) } } - wg.Wait() for _, node := range n { firstPeer := node.Peers()[0] if _, err := firstPeer.ValueForProtocol(multiaddr.P_P2P); err != nil { diff --git a/test/cli/harness/peering.go b/test/cli/harness/peering.go index 8488c822d..445c2cf26 100644 --- a/test/cli/harness/peering.go +++ b/test/cli/harness/peering.go @@ -4,6 +4,7 @@ import ( "fmt" "math/rand" "net" + "sync" "testing" "github.com/ipfs/kubo/config" @@ -14,16 +15,39 @@ type Peering struct { To int } +var ( + allocatedPorts = make(map[int]struct{}) + portMutex sync.Mutex +) + func NewRandPort() int { - if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil { - var l *net.TCPListener - if l, err = net.ListenTCP("tcp", a); err == nil { - defer l.Close() - return l.Addr().(*net.TCPAddr).Port + portMutex.Lock() + defer portMutex.Unlock() + + for i := 0; i < 100; i++ { + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + continue + } + port := l.Addr().(*net.TCPAddr).Port + l.Close() + + if _, used := allocatedPorts[port]; !used { + allocatedPorts[port] = struct{}{} + return port } } - n := rand.Int() - return 3000 + (n % 1000) + + // Fallback to random port if we can't get a unique one from the OS + for i := 0; i < 1000; i++ { + port := 30000 + rand.Intn(10000) + if _, used := allocatedPorts[port]; !used { + allocatedPorts[port] = struct{}{} + return port + } + } + + panic("failed to allocate unique port after 1100 attempts") } func CreatePeerNodes(t *testing.T, n int, peerings []Peering) (*Harness, Nodes) { diff --git a/test/cli/http_retrieval_client_test.go b/test/cli/http_retrieval_client_test.go index f845c818e..e2934fc99 100644 --- a/test/cli/http_retrieval_client_test.go +++ b/test/cli/http_retrieval_client_test.go @@ -13,9 +13,9 @@ import ( "github.com/ipfs/boxo/routing/http/server" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/ipfs/kubo/test/cli/testutils/httprouting" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" @@ -51,7 +51,7 @@ func TestHTTPRetrievalClient(t *testing.T) { }) // compute a random CID - randStr := string(testutils.RandomBytes(100)) + randStr := string(random.Bytes(100)) res := node.PipeStrToIPFS(randStr, "add", "-qn", "--cid-version", "1") // -n means dont add to local repo, just produce CID wantCIDStr := res.Stdout.Trimmed() testCid := cid.MustParse(wantCIDStr) diff --git a/test/cli/log_level_test.go b/test/cli/log_level_test.go new file mode 100644 index 000000000..e5c9eb8f8 --- /dev/null +++ b/test/cli/log_level_test.go @@ -0,0 +1,663 @@ +package cli + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + . "github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLogLevel(t *testing.T) { + + t.Run("CLI", func(t *testing.T) { + t.Run("level '*' shows all subsystems", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := getExpectedSubsystems(t, node) + + res := node.IPFS("log", "level", "*") + assert.NoError(t, res.Err) + assert.Empty(t, res.Stderr.Lines()) + + actualSubsystems := parseCLIOutput(t, res.Stdout.String()) + + // Should show all subsystems plus the (default) entry + assert.GreaterOrEqual(t, len(actualSubsystems), len(expectedSubsystems)) + + validateAllSubsystemsPresentCLI(t, expectedSubsystems, actualSubsystems, "CLI output") + + // Should have the (default) entry + _, hasDefault := actualSubsystems["(default)"] + assert.True(t, hasDefault, "Should have '(default)' entry") + }) + + t.Run("level 'all' shows all subsystems (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := getExpectedSubsystems(t, node) + + res := node.IPFS("log", "level", "all") + assert.NoError(t, res.Err) + assert.Empty(t, res.Stderr.Lines()) + + actualSubsystems := parseCLIOutput(t, res.Stdout.String()) + + // Should show all subsystems plus the (default) entry + assert.GreaterOrEqual(t, len(actualSubsystems), len(expectedSubsystems)) + + validateAllSubsystemsPresentCLI(t, expectedSubsystems, actualSubsystems, "CLI output") + + // Should have the (default) entry + _, hasDefault := actualSubsystems["(default)"] + assert.True(t, hasDefault, "Should have '(default)' entry") + }) + + t.Run("get level for specific subsystem", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + node.IPFS("log", "level", "core", "debug") + res := node.IPFS("log", "level", "core") + assert.NoError(t, res.Err) + assert.Empty(t, res.Stderr.Lines()) + + output := res.Stdout.String() + lines := SplitLines(output) + + assert.Equal(t, 1, len(lines)) + + line := strings.TrimSpace(lines[0]) + assert.Equal(t, "debug", line) + }) + + t.Run("get level with no args returns default level", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + res1 := node.IPFS("log", "level", "*", "fatal") + assert.NoError(t, res1.Err) + assert.Empty(t, res1.Stderr.Lines()) + + res := node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, 0, len(res.Stderr.Lines())) + + output := res.Stdout.String() + lines := SplitLines(output) + + assert.Equal(t, 1, len(lines)) + + line := strings.TrimSpace(lines[0]) + assert.Equal(t, "fatal", line) + }) + + t.Run("get level reflects runtime log level changes", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon("--offline") + defer node.StopDaemon() + + node.IPFS("log", "level", "core", "debug") + res := node.IPFS("log", "level", "core") + assert.NoError(t, res.Err) + + output := res.Stdout.String() + lines := SplitLines(output) + + assert.Equal(t, 1, len(lines)) + + line := strings.TrimSpace(lines[0]) + assert.Equal(t, "debug", line) + }) + + t.Run("get level with non-existent subsystem returns error", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + res := node.RunIPFS("log", "level", "non-existent-subsystem") + assert.Error(t, res.Err) + assert.NotEqual(t, 0, len(res.Stderr.Lines())) + }) + + t.Run("set level to 'default' keyword", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // First set a specific subsystem to a different level + res1 := node.IPFS("log", "level", "core", "debug") + assert.NoError(t, res1.Err) + assert.Contains(t, res1.Stdout.String(), "Changed log level of 'core' to 'debug'") + + // Verify it was set to debug + res2 := node.IPFS("log", "level", "core") + assert.NoError(t, res2.Err) + assert.Equal(t, "debug", strings.TrimSpace(res2.Stdout.String())) + + // Get the current default level (should be 'error' since unchanged) + res3 := node.IPFS("log", "level") + assert.NoError(t, res3.Err) + defaultLevel := strings.TrimSpace(res3.Stdout.String()) + assert.Equal(t, "error", defaultLevel, "Default level should be 'error' when unchanged") + + // Now set the subsystem back to default + res4 := node.IPFS("log", "level", "core", "default") + assert.NoError(t, res4.Err) + assert.Contains(t, res4.Stdout.String(), "Changed log level of 'core' to") + + // Verify it's now at the default level (should be 'error') + res5 := node.IPFS("log", "level", "core") + assert.NoError(t, res5.Err) + assert.Equal(t, "error", strings.TrimSpace(res5.Stdout.String())) + }) + + t.Run("set all subsystems with 'all' changes default (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Initial state - default should be 'error' + res := node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Set one subsystem to a different level + res = node.IPFS("log", "level", "core", "debug") + assert.NoError(t, res.Err) + + // Default should still be 'error' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Now use 'all' to set everything to 'info' + res = node.IPFS("log", "level", "all", "info") + assert.NoError(t, res.Err) + assert.Contains(t, res.Stdout.String(), "Changed log level of '*' to 'info'") + + // Default should now be 'info' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Core should also be 'info' (overwritten by 'all') + res = node.IPFS("log", "level", "core") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Any other subsystem should also be 'info' + res = node.IPFS("log", "level", "dht") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + }) + + t.Run("set all subsystems with '*' changes default", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Initial state - default should be 'error' + res := node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Set one subsystem to a different level + res = node.IPFS("log", "level", "core", "debug") + assert.NoError(t, res.Err) + + // Default should still be 'error' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "error", strings.TrimSpace(res.Stdout.String())) + + // Now use '*' to set everything to 'info' + res = node.IPFS("log", "level", "*", "info") + assert.NoError(t, res.Err) + assert.Contains(t, res.Stdout.String(), "Changed log level of '*' to 'info'") + + // Default should now be 'info' + res = node.IPFS("log", "level") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Core should also be 'info' (overwritten by '*') + res = node.IPFS("log", "level", "core") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + + // Any other subsystem should also be 'info' + res = node.IPFS("log", "level", "dht") + assert.NoError(t, res.Err) + assert.Equal(t, "info", strings.TrimSpace(res.Stdout.String())) + }) + + t.Run("'all' in get mode shows (default) entry (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get all levels with 'all' + res := node.IPFS("log", "level", "all") + assert.NoError(t, res.Err) + + output := res.Stdout.String() + + // Should contain "(default): error" entry + assert.Contains(t, output, "(default): error", "Should show default level with (default) key") + + // Should also contain various subsystems + assert.Contains(t, output, "core: error") + assert.Contains(t, output, "dht: error") + }) + + t.Run("'*' in get mode shows (default) entry", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get all levels with '*' + res := node.IPFS("log", "level", "*") + assert.NoError(t, res.Err) + + output := res.Stdout.String() + + // Should contain "(default): error" entry + assert.Contains(t, output, "(default): error", "Should show default level with (default) key") + + // Should also contain various subsystems + assert.Contains(t, output, "core: error") + assert.Contains(t, output, "dht: error") + }) + + t.Run("set all subsystems to 'default' using 'all' (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get the original default level (just for reference, it should be "error") + res0 := node.IPFS("log", "level") + assert.NoError(t, res0.Err) + assert.Equal(t, "error", strings.TrimSpace(res0.Stdout.String())) + + // First set all subsystems to debug using 'all' + res1 := node.IPFS("log", "level", "all", "debug") + assert.NoError(t, res1.Err) + assert.Contains(t, res1.Stdout.String(), "Changed log level of '*' to 'debug'") + + // Verify a specific subsystem is at debug + res2 := node.IPFS("log", "level", "core") + assert.NoError(t, res2.Err) + assert.Equal(t, "debug", strings.TrimSpace(res2.Stdout.String())) + + // Verify the default level is now debug + res3 := node.IPFS("log", "level") + assert.NoError(t, res3.Err) + assert.Equal(t, "debug", strings.TrimSpace(res3.Stdout.String())) + + // Now set all subsystems back to default (which is now "debug") using 'all' + res4 := node.IPFS("log", "level", "all", "default") + assert.NoError(t, res4.Err) + assert.Contains(t, res4.Stdout.String(), "Changed log level of '*' to") + + // The subsystem should still be at debug (because that's what default is now) + res5 := node.IPFS("log", "level", "core") + assert.NoError(t, res5.Err) + assert.Equal(t, "debug", strings.TrimSpace(res5.Stdout.String())) + + // The behavior is correct: "default" uses the current default level, + // which was changed to "debug" when we set "all" to "debug" + }) + + t.Run("set all subsystems to 'default' keyword", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Get the original default level (just for reference, it should be "error") + res0 := node.IPFS("log", "level") + assert.NoError(t, res0.Err) + // originalDefault := strings.TrimSpace(res0.Stdout.String()) + assert.Equal(t, "error", strings.TrimSpace(res0.Stdout.String())) + + // First set all subsystems to debug + res1 := node.IPFS("log", "level", "*", "debug") + assert.NoError(t, res1.Err) + assert.Contains(t, res1.Stdout.String(), "Changed log level of '*' to 'debug'") + + // Verify a specific subsystem is at debug + res2 := node.IPFS("log", "level", "core") + assert.NoError(t, res2.Err) + assert.Equal(t, "debug", strings.TrimSpace(res2.Stdout.String())) + + // Verify the default level is now debug + res3 := node.IPFS("log", "level") + assert.NoError(t, res3.Err) + assert.Equal(t, "debug", strings.TrimSpace(res3.Stdout.String())) + + // Now set all subsystems back to default (which is now "debug") + res4 := node.IPFS("log", "level", "*", "default") + assert.NoError(t, res4.Err) + assert.Contains(t, res4.Stdout.String(), "Changed log level of '*' to") + + // The subsystem should still be at debug (because that's what default is now) + res5 := node.IPFS("log", "level", "core") + assert.NoError(t, res5.Err) + assert.Equal(t, "debug", strings.TrimSpace(res5.Stdout.String())) + + // The behavior is correct: "default" uses the current default level, + // which was changed to "debug" when we set "*" to "debug" + }) + + t.Run("shell escaping variants for '*' wildcard", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Test different shell escaping methods work for '*' + // This tests the behavior documented in help text: '*' or "*" or \* + + // Test 1: Single quotes '*' (should work) + cmd1 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level '*' info", + node.Dir, node.IPFSBin, node.APIAddr()) + res1 := h.Sh(cmd1) + assert.NoError(t, res1.Err) + assert.Contains(t, res1.Stdout.String(), "Changed log level of '*' to 'info'") + + // Test 2: Double quotes "*" (should work) + cmd2 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level \"*\" debug", + node.Dir, node.IPFSBin, node.APIAddr()) + res2 := h.Sh(cmd2) + assert.NoError(t, res2.Err) + assert.Contains(t, res2.Stdout.String(), "Changed log level of '*' to 'debug'") + + // Test 3: Backslash escape \* (should work) + cmd3 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level \\* warn", + node.Dir, node.IPFSBin, node.APIAddr()) + res3 := h.Sh(cmd3) + assert.NoError(t, res3.Err) + assert.Contains(t, res3.Stdout.String(), "Changed log level of '*' to 'warn'") + + // Test 4: Verify the final state - should show 'warn' as default + res4 := node.IPFS("log", "level") + assert.NoError(t, res4.Err) + assert.Equal(t, "warn", strings.TrimSpace(res4.Stdout.String())) + + // Test 5: Get all levels using escaped '*' to verify it shows all subsystems + cmd5 := fmt.Sprintf("IPFS_PATH='%s' %s --api='%s' log level \\*", + node.Dir, node.IPFSBin, node.APIAddr()) + res5 := h.Sh(cmd5) + assert.NoError(t, res5.Err) + output := res5.Stdout.String() + assert.Contains(t, output, "(default): warn", "Should show updated default level") + assert.Contains(t, output, "core: warn", "Should show core subsystem at warn level") + }) + }) + + t.Run("HTTP RPC", func(t *testing.T) { + t.Run("get default level returns JSON", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Make HTTP request to get default log level + resp, err := http.Post(node.APIURL()+"/api/v0/log/level", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Levels field + levels, ok := result["Levels"].(map[string]interface{}) + require.True(t, ok, "Response should have 'Levels' field") + + // Should have exactly one entry for the default level + assert.Equal(t, 1, len(levels)) + + // The default level should be present + defaultLevel, ok := levels[""] + require.True(t, ok, "Should have empty string key for default level") + assert.Equal(t, "error", defaultLevel, "Default level should be 'error'") + }) + + t.Run("get all levels using 'all' returns JSON (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := getExpectedSubsystems(t, node) + + // Make HTTP request to get all log levels using 'all' + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=all", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + levels := parseHTTPResponse(t, resp) + validateAllSubsystemsPresent(t, expectedSubsystems, levels, "JSON response") + + // Should have the (default) entry + defaultLevel, ok := levels["(default)"] + require.True(t, ok, "Should have '(default)' key") + assert.Equal(t, "error", defaultLevel, "Default level should be 'error'") + }) + + t.Run("get all levels returns JSON", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + expectedSubsystems := getExpectedSubsystems(t, node) + + // Make HTTP request to get all log levels + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=*", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + levels := parseHTTPResponse(t, resp) + validateAllSubsystemsPresent(t, expectedSubsystems, levels, "JSON response") + + // Should have the (default) entry + defaultLevel, ok := levels["(default)"] + require.True(t, ok, "Should have '(default)' key") + assert.Equal(t, "error", defaultLevel, "Default level should be 'error'") + }) + + t.Run("get specific subsystem level returns JSON", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // First set a specific level for a subsystem + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=debug", "", nil) + require.NoError(t, err) + resp.Body.Close() + + // Now get the level for that subsystem + resp, err = http.Post(node.APIURL()+"/api/v0/log/level?arg=core", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Levels field + levels, ok := result["Levels"].(map[string]interface{}) + require.True(t, ok, "Response should have 'Levels' field") + + // Should have exactly one entry + assert.Equal(t, 1, len(levels)) + + // Check the level for 'core' subsystem + coreLevel, ok := levels["core"] + require.True(t, ok, "Should have 'core' key") + assert.Equal(t, "debug", coreLevel, "Core level should be 'debug'") + }) + + t.Run("set level using 'all' returns JSON message (alias for '*')", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Set a log level using 'all' + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=all&arg=info", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Message field + message, ok := result["Message"].(string) + require.True(t, ok, "Response should have 'Message' field") + + // Check the message content (should show '*' in message even when 'all' was used) + assert.Contains(t, message, "Changed log level of '*' to 'info'") + }) + + t.Run("set level returns JSON message", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // Set a log level + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=info", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Message field + message, ok := result["Message"].(string) + require.True(t, ok, "Response should have 'Message' field") + + // Check the message content + assert.Contains(t, message, "Changed log level of 'core' to 'info'") + }) + + t.Run("set level to 'default' keyword", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + defer node.StopDaemon() + + // First set a subsystem to debug + resp, err := http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=debug", "", nil) + require.NoError(t, err) + resp.Body.Close() + + // Now set it back to default + resp, err = http.Post(node.APIURL()+"/api/v0/log/level?arg=core&arg=default", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + // Parse JSON response + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + // Check that we have the Message field + message, ok := result["Message"].(string) + require.True(t, ok, "Response should have 'Message' field") + + // The message should indicate the change + assert.True(t, strings.Contains(message, "Changed log level of 'core' to"), + "Message should indicate level change") + + // Verify the level is back to error (default) + resp, err = http.Post(node.APIURL()+"/api/v0/log/level?arg=core", "", nil) + require.NoError(t, err) + defer resp.Body.Close() + + var getResult map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&getResult) + require.NoError(t, err) + + levels, _ := getResult["Levels"].(map[string]interface{}) + coreLevel, _ := levels["core"].(string) + assert.Equal(t, "error", coreLevel, "Core level should be back to 'error' (default)") + }) + }) + +} + +func getExpectedSubsystems(t *testing.T, node *harness.Node) []string { + t.Helper() + lsRes := node.IPFS("log", "ls") + require.NoError(t, lsRes.Err) + expectedSubsystems := SplitLines(lsRes.Stdout.String()) + assert.Greater(t, len(expectedSubsystems), 10, "Should have many subsystems") + return expectedSubsystems +} + +func parseCLIOutput(t *testing.T, output string) map[string]string { + t.Helper() + lines := SplitLines(output) + actualSubsystems := make(map[string]string) + for _, line := range lines { + if strings.TrimSpace(line) == "" { + continue + } + parts := strings.Split(line, ": ") + assert.Equal(t, 2, len(parts), "Line should have format 'subsystem: level', got: %s", line) + assert.NotEmpty(t, parts[0], "Subsystem should not be empty") + assert.NotEmpty(t, parts[1], "Level should not be empty") + actualSubsystems[parts[0]] = parts[1] + } + return actualSubsystems +} + +func parseHTTPResponse(t *testing.T, resp *http.Response) map[string]interface{} { + t.Helper() + var result map[string]interface{} + err := json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + levels, ok := result["Levels"].(map[string]interface{}) + require.True(t, ok, "Response should have 'Levels' field") + assert.Greater(t, len(levels), 10, "Should have many subsystems") + return levels +} + +func validateAllSubsystemsPresent(t *testing.T, expectedSubsystems []string, actualLevels map[string]interface{}, context string) { + t.Helper() + for _, expectedSub := range expectedSubsystems { + expectedSub = strings.TrimSpace(expectedSub) + if expectedSub == "" { + continue + } + _, found := actualLevels[expectedSub] + assert.True(t, found, "Expected subsystem '%s' should be present in %s", expectedSub, context) + } +} + +func validateAllSubsystemsPresentCLI(t *testing.T, expectedSubsystems []string, actualLevels map[string]string, context string) { + t.Helper() + for _, expectedSub := range expectedSubsystems { + expectedSub = strings.TrimSpace(expectedSub) + if expectedSub == "" { + continue + } + _, found := actualLevels[expectedSub] + assert.True(t, found, "Expected subsystem '%s' should be present in %s", expectedSub, context) + } +} diff --git a/test/cli/migrations/migration_16_to_17_test.go b/test/cli/migrations/migration_16_to_17_test.go new file mode 100644 index 000000000..e4d75bffd --- /dev/null +++ b/test/cli/migrations/migration_16_to_17_test.go @@ -0,0 +1,684 @@ +package migrations + +// NOTE: These migration tests require the local Kubo binary (built with 'make build') to be in PATH. +// The tests migrate from repo version 16 to 17, which requires Kubo version 0.37.0+ (expects repo v17). +// If using system ipfs binary v0.36.0 or older (expects repo v16), no migration will be triggered. +// +// To run these tests successfully: +// export PATH="$(pwd)/cmd/ipfs:$PATH" +// go test ./test/cli/migrations/ + +import ( + "bufio" + "context" + "encoding/json" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +func TestMigration16To17(t *testing.T) { + t.Parallel() + + // Primary tests using 'ipfs daemon --migrate' command (default in Docker) + t.Run("daemon migrate: forward migration with auto values", testDaemonMigrationWithAuto) + t.Run("daemon migrate: forward migration without auto values", testDaemonMigrationWithoutAuto) + t.Run("daemon migrate: corrupted config handling", testDaemonCorruptedConfigHandling) + t.Run("daemon migrate: missing fields handling", testDaemonMissingFieldsHandling) + + // Comparison tests using 'ipfs repo migrate' command + t.Run("repo migrate: forward migration with auto values", testRepoMigrationWithAuto) + t.Run("repo migrate: backward migration", testRepoBackwardMigration) +} + +// ============================================================================= +// PRIMARY TESTS: 'ipfs daemon --migrate' command (default in Docker) +// +// These tests exercise the primary migration path used in production Docker +// containers where --migrate is enabled by default. This covers: +// - Normal forward migration scenarios +// - Error handling with corrupted configs +// - Migration with minimal/missing config fields +// ============================================================================= + +func testDaemonMigrationWithAuto(t *testing.T) { + // TEST: Forward migration using 'ipfs daemon --migrate' command (PRIMARY) + // Use static v16 repo fixture from real Kubo 0.36 `ipfs init` + // NOTE: This test may need to be revised/updated once repo version 18 is released, + // at that point only keep tests that use 'ipfs repo migrate' + node := setupStaticV16Repo(t) + + configPath := filepath.Join(node.Dir, "config") + versionPath := filepath.Join(node.Dir, "version") + + // Static fixture already uses port 0 for random port assignment - no config update needed + + // Run migration using daemon --migrate (automatic during daemon startup) + // This is the primary method used in Docker containers + // Monitor output until daemon is ready, then shut it down gracefully + stdoutOutput, migrationSuccess := runDaemonMigrationWithMonitoring(t, node) + + // Debug: Print the actual output + t.Logf("Daemon output:\n%s", stdoutOutput) + + // Verify migration was successful based on monitoring + require.True(t, migrationSuccess, "Migration should have been successful") + require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered") + require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully") + + // Verify version was updated to 17 + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17") + + // Verify migration results using DRY helper + helper := NewMigrationTestHelper(t, configPath) + helper.RequireAutoConfDefaults(). + RequireArrayContains("Bootstrap", "auto"). + RequireArrayLength("Bootstrap", 1). // Should only contain "auto" when all peers were defaults + RequireArrayContains("Routing.DelegatedRouters", "auto"). + RequireArrayContains("Ipns.DelegatedPublishers", "auto") + + // DNS resolver in static fixture should be empty, so "." should be set to "auto" + helper.RequireFieldEquals("DNS.Resolvers[.]", "auto") +} + +func testDaemonMigrationWithoutAuto(t *testing.T) { + // TEST: Forward migration using 'ipfs daemon --migrate' command (PRIMARY) + // Test migration of a config that already has some custom values + // NOTE: This test may need to be revised/updated once repo version 18 is released, + // at that point only keep tests that use 'ipfs repo migrate' + // Should preserve existing settings and only add missing ones + node := setupStaticV16Repo(t) + + // Modify the static fixture to add some custom values for testing mixed scenarios + configPath := filepath.Join(node.Dir, "config") + + // Read existing config from static fixture + var v16Config map[string]interface{} + configData, err := os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &v16Config)) + + // Add custom DNS resolver that should be preserved + if v16Config["DNS"] == nil { + v16Config["DNS"] = map[string]interface{}{} + } + dnsSection := v16Config["DNS"].(map[string]interface{}) + dnsSection["Resolvers"] = map[string]string{ + ".": "https://custom-dns.example.com/dns-query", + "eth.": "https://dns.eth.limo/dns-query", // This is a default that will be replaced with "auto" + } + + // Write modified config back + modifiedConfigData, err := json.MarshalIndent(v16Config, "", " ") + require.NoError(t, err) + require.NoError(t, os.WriteFile(configPath, modifiedConfigData, 0644)) + + // Static fixture already uses port 0 for random port assignment - no config update needed + + // Run migration using daemon --migrate command (this is a daemon test) + // Monitor output until daemon is ready, then shut it down gracefully + stdoutOutput, migrationSuccess := runDaemonMigrationWithMonitoring(t, node) + + // Verify migration was successful based on monitoring + require.True(t, migrationSuccess, "Migration should have been successful") + require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered") + require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully") + + // Verify migration results: custom values preserved alongside "auto" + helper := NewMigrationTestHelper(t, configPath) + helper.RequireAutoConfDefaults(). + RequireArrayContains("Bootstrap", "auto"). + RequireFieldEquals("DNS.Resolvers[.]", "https://custom-dns.example.com/dns-query") + + // Check that eth. resolver was replaced with "auto" since it uses a default URL + helper.RequireFieldEquals("DNS.Resolvers[eth.]", "auto"). + RequireFieldEquals("DNS.Resolvers[.]", "https://custom-dns.example.com/dns-query") +} + +// ============================================================================= +// Tests using 'ipfs daemon --migrate' command +// ============================================================================= + +// Test helper structs and functions for cleaner, more DRY tests + +type ConfigField struct { + Path string + Expected interface{} + Message string +} + +type MigrationTestHelper struct { + t *testing.T + config map[string]interface{} +} + +func NewMigrationTestHelper(t *testing.T, configPath string) *MigrationTestHelper { + var config map[string]interface{} + configData, err := os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &config)) + + return &MigrationTestHelper{t: t, config: config} +} + +func (h *MigrationTestHelper) RequireFieldExists(path string) *MigrationTestHelper { + value := h.getNestedValue(path) + require.NotNil(h.t, value, "Field %s should exist", path) + return h +} + +func (h *MigrationTestHelper) RequireFieldEquals(path string, expected interface{}) *MigrationTestHelper { + value := h.getNestedValue(path) + require.Equal(h.t, expected, value, "Field %s should equal %v", path, expected) + return h +} + +func (h *MigrationTestHelper) RequireArrayContains(path string, expected interface{}) *MigrationTestHelper { + value := h.getNestedValue(path) + require.IsType(h.t, []interface{}{}, value, "Field %s should be an array", path) + array := value.([]interface{}) + require.Contains(h.t, array, expected, "Array %s should contain %v", path, expected) + return h +} + +func (h *MigrationTestHelper) RequireArrayLength(path string, expectedLen int) *MigrationTestHelper { + value := h.getNestedValue(path) + require.IsType(h.t, []interface{}{}, value, "Field %s should be an array", path) + array := value.([]interface{}) + require.Len(h.t, array, expectedLen, "Array %s should have length %d", path, expectedLen) + return h +} + +func (h *MigrationTestHelper) RequireArrayDoesNotContain(path string, notExpected interface{}) *MigrationTestHelper { + value := h.getNestedValue(path) + require.IsType(h.t, []interface{}{}, value, "Field %s should be an array", path) + array := value.([]interface{}) + require.NotContains(h.t, array, notExpected, "Array %s should not contain %v", path, notExpected) + return h +} + +func (h *MigrationTestHelper) RequireFieldAbsent(path string) *MigrationTestHelper { + value := h.getNestedValue(path) + require.Nil(h.t, value, "Field %s should not exist", path) + return h +} + +func (h *MigrationTestHelper) RequireAutoConfDefaults() *MigrationTestHelper { + // AutoConf section should exist but be empty (using implicit defaults) + return h.RequireFieldExists("AutoConf"). + RequireFieldAbsent("AutoConf.Enabled"). // Should use implicit default (true) + RequireFieldAbsent("AutoConf.URL"). // Should use implicit default (mainnet URL) + RequireFieldAbsent("AutoConf.RefreshInterval"). // Should use implicit default (24h) + RequireFieldAbsent("AutoConf.TLSInsecureSkipVerify") // Should use implicit default (false) +} + +func (h *MigrationTestHelper) RequireAutoFieldsSetToAuto() *MigrationTestHelper { + return h.RequireArrayContains("Bootstrap", "auto"). + RequireFieldEquals("DNS.Resolvers[.]", "auto"). + RequireArrayContains("Routing.DelegatedRouters", "auto"). + RequireArrayContains("Ipns.DelegatedPublishers", "auto") +} + +func (h *MigrationTestHelper) RequireNoAutoValues() *MigrationTestHelper { + // Check Bootstrap if it exists + if h.getNestedValue("Bootstrap") != nil { + h.RequireArrayDoesNotContain("Bootstrap", "auto") + } + + // Check DNS.Resolvers if it exists + if h.getNestedValue("DNS.Resolvers") != nil { + h.RequireMapDoesNotContainValue("DNS.Resolvers", "auto") + } + + // Check Routing.DelegatedRouters if it exists + if h.getNestedValue("Routing.DelegatedRouters") != nil { + h.RequireArrayDoesNotContain("Routing.DelegatedRouters", "auto") + } + + // Check Ipns.DelegatedPublishers if it exists + if h.getNestedValue("Ipns.DelegatedPublishers") != nil { + h.RequireArrayDoesNotContain("Ipns.DelegatedPublishers", "auto") + } + + return h +} + +func (h *MigrationTestHelper) RequireMapDoesNotContainValue(path string, notExpected interface{}) *MigrationTestHelper { + value := h.getNestedValue(path) + require.IsType(h.t, map[string]interface{}{}, value, "Field %s should be a map", path) + mapValue := value.(map[string]interface{}) + for k, v := range mapValue { + require.NotEqual(h.t, notExpected, v, "Map %s[%s] should not equal %v", path, k, notExpected) + } + return h +} + +func (h *MigrationTestHelper) getNestedValue(path string) interface{} { + segments := h.parseKuboConfigPath(path) + current := interface{}(h.config) + + for _, segment := range segments { + switch segment.Type { + case "field": + switch v := current.(type) { + case map[string]interface{}: + current = v[segment.Key] + default: + return nil + } + case "mapKey": + switch v := current.(type) { + case map[string]interface{}: + current = v[segment.Key] + default: + return nil + } + default: + return nil + } + + if current == nil { + return nil + } + } + + return current +} + +type PathSegment struct { + Type string // "field" or "mapKey" + Key string +} + +func (h *MigrationTestHelper) parseKuboConfigPath(path string) []PathSegment { + var segments []PathSegment + + // Split path into parts, respecting bracket boundaries + parts := h.splitKuboConfigPath(path) + + for _, part := range parts { + if strings.Contains(part, "[") && strings.HasSuffix(part, "]") { + // Handle field[key] notation + bracketStart := strings.Index(part, "[") + fieldName := part[:bracketStart] + mapKey := part[bracketStart+1 : len(part)-1] // Remove [ and ] + + // Add field segment if present + if fieldName != "" { + segments = append(segments, PathSegment{Type: "field", Key: fieldName}) + } + // Add map key segment + segments = append(segments, PathSegment{Type: "mapKey", Key: mapKey}) + } else { + // Regular field access + if part != "" { + segments = append(segments, PathSegment{Type: "field", Key: part}) + } + } + } + + return segments +} + +// splitKuboConfigPath splits a path on dots, but preserves bracket sections intact +func (h *MigrationTestHelper) splitKuboConfigPath(path string) []string { + var parts []string + var current strings.Builder + inBrackets := false + + for _, r := range path { + switch r { + case '[': + inBrackets = true + current.WriteRune(r) + case ']': + inBrackets = false + current.WriteRune(r) + case '.': + if inBrackets { + // Inside brackets, preserve the dot + current.WriteRune(r) + } else { + // Outside brackets, split here + if current.Len() > 0 { + parts = append(parts, current.String()) + current.Reset() + } + } + default: + current.WriteRune(r) + } + } + + // Add final part if any + if current.Len() > 0 { + parts = append(parts, current.String()) + } + + return parts +} + +// setupStaticV16Repo creates a test node using static v16 repo fixture from real Kubo 0.36 `ipfs init` +// This ensures tests remain stable regardless of future changes to the IPFS binary +// Each test gets its own copy in a temporary directory to allow modifications +func setupStaticV16Repo(t *testing.T) *harness.Node { + // Get absolute path to static v16 repo fixture + v16FixturePath := "testdata/v16-repo" + + // Create a temporary test directory - each test gets its own copy + // Use ./tmp.DELETEME/ as requested by user instead of /tmp/ + tmpDir := filepath.Join("tmp.DELETEME", "migration-test-"+t.Name()) + require.NoError(t, os.MkdirAll(tmpDir, 0755)) + t.Cleanup(func() { os.RemoveAll(tmpDir) }) + + // Convert to absolute path for harness + absTmpDir, err := filepath.Abs(tmpDir) + require.NoError(t, err) + + // Use the built binary (should be in PATH) + node := harness.BuildNode("ipfs", absTmpDir, 0) + + // Replace IPFS_PATH with static fixture files to test directory (creates independent copy per test) + cloneStaticRepoFixture(t, v16FixturePath, node.Dir) + + return node +} + +// cloneStaticRepoFixture recursively copies the v16 repo fixture to the target directory +// It completely removes the target directory contents before copying to ensure no extra files remain +func cloneStaticRepoFixture(t *testing.T, srcPath, dstPath string) { + srcInfo, err := os.Stat(srcPath) + require.NoError(t, err) + + if srcInfo.IsDir() { + // Completely remove destination directory and all contents + require.NoError(t, os.RemoveAll(dstPath)) + // Create fresh destination directory + require.NoError(t, os.MkdirAll(dstPath, srcInfo.Mode())) + + // Read source directory + entries, err := os.ReadDir(srcPath) + require.NoError(t, err) + + // Copy each entry recursively + for _, entry := range entries { + srcEntryPath := filepath.Join(srcPath, entry.Name()) + dstEntryPath := filepath.Join(dstPath, entry.Name()) + cloneStaticRepoFixture(t, srcEntryPath, dstEntryPath) + } + } else { + // Copy file (destination directory should already be clean from parent call) + srcFile, err := os.Open(srcPath) + require.NoError(t, err) + defer srcFile.Close() + + dstFile, err := os.Create(dstPath) + require.NoError(t, err) + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + require.NoError(t, err) + + // Copy file permissions + require.NoError(t, dstFile.Chmod(srcInfo.Mode())) + } +} + +// Placeholder stubs for new test functions - to be implemented +func testDaemonCorruptedConfigHandling(t *testing.T) { + // TEST: Error handling using 'ipfs daemon --migrate' command with corrupted config (PRIMARY) + // Test what happens when config file is corrupted during migration + // NOTE: This test may need to be revised/updated once repo version 18 is released, + // at that point only keep tests that use 'ipfs repo migrate' + node := setupStaticV16Repo(t) + + // Create corrupted config + configPath := filepath.Join(node.Dir, "config") + corruptedJson := `{"Bootstrap": [invalid json}` + require.NoError(t, os.WriteFile(configPath, []byte(corruptedJson), 0644)) + + // Write version file indicating v16 + versionPath := filepath.Join(node.Dir, "version") + require.NoError(t, os.WriteFile(versionPath, []byte("16"), 0644)) + + // Run daemon with --migrate flag - this should fail gracefully + result := node.RunIPFS("daemon", "--migrate") + + // Verify graceful failure handling + // The daemon should fail but migration error should be clear + errorOutput := result.Stderr.String() + result.Stdout.String() + require.True(t, strings.Contains(errorOutput, "json") || strings.Contains(errorOutput, "invalid character"), "Error should mention JSON parsing issue") + + // Verify atomic failure: version and config should remain unchanged + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "16", strings.TrimSpace(string(versionData)), "Version should remain unchanged after failed migration") + + originalContent, err := os.ReadFile(configPath) + require.NoError(t, err) + require.Equal(t, corruptedJson, string(originalContent), "Original config should be unchanged after failed migration") +} + +func testDaemonMissingFieldsHandling(t *testing.T) { + // TEST: Migration using 'ipfs daemon --migrate' command with minimal config (PRIMARY) + // Test migration when config is missing expected fields + // NOTE: This test may need to be revised/updated once repo version 18 is released, + // at that point only keep tests that use 'ipfs repo migrate' + node := setupStaticV16Repo(t) + + // The static fixture already has all required fields, use it as-is + configPath := filepath.Join(node.Dir, "config") + versionPath := filepath.Join(node.Dir, "version") + + // Static fixture already uses port 0 for random port assignment - no config update needed + + // Run daemon migration + stdoutOutput, migrationSuccess := runDaemonMigrationWithMonitoring(t, node) + + // Verify migration was successful + require.True(t, migrationSuccess, "Migration should have been successful") + require.Contains(t, stdoutOutput, "applying 16-to-17 repo migration", "Migration should have been triggered") + require.Contains(t, stdoutOutput, "Migration 16 to 17 succeeded", "Migration should have completed successfully") + + // Verify version was updated + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17") + + // Verify migration adds all required fields to minimal config + NewMigrationTestHelper(t, configPath). + RequireAutoConfDefaults(). + RequireAutoFieldsSetToAuto(). + RequireFieldExists("Identity.PeerID") // Original identity preserved from static fixture +} + +// ============================================================================= +// COMPARISON TESTS: 'ipfs repo migrate' command +// +// These tests verify that repo migrate produces equivalent results to +// daemon migrate, and test scenarios specific to repo migrate like +// backward migration (which daemon doesn't support). +// ============================================================================= + +func testRepoMigrationWithAuto(t *testing.T) { + // TEST: Forward migration using 'ipfs repo migrate' command (COMPARISON) + // Simple comparison test to verify repo migrate produces same results as daemon migrate + node := setupStaticV16Repo(t) + + // Use static fixture as-is + configPath := filepath.Join(node.Dir, "config") + + // Run migration using 'ipfs repo migrate' command + result := node.RunIPFS("repo", "migrate") + require.Empty(t, result.Stderr.String(), "Migration should succeed without errors") + + // Verify same results as daemon migrate + helper := NewMigrationTestHelper(t, configPath) + helper.RequireAutoConfDefaults(). + RequireArrayContains("Bootstrap", "auto"). + RequireArrayContains("Routing.DelegatedRouters", "auto"). + RequireArrayContains("Ipns.DelegatedPublishers", "auto"). + RequireFieldEquals("DNS.Resolvers[.]", "auto") +} + +func testRepoBackwardMigration(t *testing.T) { + // TEST: Backward migration using 'ipfs repo migrate --to=16 --allow-downgrade' command + // This is kept as repo migrate since daemon doesn't support backward migration + node := setupStaticV16Repo(t) + + // Use static fixture as-is + configPath := filepath.Join(node.Dir, "config") + versionPath := filepath.Join(node.Dir, "version") + + // First run forward migration to get to v17 + result := node.RunIPFS("repo", "migrate") + require.Empty(t, result.Stderr.String(), "Forward migration should succeed") + + // Verify we're at v17 + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Should be at version 17 after forward migration") + + // Now run reverse migration back to v16 + result = node.RunIPFS("repo", "migrate", "--to=16", "--allow-downgrade") + require.Empty(t, result.Stderr.String(), "Reverse migration should succeed") + + // Verify version was downgraded to 16 + versionData, err = os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "16", strings.TrimSpace(string(versionData)), "Version should be downgraded to 16") + + // Verify backward migration results: AutoConf removed and no "auto" values remain + NewMigrationTestHelper(t, configPath). + RequireFieldAbsent("AutoConf"). + RequireNoAutoValues() +} + +// runDaemonMigrationWithMonitoring starts daemon --migrate, monitors output until "Daemon is ready", +// then gracefully shuts down the daemon and returns the captured output and success status. +// This is a generic helper that can monitor for any migration patterns. +func runDaemonMigrationWithMonitoring(t *testing.T, node *harness.Node) (string, bool) { + // Use specific patterns for 16-to-17 migration + return runDaemonWithMigrationMonitoring(t, node, "applying 16-to-17 repo migration", "Migration 16 to 17 succeeded") +} + +// runDaemonWithMigrationMonitoring is a generic helper for running daemon --migrate and monitoring output. +// It waits for the daemon to be ready, then shuts it down gracefully. +// migrationPattern: pattern to detect migration started (e.g., "applying X-to-Y repo migration") +// successPattern: pattern to detect migration succeeded (e.g., "Migration X to Y succeeded") +// Returns the stdout output and whether both patterns were detected. +func runDaemonWithMigrationMonitoring(t *testing.T, node *harness.Node, migrationPattern, successPattern string) (string, bool) { + // Create context with timeout as safety net + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + // Set up daemon command with output monitoring + cmd := exec.CommandContext(ctx, node.IPFSBin, "daemon", "--migrate") + cmd.Dir = node.Dir + + // Set environment (especially IPFS_PATH) + for k, v := range node.Runner.Env { + cmd.Env = append(cmd.Env, k+"="+v) + } + + // Set up pipes for output monitoring + stdout, err := cmd.StdoutPipe() + require.NoError(t, err) + stderr, err := cmd.StderrPipe() + require.NoError(t, err) + + // Start the daemon + err = cmd.Start() + require.NoError(t, err) + + var allOutput strings.Builder + var migrationDetected, migrationSucceeded, daemonReady bool + + // Monitor stdout for completion signals + scanner := bufio.NewScanner(stdout) + go func() { + for scanner.Scan() { + line := scanner.Text() + allOutput.WriteString(line + "\n") + + // Check for migration messages + if migrationPattern != "" && strings.Contains(line, migrationPattern) { + migrationDetected = true + } + if successPattern != "" && strings.Contains(line, successPattern) { + migrationSucceeded = true + } + if strings.Contains(line, "Daemon is ready") { + daemonReady = true + break // Exit monitoring loop + } + } + }() + + // Also monitor stderr (but don't use it for completion detection) + go func() { + stderrScanner := bufio.NewScanner(stderr) + for stderrScanner.Scan() { + line := stderrScanner.Text() + allOutput.WriteString("STDERR: " + line + "\n") + } + }() + + // Wait for daemon ready signal or timeout + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + // Timeout - kill the process + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + t.Logf("Daemon migration timed out after 60 seconds") + return allOutput.String(), false + + case <-ticker.C: + if daemonReady { + // Daemon is ready - shut it down gracefully + shutdownCmd := exec.Command(node.IPFSBin, "shutdown") + shutdownCmd.Dir = node.Dir + for k, v := range node.Runner.Env { + shutdownCmd.Env = append(shutdownCmd.Env, k+"="+v) + } + + if err := shutdownCmd.Run(); err != nil { + t.Logf("Warning: ipfs shutdown failed: %v", err) + // Force kill if graceful shutdown fails + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + } + + // Wait for process to exit + _ = cmd.Wait() + + // Return success if we detected migration + success := migrationDetected && migrationSucceeded + return allOutput.String(), success + } + + // Check if process has exited (e.g., due to startup failure after migration) + if cmd.ProcessState != nil && cmd.ProcessState.Exited() { + // Process exited - migration may have completed but daemon failed to start + // This is expected for corrupted config tests + success := migrationDetected && migrationSucceeded + return allOutput.String(), success + } + } + } +} diff --git a/test/cli/migrations/migration_legacy_15_to_17_test.go b/test/cli/migrations/migration_legacy_15_to_17_test.go new file mode 100644 index 000000000..1471cab1f --- /dev/null +++ b/test/cli/migrations/migration_legacy_15_to_17_test.go @@ -0,0 +1,451 @@ +package migrations + +// NOTE: These legacy migration tests require the local Kubo binary (built with 'make build') to be in PATH. +// The tests migrate from repo version 15 to 17, which requires both external (15→16) and embedded (16→17) migrations. +// This validates the transition from legacy external binaries to modern embedded migrations. +// +// To run these tests successfully: +// export PATH="$(pwd)/cmd/ipfs:$PATH" +// go test ./test/cli/migrations/ + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/require" +) + +func TestMigration15To17(t *testing.T) { + t.Parallel() + + // Test legacy migration from v15 to v17 (combines external 15→16 + embedded 16→17) + t.Run("daemon migrate: legacy 15 to 17", testDaemonMigration15To17) + t.Run("repo migrate: legacy 15 to 17", testRepoMigration15To17) +} + +func TestMigration17To15Downgrade(t *testing.T) { + t.Parallel() + + // Test reverse hybrid migration from v17 to v15 (embedded 17→16 + external 16→15) + t.Run("repo migrate: reverse hybrid 17 to 15", testRepoReverseHybridMigration17To15) +} + +func testDaemonMigration15To17(t *testing.T) { + // TEST: Migration from v15 to v17 using 'ipfs daemon --migrate' + // This tests the dual migration path: external binary (15→16) + embedded (16→17) + // NOTE: This test may need to be revised/updated once repo version 18 is released, + // at that point only keep tests that use 'ipfs repo migrate' + node := setupStaticV15Repo(t) + + // Create mock migration binary for 15→16 (16→17 will use embedded migration) + createMockMigrationBinary(t, "15", "16") + + configPath := filepath.Join(node.Dir, "config") + versionPath := filepath.Join(node.Dir, "version") + + // Verify starting conditions + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "15", strings.TrimSpace(string(versionData)), "Should start at version 15") + + // Read original config to verify preservation of key fields + var originalConfig map[string]interface{} + configData, err := os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &originalConfig)) + + originalPeerID := getNestedValue(originalConfig, "Identity.PeerID") + + // Run dual migration using daemon --migrate + stdoutOutput, migrationSuccess := runDaemonWithLegacyMigrationMonitoring(t, node) + + // Debug output + t.Logf("Daemon output:\n%s", stdoutOutput) + + // Verify hybrid migration was successful + require.True(t, migrationSuccess, "Hybrid migration should have been successful") + require.Contains(t, stdoutOutput, "Phase 1: External migration from v15 to v16", "Should detect external migration phase") + require.Contains(t, stdoutOutput, "Phase 2: Embedded migration from v16 to v17", "Should detect embedded migration phase") + require.Contains(t, stdoutOutput, "Hybrid migration completed successfully", "Should confirm hybrid migration completion") + + // Verify final version is 17 + versionData, err = os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17") + + // Verify config is still valid JSON and key fields preserved + var finalConfig map[string]interface{} + configData, err = os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &finalConfig), "Config should remain valid JSON") + + // Verify essential fields preserved + finalPeerID := getNestedValue(finalConfig, "Identity.PeerID") + require.Equal(t, originalPeerID, finalPeerID, "Identity.PeerID should be preserved") + + // Verify bootstrap exists (may be modified by 16→17 migration) + finalBootstrap := getNestedValue(finalConfig, "Bootstrap") + require.NotNil(t, finalBootstrap, "Bootstrap should exist after migration") + + // Verify AutoConf was added by 16→17 migration + autoConf := getNestedValue(finalConfig, "AutoConf") + require.NotNil(t, autoConf, "AutoConf should be added by 16→17 migration") +} + +func testRepoMigration15To17(t *testing.T) { + // TEST: Migration from v15 to v17 using 'ipfs repo migrate' + // Comparison test to verify repo migrate produces same results as daemon migrate + node := setupStaticV15Repo(t) + + // Create mock migration binary for 15→16 (16→17 will use embedded migration) + createMockMigrationBinary(t, "15", "16") + + configPath := filepath.Join(node.Dir, "config") + versionPath := filepath.Join(node.Dir, "version") + + // Verify starting version + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "15", strings.TrimSpace(string(versionData)), "Should start at version 15") + + // Run migration using 'ipfs repo migrate' with custom PATH + result := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"repo", "migrate"}, + CmdOpts: []harness.CmdOpt{ + func(cmd *exec.Cmd) { + // Ensure the command inherits our modified PATH with mock binaries + cmd.Env = append(cmd.Env, "PATH="+os.Getenv("PATH")) + }, + }, + }) + require.Empty(t, result.Stderr.String(), "Migration should succeed without errors") + + // Verify final version is 17 + versionData, err = os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Version should be updated to 17") + + // Verify config is valid JSON + var finalConfig map[string]interface{} + configData, err := os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &finalConfig), "Config should remain valid JSON") + + // Verify essential fields exist + require.NotNil(t, getNestedValue(finalConfig, "Identity.PeerID"), "Identity.PeerID should exist") + require.NotNil(t, getNestedValue(finalConfig, "Bootstrap"), "Bootstrap should exist") + require.NotNil(t, getNestedValue(finalConfig, "AutoConf"), "AutoConf should be added") +} + +// setupStaticV15Repo creates a test node using static v15 repo fixture +// This ensures tests remain stable and validates migration from very old repos +func setupStaticV15Repo(t *testing.T) *harness.Node { + // Get path to static v15 repo fixture + v15FixturePath := "testdata/v15-repo" + + // Create temporary test directory using Go's testing temp dir + tmpDir := t.TempDir() + + // Use the built binary (should be in PATH) + node := harness.BuildNode("ipfs", tmpDir, 0) + + // Copy static fixture to test directory + cloneStaticRepoFixture(t, v15FixturePath, node.Dir) + + return node +} + +// runDaemonWithLegacyMigrationMonitoring monitors for hybrid migration patterns +func runDaemonWithLegacyMigrationMonitoring(t *testing.T, node *harness.Node) (string, bool) { + // Monitor for hybrid migration completion - use "Hybrid migration completed successfully" as success pattern + stdoutOutput, daemonStarted := runDaemonWithMigrationMonitoringCustomEnv(t, node, "Using hybrid migration strategy", "Hybrid migration completed successfully", map[string]string{ + "PATH": os.Getenv("PATH"), // Pass current PATH which includes our mock binaries + }) + + // Check for hybrid migration patterns in output + hasHybridStart := strings.Contains(stdoutOutput, "Using hybrid migration strategy") + hasPhase1 := strings.Contains(stdoutOutput, "Phase 1: External migration from v15 to v16") + hasPhase2 := strings.Contains(stdoutOutput, "Phase 2: Embedded migration from v16 to v17") + hasHybridSuccess := strings.Contains(stdoutOutput, "Hybrid migration completed successfully") + + // Success requires daemon to start and hybrid migration patterns to be detected + hybridMigrationSuccess := daemonStarted && hasHybridStart && hasPhase1 && hasPhase2 && hasHybridSuccess + + return stdoutOutput, hybridMigrationSuccess +} + +// runDaemonWithMigrationMonitoringCustomEnv is like runDaemonWithMigrationMonitoring but allows custom environment +func runDaemonWithMigrationMonitoringCustomEnv(t *testing.T, node *harness.Node, migrationPattern, successPattern string, extraEnv map[string]string) (string, bool) { + // Create context with timeout as safety net + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + // Set up daemon command with output monitoring + cmd := exec.CommandContext(ctx, node.IPFSBin, "daemon", "--migrate") + cmd.Dir = node.Dir + + // Set environment (especially IPFS_PATH) + for k, v := range node.Runner.Env { + cmd.Env = append(cmd.Env, k+"="+v) + } + + // Add extra environment variables (like PATH with mock binaries) + for k, v := range extraEnv { + cmd.Env = append(cmd.Env, k+"="+v) + } + + // Set up pipes for output monitoring + stdout, err := cmd.StdoutPipe() + require.NoError(t, err) + stderr, err := cmd.StderrPipe() + require.NoError(t, err) + + // Start the daemon + require.NoError(t, cmd.Start()) + + // Monitor output from both streams + var outputBuffer strings.Builder + done := make(chan bool) + migrationStarted := false + migrationCompleted := false + + go func() { + scanner := bufio.NewScanner(io.MultiReader(stdout, stderr)) + for scanner.Scan() { + line := scanner.Text() + outputBuffer.WriteString(line + "\n") + + // Check for migration start + if strings.Contains(line, migrationPattern) { + migrationStarted = true + } + + // Check for migration completion + if strings.Contains(line, successPattern) { + migrationCompleted = true + } + + // Check for daemon ready + if strings.Contains(line, "Daemon is ready") { + done <- true + return + } + } + done <- false + }() + + // Wait for daemon to be ready or timeout + daemonReady := false + select { + case ready := <-done: + daemonReady = ready + case <-ctx.Done(): + t.Log("Daemon startup timed out") + } + + // Stop the daemon + if cmd.Process != nil { + _ = cmd.Process.Signal(syscall.SIGTERM) + _ = cmd.Wait() + } + + return outputBuffer.String(), daemonReady && migrationStarted && migrationCompleted +} + +// createMockMigrationBinary creates a platform-agnostic Go binary for migration on PATH +func createMockMigrationBinary(t *testing.T, fromVer, toVer string) { + // Create bin directory for migration binaries + binDir := t.TempDir() + + // Create Go source for mock migration binary + scriptName := fmt.Sprintf("fs-repo-%s-to-%s", fromVer, toVer) + sourceFile := filepath.Join(binDir, scriptName+".go") + binaryPath := filepath.Join(binDir, scriptName) + + goSource := fmt.Sprintf(`package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +func main() { + // Parse command line arguments - real migration binaries expect -path= + var repoPath string + var revert bool + for _, arg := range os.Args[1:] { + if strings.HasPrefix(arg, "-path=") { + repoPath = strings.TrimPrefix(arg, "-path=") + } else if arg == "-revert" { + revert = true + } + } + + if repoPath == "" { + fmt.Fprintf(os.Stderr, "Usage: %%s -path= [-verbose=true] [-revert]\n", os.Args[0]) + os.Exit(1) + } + + // Determine source and target versions based on revert flag + var sourceVer, targetVer string + if revert { + // When reverting, we go backwards: fs-repo-15-to-16 with -revert goes 16→15 + sourceVer = "%s" + targetVer = "%s" + } else { + // Normal forward migration: fs-repo-15-to-16 goes 15→16 + sourceVer = "%s" + targetVer = "%s" + } + + // Print migration message (same format as real migrations) + fmt.Printf("fake applying %%s-to-%%s repo migration\n", sourceVer, targetVer) + + // Update version file + versionFile := filepath.Join(repoPath, "version") + err := os.WriteFile(versionFile, []byte(targetVer), 0644) + if err != nil { + fmt.Fprintf(os.Stderr, "Error updating version: %%v\n", err) + os.Exit(1) + } +} +`, toVer, fromVer, fromVer, toVer) + + require.NoError(t, os.WriteFile(sourceFile, []byte(goSource), 0644)) + + // Compile the Go binary + require.NoError(t, os.Setenv("CGO_ENABLED", "0")) // Ensure static binary + require.NoError(t, exec.Command("go", "build", "-o", binaryPath, sourceFile).Run()) + + // Add bin directory to PATH for this test + currentPath := os.Getenv("PATH") + newPath := binDir + string(filepath.ListSeparator) + currentPath + require.NoError(t, os.Setenv("PATH", newPath)) + t.Cleanup(func() { os.Setenv("PATH", currentPath) }) + + // Verify the binary exists and is executable + _, err := os.Stat(binaryPath) + require.NoError(t, err, "Mock binary should exist") +} + +// getNestedValue retrieves a nested value from a config map using dot notation +func getNestedValue(config map[string]interface{}, path string) interface{} { + parts := strings.Split(path, ".") + current := interface{}(config) + + for _, part := range parts { + switch v := current.(type) { + case map[string]interface{}: + current = v[part] + default: + return nil + } + if current == nil { + return nil + } + } + + return current +} + +func testRepoReverseHybridMigration17To15(t *testing.T) { + // TEST: Reverse hybrid migration from v17 to v15 using 'ipfs repo migrate --to=15 --allow-downgrade' + // This tests reverse hybrid migration: embedded (17→16) + external (16→15) + + // Start with v15 fixture and migrate forward to v17 to create proper backup files + node := setupStaticV15Repo(t) + + // Create mock migration binary for 15→16 (needed for forward migration) + createMockMigrationBinary(t, "15", "16") + // Create mock migration binary for 16→15 (needed for downgrade) + createMockMigrationBinary(t, "16", "15") + + configPath := filepath.Join(node.Dir, "config") + versionPath := filepath.Join(node.Dir, "version") + + // Step 1: Forward migration from v15 to v17 to create backup files + t.Log("Step 1: Forward migration v15 → v17") + result := node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"repo", "migrate"}, + CmdOpts: []harness.CmdOpt{ + func(cmd *exec.Cmd) { + // Ensure the command inherits our modified PATH with mock binaries + cmd.Env = append(cmd.Env, "PATH="+os.Getenv("PATH")) + }, + }, + }) + + // Debug: print the output to see what happened + t.Logf("Forward migration stdout:\n%s", result.Stdout.String()) + t.Logf("Forward migration stderr:\n%s", result.Stderr.String()) + + require.Empty(t, result.Stderr.String(), "Forward migration should succeed without errors") + + // Verify we're at v17 after forward migration + versionData, err := os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "17", strings.TrimSpace(string(versionData)), "Should be at version 17 after forward migration") + + // Read config after forward migration to use as baseline for downgrade + var v17Config map[string]interface{} + configData, err := os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &v17Config)) + + originalPeerID := getNestedValue(v17Config, "Identity.PeerID") + + // Step 2: Reverse hybrid migration from v17 to v15 + t.Log("Step 2: Reverse hybrid migration v17 → v15") + result = node.Runner.Run(harness.RunRequest{ + Path: node.IPFSBin, + Args: []string{"repo", "migrate", "--to=15", "--allow-downgrade"}, + CmdOpts: []harness.CmdOpt{ + func(cmd *exec.Cmd) { + // Ensure the command inherits our modified PATH with mock binaries + cmd.Env = append(cmd.Env, "PATH="+os.Getenv("PATH")) + }, + }, + }) + require.Empty(t, result.Stderr.String(), "Reverse hybrid migration should succeed without errors") + + // Debug output + t.Logf("Downgrade migration output:\n%s", result.Stdout.String()) + + // Verify final version is 15 + versionData, err = os.ReadFile(versionPath) + require.NoError(t, err) + require.Equal(t, "15", strings.TrimSpace(string(versionData)), "Version should be updated to 15") + + // Verify config is still valid JSON and key fields preserved + var finalConfig map[string]interface{} + configData, err = os.ReadFile(configPath) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(configData, &finalConfig), "Config should remain valid JSON") + + // Verify essential fields preserved + finalPeerID := getNestedValue(finalConfig, "Identity.PeerID") + require.Equal(t, originalPeerID, finalPeerID, "Identity.PeerID should be preserved") + + // Verify bootstrap exists (may be modified by migrations) + finalBootstrap := getNestedValue(finalConfig, "Bootstrap") + require.NotNil(t, finalBootstrap, "Bootstrap should exist after migration") + + // AutoConf should be removed by the downgrade (was added in 16→17) + autoConf := getNestedValue(finalConfig, "AutoConf") + require.Nil(t, autoConf, "AutoConf should be removed by downgrade to v15") +} diff --git a/test/cli/migrations/testdata/v15-repo/blocks/SHARDING b/test/cli/migrations/testdata/v15-repo/blocks/SHARDING new file mode 100644 index 000000000..a153331da Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/SHARDING differ diff --git a/test/cli/migrations/testdata/v15-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data b/test/cli/migrations/testdata/v15-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data new file mode 100644 index 000000000..9553a942d Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data differ diff --git a/test/cli/migrations/testdata/v15-repo/blocks/_README b/test/cli/migrations/testdata/v15-repo/blocks/_README new file mode 100644 index 000000000..572e7e4d0 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/_README differ diff --git a/test/cli/migrations/testdata/v15-repo/blocks/diskUsage.cache b/test/cli/migrations/testdata/v15-repo/blocks/diskUsage.cache new file mode 100644 index 000000000..15876dc11 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/blocks/diskUsage.cache differ diff --git a/test/cli/migrations/testdata/v15-repo/config b/test/cli/migrations/testdata/v15-repo/config new file mode 100644 index 000000000..c789c2cea Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/config differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/000001.log b/test/cli/migrations/testdata/v15-repo/datastore/000001.log new file mode 100644 index 000000000..9591b22ef Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/000001.log differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/CURRENT b/test/cli/migrations/testdata/v15-repo/datastore/CURRENT new file mode 100644 index 000000000..feda7d6b2 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/CURRENT differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/LOCK b/test/cli/migrations/testdata/v15-repo/datastore/LOCK new file mode 100644 index 000000000..e69de29bb diff --git a/test/cli/migrations/testdata/v15-repo/datastore/LOG b/test/cli/migrations/testdata/v15-repo/datastore/LOG new file mode 100644 index 000000000..74e0f5f6b Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/LOG differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore/MANIFEST-000000 b/test/cli/migrations/testdata/v15-repo/datastore/MANIFEST-000000 new file mode 100644 index 000000000..9d54f6733 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore/MANIFEST-000000 differ diff --git a/test/cli/migrations/testdata/v15-repo/datastore_spec b/test/cli/migrations/testdata/v15-repo/datastore_spec new file mode 100644 index 000000000..7bf9626c2 Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/datastore_spec differ diff --git a/test/cli/migrations/testdata/v15-repo/version b/test/cli/migrations/testdata/v15-repo/version new file mode 100644 index 000000000..60d3b2f4a Binary files /dev/null and b/test/cli/migrations/testdata/v15-repo/version differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/SHARDING b/test/cli/migrations/testdata/v16-repo/blocks/SHARDING new file mode 100644 index 000000000..a153331da Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/SHARDING differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data b/test/cli/migrations/testdata/v16-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data new file mode 100644 index 000000000..9553a942d Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/X3/CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/_README b/test/cli/migrations/testdata/v16-repo/blocks/_README new file mode 100644 index 000000000..572e7e4d0 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/_README differ diff --git a/test/cli/migrations/testdata/v16-repo/blocks/diskUsage.cache b/test/cli/migrations/testdata/v16-repo/blocks/diskUsage.cache new file mode 100644 index 000000000..15876dc11 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/blocks/diskUsage.cache differ diff --git a/test/cli/migrations/testdata/v16-repo/config b/test/cli/migrations/testdata/v16-repo/config new file mode 100644 index 000000000..dcbceb49c Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/config differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/000001.log b/test/cli/migrations/testdata/v16-repo/datastore/000001.log new file mode 100644 index 000000000..51686e36c Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/000001.log differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/CURRENT b/test/cli/migrations/testdata/v16-repo/datastore/CURRENT new file mode 100644 index 000000000..feda7d6b2 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/CURRENT differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/LOCK b/test/cli/migrations/testdata/v16-repo/datastore/LOCK new file mode 100644 index 000000000..e69de29bb diff --git a/test/cli/migrations/testdata/v16-repo/datastore/LOG b/test/cli/migrations/testdata/v16-repo/datastore/LOG new file mode 100644 index 000000000..c19fc88e4 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/LOG differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore/MANIFEST-000000 b/test/cli/migrations/testdata/v16-repo/datastore/MANIFEST-000000 new file mode 100644 index 000000000..9d54f6733 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore/MANIFEST-000000 differ diff --git a/test/cli/migrations/testdata/v16-repo/datastore_spec b/test/cli/migrations/testdata/v16-repo/datastore_spec new file mode 100644 index 000000000..7bf9626c2 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/datastore_spec differ diff --git a/test/cli/migrations/testdata/v16-repo/version b/test/cli/migrations/testdata/v16-repo/version new file mode 100644 index 000000000..b6a7d89c6 Binary files /dev/null and b/test/cli/migrations/testdata/v16-repo/version differ diff --git a/test/cli/name_test.go b/test/cli/name_test.go index 42c649c09..a0931bfa0 100644 --- a/test/cli/name_test.go +++ b/test/cli/name_test.go @@ -150,7 +150,7 @@ func TestName(t *testing.T) { res := node.RunIPFS("name", "publish", "/ipfs/"+fixtureCid) require.Error(t, res.Err) require.Equal(t, 1, res.ExitCode()) - require.Contains(t, res.Stderr.String(), `can't publish while offline`) + require.Contains(t, res.Stderr.String(), "can't publish while offline: pass `--allow-offline` to override or `--allow-delegated` if Ipns.DelegatedPublishers are set up") }) t.Run("Publish V2-only record", func(t *testing.T) { @@ -263,4 +263,71 @@ func TestName(t *testing.T) { require.NoError(t, err) require.False(t, val.Validation.Valid) }) + + t.Run("Publishing with custom sequence number", func(t *testing.T) { + t.Parallel() + + node := makeDaemon(t, nil) + publishPath := "/ipfs/" + fixtureCid + name := ipns.NameFromPeer(node.PeerID()) + + t.Run("Publish with sequence=0 is not allowed", func(t *testing.T) { + // Sequence=0 is never valid, even on a fresh node + res := node.RunIPFS("name", "publish", "--allow-offline", "--ttl=0", "--sequence=0", publishPath) + require.NotEqual(t, 0, res.ExitCode(), "Expected publish with sequence=0 to fail") + require.Contains(t, res.Stderr.String(), "sequence number must be greater than the current record sequence") + }) + + t.Run("Publish with sequence=1 on fresh node", func(t *testing.T) { + // Sequence=1 is the minimum valid sequence number for first publish + res := node.IPFS("name", "publish", "--allow-offline", "--ttl=0", "--sequence=1", publishPath) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath), res.Stdout.String()) + }) + + t.Run("Publish with sequence=42", func(t *testing.T) { + res := node.IPFS("name", "publish", "--allow-offline", "--ttl=0", "--sequence=42", publishPath) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath), res.Stdout.String()) + }) + + t.Run("Publish with large sequence number", func(t *testing.T) { + res := node.IPFS("name", "publish", "--allow-offline", "--ttl=0", "--sequence=18446744073709551615", publishPath) // Max uint64 + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath), res.Stdout.String()) + }) + }) + + t.Run("Sequence number monotonic check", func(t *testing.T) { + t.Parallel() + + node := makeDaemon(t, nil).StartDaemon() + publishPath1 := "/ipfs/" + fixtureCid + publishPath2 := "/ipfs/" + dagCid // Different content + name := ipns.NameFromPeer(node.PeerID()) + + // First, publish with a high sequence number (1000) + res := node.IPFS("name", "publish", "--ttl=0", "--sequence=1000", publishPath1) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath1), res.Stdout.String()) + + // Verify the record was published successfully + res = node.IPFS("name", "resolve", name.String()) + require.Contains(t, res.Stdout.String(), publishPath1) + + // Now try to publish different content with a LOWER sequence number (500) + // This should fail due to monotonic sequence check + res = node.RunIPFS("name", "publish", "--ttl=0", "--sequence=500", publishPath2) + require.NotEqual(t, 0, res.ExitCode(), "Expected publish with lower sequence to fail") + require.Contains(t, res.Stderr.String(), "sequence number", "Expected error about sequence number") + + // Verify the original content is still published (not overwritten) + res = node.IPFS("name", "resolve", name.String()) + require.Contains(t, res.Stdout.String(), publishPath1, "Original content should still be published") + require.NotContains(t, res.Stdout.String(), publishPath2, "New content should not have been published") + + // Publishing with a HIGHER sequence number should succeed + res = node.IPFS("name", "publish", "--ttl=0", "--sequence=2000", publishPath2) + require.Equal(t, fmt.Sprintf("Published to %s: %s\n", name.String(), publishPath2), res.Stdout.String()) + + // Verify the new content is now published + res = node.IPFS("name", "resolve", name.String()) + require.Contains(t, res.Stdout.String(), publishPath2, "New content should now be published") + }) } diff --git a/test/cli/pinning_remote_test.go b/test/cli/pinning_remote_test.go index fede942ba..fd9ae8e94 100644 --- a/test/cli/pinning_remote_test.go +++ b/test/cli/pinning_remote_test.go @@ -9,8 +9,8 @@ import ( "time" "github.com/google/uuid" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/ipfs/kubo/test/cli/testutils/pinningservice" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -350,7 +350,7 @@ func TestRemotePinning(t *testing.T) { pin.Status = "pinned" transitionedCh <- struct{}{} } - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) node.IPFS("pin", "remote", "add", "--background=false", "--service=svc", hash) <-transitionedCh res := node.IPFS("pin", "remote", "ls", "--service=svc", "--cid="+hash, "--enc=json").Stdout.String() @@ -368,7 +368,7 @@ func TestRemotePinning(t *testing.T) { defer pin.M.Unlock() pin.Status = "pinned" } - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) @@ -396,7 +396,7 @@ func TestRemotePinning(t *testing.T) { defer pin.M.Unlock() pin.Status = "pinned" } - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) node.IPFS("pin", "remote", "add", "--service=svc", "--name=force-test-name", hash) @@ -417,7 +417,7 @@ func TestRemotePinning(t *testing.T) { pin.Status = "pinned" } for i := 0; i < 4; i++ { - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) name := fmt.Sprintf("--name=%d", i) node.IPFS("pin", "remote", "add", "--service=svc", "--name="+name, hash) } @@ -438,7 +438,7 @@ func TestRemotePinning(t *testing.T) { _, svcURL := runPinningService(t, authToken) node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken) - hash := node.IPFSAddStr(string(testutils.RandomBytes(1000))) + hash := node.IPFSAddStr(string(random.Bytes(1000))) res := node.IPFS("pin", "remote", "add", "--service=svc", "--background", hash) warningMsg := "WARNING: the local node is offline and remote pinning may fail if there is no other provider for this CID" assert.Contains(t, res.Stdout.String(), warningMsg) diff --git a/test/cli/pins_test.go b/test/cli/pins_test.go index 3e3325a01..68611c8f8 100644 --- a/test/cli/pins_test.go +++ b/test/cli/pins_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" . "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" @@ -142,7 +143,7 @@ func testPinDAG(t *testing.T, args testPinsArgs) { if args.runDaemon { node.StartDaemon("--offline") } - bytes := RandomBytes(1 << 20) // 1 MiB + bytes := random.Bytes(1 << 20) // 1 MiB tmpFile := h.WriteToTemp(string(bytes)) cid := node.IPFS(StrCat("add", args.pinArg, "--pin=false", "-q", tmpFile)...).Stdout.Trimmed() @@ -169,7 +170,7 @@ func testPinProgress(t *testing.T, args testPinsArgs) { node.StartDaemon("--offline") } - bytes := RandomBytes(1 << 20) // 1 MiB + bytes := random.Bytes(1 << 20) // 1 MiB tmpFile := h.WriteToTemp(string(bytes)) cid := node.IPFS(StrCat("add", args.pinArg, "--pin=false", "-q", tmpFile)...).Stdout.Trimmed() @@ -218,8 +219,8 @@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidAStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidBStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidAStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidBStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") _ = node.IPFS("pin", "add", "--name", "testPin", cidAStr) @@ -246,9 +247,9 @@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidAStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidBStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidCStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidAStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidBStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidCStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") outA := cidAStr + " recursive testPin" outB := cidBStr + " recursive testPin" @@ -284,7 +285,7 @@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") outBefore := cidStr + " recursive A" outAfter := cidStr + " recursive B" @@ -305,8 +306,8 @@ func TestPins(t *testing.T) { t.Parallel() node := harness.NewT(t).NewNode().Init() - cidAStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") - cidBStr := node.IPFSAddStr(RandomStr(1000), "--pin=false") + cidAStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") + cidBStr := node.IPFSAddStr(string(random.Bytes(1000)), "--pin=false") _ = node.IPFS("pin", "add", "--name", "testPinJson", cidAStr) diff --git a/test/cli/provider_test.go b/test/cli/provider_test.go index 7e2bee411..f0d04e1d2 100644 --- a/test/cli/provider_test.go +++ b/test/cli/provider_test.go @@ -2,11 +2,12 @@ package cli import ( "bytes" + "encoding/json" "testing" "time" + "github.com/ipfs/go-test/random" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,6 +21,12 @@ func TestProvider(t *testing.T) { return nodes.StartDaemons().Connect() } + initNodesWithoutStart := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes { + nodes := harness.NewT(t).NewNodes(n).Init() + nodes.ForEachPar(fn) + return nodes + } + expectNoProviders := func(t *testing.T, cid string, nodes ...*harness.Node) { for _, node := range nodes { res := node.IPFS("routing", "findprovs", "-n=1", cid) @@ -43,9 +50,47 @@ func TestProvider(t *testing.T) { defer nodes.StopDaemons() cid := nodes[0].IPFSAddStr(time.Now().String()) - // Reprovide as initialProviderDelay still ongoing - res := nodes[0].IPFS("routing", "reprovide") - require.NoError(t, res.Err) + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Provider.Enabled=true announces new CIDs created by ipfs add --pin=false with default strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provider.Enabled", true) + // Default strategy is "all" which should provide even unpinned content + }) + defer nodes.StopDaemons() + + cid := nodes[0].IPFSAddStr(time.Now().String(), "--pin=false") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Provider.Enabled=true announces new CIDs created by ipfs block put --pin=false with default strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provider.Enabled", true) + // Default strategy is "all" which should provide unpinned content from block put + }) + defer nodes.StopDaemons() + + data := random.Bytes(256) + cid := nodes[0].IPFSBlockPut(bytes.NewReader(data), "--pin=false") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Provider.Enabled=true announces new CIDs created by ipfs dag put --pin=false with default strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Provider.Enabled", true) + // Default strategy is "all" which should provide unpinned content from dag put + }) + defer nodes.StopDaemons() + + dagData := `{"hello": "world", "timestamp": "` + time.Now().String() + `"}` + cid := nodes[0].IPFSDAGPut(bytes.NewReader([]byte(dagData)), "--pin=false") expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) }) @@ -99,7 +144,7 @@ func TestProvider(t *testing.T) { }) defer nodes.StopDaemons() - cid := nodes[0].IPFSAddStr(time.Now().String(), "--offline") + cid := nodes[0].IPFSAddStr(time.Now().String()) expectNoProviders(t, cid, nodes[1:]...) @@ -119,7 +164,7 @@ func TestProvider(t *testing.T) { }) defer nodes.StopDaemons() - cid := nodes[0].IPFSAddStr(time.Now().String(), "--offline") + cid := nodes[0].IPFSAddStr(time.Now().String()) expectNoProviders(t, cid, nodes[1:]...) @@ -130,7 +175,7 @@ func TestProvider(t *testing.T) { expectNoProviders(t, cid, nodes[1:]...) }) - t.Run("Reprovides with 'all' strategy", func(t *testing.T) { + t.Run("Provide with 'all' strategy", func(t *testing.T) { t.Parallel() nodes := initNodes(t, 2, func(n *harness.Node) { @@ -138,8 +183,94 @@ func TestProvider(t *testing.T) { }) defer nodes.StopDaemons() - cid := nodes[0].IPFSAddStr(time.Now().String(), "--local") + cid := nodes[0].IPFSAddStr("all strategy") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + t.Run("Provide with 'pinned' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "pinned") + }) + defer nodes.StopDaemons() + + // Add a non-pinned CID (should not be provided) + cid := nodes[0].IPFSAddStr("pinned strategy", "--pin=false") + expectNoProviders(t, cid, nodes[1:]...) + + // Pin the CID (should now be provided) + nodes[0].IPFS("pin", "add", cid) + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Provide with 'pinned+mfs' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "pinned+mfs") + }) + defer nodes.StopDaemons() + + // Add a pinned CID (should be provided) + cidPinned := nodes[0].IPFSAddStr("pinned content") + cidUnpinned := nodes[0].IPFSAddStr("unpinned content", "--pin=false") + cidMFS := nodes[0].IPFSAddStr("mfs content", "--pin=false") + nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + + n0pid := nodes[0].PeerID().String() + expectProviders(t, cidPinned, n0pid, nodes[1:]...) + expectNoProviders(t, cidUnpinned, nodes[1:]...) + expectProviders(t, cidMFS, n0pid, nodes[1:]...) + }) + + t.Run("Provide with 'roots' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "roots") + }) + defer nodes.StopDaemons() + + // Add a root CID (should be provided) + cidRoot := nodes[0].IPFSAddStr("roots strategy", "-w", "-Q") + // the same without wrapping should give us a child node. + cidChild := nodes[0].IPFSAddStr("root strategy", "--pin=false") + + expectProviders(t, cidRoot, nodes[0].PeerID().String(), nodes[1:]...) + expectNoProviders(t, cidChild, nodes[1:]...) + }) + + t.Run("Provide with 'mfs' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodes(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "mfs") + }) + defer nodes.StopDaemons() + + // Add a file to MFS (should be provided) + data := random.Bytes(1000) + cid := nodes[0].IPFSAdd(bytes.NewReader(data), "-Q") + + // not yet in MFS + expectNoProviders(t, cid, nodes[1:]...) + + nodes[0].IPFS("files", "cp", "/ipfs/"+cid, "/myfile") + expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) + }) + + t.Run("Reprovides with 'all' strategy when strategy is '' (empty)", func(t *testing.T) { + t.Parallel() + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "") + }) + + cid := nodes[0].IPFSAddStr(time.Now().String()) + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() expectNoProviders(t, cid, nodes[1:]...) nodes[0].IPFS("routing", "reprovide") @@ -147,16 +278,17 @@ func TestProvider(t *testing.T) { expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...) }) - t.Run("Reprovides with 'flat' strategy", func(t *testing.T) { + t.Run("Reprovides with 'all' strategy", func(t *testing.T) { t.Parallel() - nodes := initNodes(t, 2, func(n *harness.Node) { - n.SetIPFSConfig("Reprovider.Strategy", "flat") + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "all") }) + + cid := nodes[0].IPFSAddStr(time.Now().String()) + + nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() - - cid := nodes[0].IPFSAddStr(time.Now().String(), "--local") - expectNoProviders(t, cid, nodes[1:]...) nodes[0].IPFS("routing", "reprovide") @@ -167,25 +299,34 @@ func TestProvider(t *testing.T) { t.Run("Reprovides with 'pinned' strategy", func(t *testing.T) { t.Parallel() - foo := testutils.RandomBytes(1000) - bar := testutils.RandomBytes(1000) + foo := random.Bytes(1000) + bar := random.Bytes(1000) - nodes := initNodes(t, 2, func(n *harness.Node) { + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Reprovider.Strategy", "pinned") }) + + // Add a pin while offline so it cannot be provided + cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w") + + nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() - cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--offline", "--pin=false") - cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--offline", "--pin=false") - cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "--offline", "-w") + // Add content without pinning while daemon line + cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--pin=false") + cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false") + // Nothing should have been provided. The pin was offline, and + // the others should not be provided per the strategy. expectNoProviders(t, cidFoo, nodes[1:]...) expectNoProviders(t, cidBar, nodes[1:]...) expectNoProviders(t, cidBarDir, nodes[1:]...) nodes[0].IPFS("routing", "reprovide") + // cidFoo is not pinned so should not be provided. expectNoProviders(t, cidFoo, nodes[1:]...) + // cidBar gets provided by being a child from cidBarDir even though we added with pin=false. expectProviders(t, cidBar, nodes[0].PeerID().String(), nodes[1:]...) expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...) }) @@ -193,30 +334,183 @@ func TestProvider(t *testing.T) { t.Run("Reprovides with 'roots' strategy", func(t *testing.T) { t.Parallel() - foo := testutils.RandomBytes(1000) - bar := testutils.RandomBytes(1000) - baz := testutils.RandomBytes(1000) + foo := random.Bytes(1000) + bar := random.Bytes(1000) - nodes := initNodes(t, 2, func(n *harness.Node) { + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { n.SetIPFSConfig("Reprovider.Strategy", "roots") }) + n0pid := nodes[0].PeerID().String() + + // Add a pin. Only root should get pinned but not provided + // because node not started + cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "-w") + + nodes = nodes.StartDaemons().Connect() defer nodes.StopDaemons() - cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo), "--offline", "--pin=false") - cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--offline", "--pin=false") - cidBaz := nodes[0].IPFSAdd(bytes.NewReader(baz), "--offline") - cidBarDir := nodes[0].IPFSAdd(bytes.NewReader(bar), "-Q", "--offline", "-w") + cidFoo := nodes[0].IPFSAdd(bytes.NewReader(foo)) + cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false") - expectNoProviders(t, cidFoo, nodes[1:]...) + // cidFoo will get provided per the strategy but cidBar will not. + expectProviders(t, cidFoo, n0pid, nodes[1:]...) expectNoProviders(t, cidBar, nodes[1:]...) - expectNoProviders(t, cidBarDir, nodes[1:]...) nodes[0].IPFS("routing", "reprovide") - expectNoProviders(t, cidFoo, nodes[1:]...) + expectProviders(t, cidFoo, n0pid, nodes[1:]...) expectNoProviders(t, cidBar, nodes[1:]...) - expectProviders(t, cidBaz, nodes[0].PeerID().String(), nodes[1:]...) - expectProviders(t, cidBarDir, nodes[0].PeerID().String(), nodes[1:]...) + expectProviders(t, cidBarDir, n0pid, nodes[1:]...) + }) + + t.Run("Reprovides with 'mfs' strategy", func(t *testing.T) { + t.Parallel() + + bar := random.Bytes(1000) + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "mfs") + }) + n0pid := nodes[0].PeerID().String() + + // add something and lets put it in MFS + cidBar := nodes[0].IPFSAdd(bytes.NewReader(bar), "--pin=false", "-Q") + nodes[0].IPFS("files", "cp", "/ipfs/"+cidBar, "/myfile") + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // cidBar is in MFS but not provided + expectNoProviders(t, cidBar, nodes[1:]...) + + nodes[0].IPFS("routing", "reprovide") + + // And now is provided + expectProviders(t, cidBar, n0pid, nodes[1:]...) + }) + + t.Run("Reprovides with 'pinned+mfs' strategy", func(t *testing.T) { + t.Parallel() + + nodes := initNodesWithoutStart(t, 2, func(n *harness.Node) { + n.SetIPFSConfig("Reprovider.Strategy", "pinned+mfs") + }) + n0pid := nodes[0].PeerID().String() + + // Add a pinned CID (should be provided) + cidPinned := nodes[0].IPFSAddStr("pinned content", "--pin=true") + // Add a CID to MFS (should be provided) + cidMFS := nodes[0].IPFSAddStr("mfs content") + nodes[0].IPFS("files", "cp", "/ipfs/"+cidMFS, "/myfile") + // Add a CID that is neither pinned nor in MFS (should not be provided) + cidNeither := nodes[0].IPFSAddStr("neither content", "--pin=false") + + nodes = nodes.StartDaemons().Connect() + defer nodes.StopDaemons() + + // Trigger reprovide + nodes[0].IPFS("routing", "reprovide") + + // Check that pinned CID is provided + expectProviders(t, cidPinned, n0pid, nodes[1:]...) + // Check that MFS CID is provided + expectProviders(t, cidMFS, n0pid, nodes[1:]...) + // Check that neither CID is not provided + expectNoProviders(t, cidNeither, nodes[1:]...) + }) + + t.Run("provide clear command removes items from provide queue", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provider.Enabled", true) + n.SetIPFSConfig("Reprovider.Interval", "22h") + n.SetIPFSConfig("Reprovider.Strategy", "all") + }) + nodes.StartDaemons() + defer nodes.StopDaemons() + + // Clear the provide queue first time - works regardless of queue state + res1 := nodes[0].IPFS("provide", "clear") + require.NoError(t, res1.Err) + + // Should report cleared items and proper message format + assert.Contains(t, res1.Stdout.String(), "removed") + assert.Contains(t, res1.Stdout.String(), "items from provide queue") + + // Clear the provide queue second time - should definitely report 0 items + res2 := nodes[0].IPFS("provide", "clear") + require.NoError(t, res2.Err) + + // Should report 0 items cleared since queue was already cleared + assert.Contains(t, res2.Stdout.String(), "removed 0 items from provide queue") + }) + + t.Run("provide clear command with quiet option", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provider.Enabled", true) + n.SetIPFSConfig("Reprovider.Interval", "22h") + n.SetIPFSConfig("Reprovider.Strategy", "all") + }) + nodes.StartDaemons() + defer nodes.StopDaemons() + + // Clear the provide queue with quiet option + res := nodes[0].IPFS("provide", "clear", "-q") + require.NoError(t, res.Err) + + // Should have no output when quiet + assert.Empty(t, res.Stdout.String()) + }) + + t.Run("provide clear command works when provider is disabled", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provider.Enabled", false) + n.SetIPFSConfig("Reprovider.Interval", "22h") + n.SetIPFSConfig("Reprovider.Strategy", "all") + }) + nodes.StartDaemons() + defer nodes.StopDaemons() + + // Clear should succeed even when provider is disabled + res := nodes[0].IPFS("provide", "clear") + require.NoError(t, res.Err) + }) + + t.Run("provide clear command returns JSON with removed item count", func(t *testing.T) { + t.Parallel() + + nodes := harness.NewT(t).NewNodes(1).Init() + nodes.ForEachPar(func(n *harness.Node) { + n.SetIPFSConfig("Provider.Enabled", true) + n.SetIPFSConfig("Reprovider.Interval", "22h") + n.SetIPFSConfig("Reprovider.Strategy", "all") + }) + nodes.StartDaemons() + defer nodes.StopDaemons() + + // Clear the provide queue with JSON encoding + res := nodes[0].IPFS("provide", "clear", "--enc=json") + require.NoError(t, res.Err) + + // Should return valid JSON with the number of removed items + output := res.Stdout.String() + assert.NotEmpty(t, output) + + // Parse JSON to verify structure + var result int + err := json.Unmarshal([]byte(output), &result) + require.NoError(t, err, "Output should be valid JSON") + + // Should be a non-negative integer (0 or positive) + assert.GreaterOrEqual(t, result, 0) }) } diff --git a/test/cli/swarm_test.go b/test/cli/swarm_test.go index ecb668362..88f5f403b 100644 --- a/test/cli/swarm_test.go +++ b/test/cli/swarm_test.go @@ -50,7 +50,7 @@ func TestSwarm(t *testing.T) { actualID := output.Peers[0].Identify.ID actualPublicKey := output.Peers[0].Identify.PublicKey actualAgentVersion := output.Peers[0].Identify.AgentVersion - actualAdresses := output.Peers[0].Identify.Addresses + actualAddresses := output.Peers[0].Identify.Addresses actualProtocols := output.Peers[0].Identify.Protocols expectedID := otherNode.PeerID().String() @@ -59,8 +59,8 @@ func TestSwarm(t *testing.T) { assert.Equal(t, actualID, expectedID) assert.NotNil(t, actualPublicKey) assert.NotNil(t, actualAgentVersion) - assert.Len(t, actualAdresses, 1) - assert.Equal(t, expectedAddresses[0], actualAdresses[0]) + assert.Len(t, actualAddresses, 1) + assert.Equal(t, expectedAddresses[0], actualAddresses[0]) assert.Greater(t, len(actualProtocols), 0) }) diff --git a/test/cli/telemetry_test.go b/test/cli/telemetry_test.go new file mode 100644 index 000000000..69b87e80d --- /dev/null +++ b/test/cli/telemetry_test.go @@ -0,0 +1,314 @@ +package cli + +import ( + "encoding/json" + "io" + "maps" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "slices" + "testing" + "time" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTelemetry(t *testing.T) { + t.Parallel() + + t.Run("opt-out via environment variable", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Set the opt-out environment variable + node.Runner.Env["IPFS_TELEMETRY"] = "off" + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Start daemon with output capture + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // Check that telemetry is disabled + assert.Contains(t, output, "telemetry disabled via opt-out", "Expected telemetry disabled message") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was not created or was removed + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + _, err := os.Stat(uuidPath) + assert.True(t, os.IsNotExist(err), "UUID file should not exist when opted out") + }) + + t.Run("opt-out via config", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Set opt-out via config + node.IPFS("config", "Plugins.Plugins.telemetry.Config.Mode", "off") + + // Enable debug logging + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Start daemon with output capture + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // Check that telemetry is disabled + assert.Contains(t, output, "telemetry disabled via opt-out", "Expected telemetry disabled message") + assert.Contains(t, output, "telemetry collection skipped: opted out", "Expected telemetry skipped message") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was not created or was removed + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + _, err := os.Stat(uuidPath) + assert.True(t, os.IsNotExist(err), "UUID file should not exist when opted out") + }) + + t.Run("opt-out removes existing UUID file", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Create a UUID file manually to simulate previous telemetry run + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + testUUID := "test-uuid-12345" + err := os.WriteFile(uuidPath, []byte(testUUID), 0600) + require.NoError(t, err, "Failed to create test UUID file") + + // Verify file exists + _, err = os.Stat(uuidPath) + require.NoError(t, err, "UUID file should exist before opt-out") + + // Set the opt-out environment variable + node.Runner.Env["IPFS_TELEMETRY"] = "off" + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Start daemon with output capture + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // Check that UUID file was removed + assert.Contains(t, output, "removed existing telemetry UUID file due to opt-out", "Expected UUID removal message") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was removed + _, err = os.Stat(uuidPath) + assert.True(t, os.IsNotExist(err), "UUID file should be removed after opt-out") + }) + + t.Run("telemetry enabled shows info message", func(t *testing.T) { + t.Parallel() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Capture daemon output + stdout := &harness.Buffer{} + stderr := &harness.Buffer{} + + // Don't set opt-out, so telemetry will be enabled + // This should trigger the info message on first run + node.StartDaemonWithReq(harness.RunRequest{ + CmdOpts: []harness.CmdOpt{ + harness.RunWithStdout(stdout), + harness.RunWithStderr(stderr), + }, + }, "") + + time.Sleep(500 * time.Millisecond) + + // Get daemon output + output := stdout.String() + stderr.String() + + // First run - should show info message + assert.Contains(t, output, "Anonymous telemetry") + assert.Contains(t, output, "No data sent yet", "Expected no data sent message") + assert.Contains(t, output, "To opt-out before collection starts", "Expected opt-out instructions") + assert.Contains(t, output, "Learn more:", "Expected learn more link") + + // Stop daemon + node.StopDaemon() + + // Verify UUID file was created + uuidPath := filepath.Join(node.Dir, "telemetry_uuid") + _, err := os.Stat(uuidPath) + assert.NoError(t, err, "UUID file should exist when daemon started without telemetry opt-out") + }) + + t.Run("telemetry schema regression guard", func(t *testing.T) { + t.Parallel() + + // Define the exact set of expected telemetry fields + // This list must be updated whenever telemetry fields change + expectedFields := []string{ + "uuid", + "agent_version", + "private_network", + "bootstrappers_custom", + "repo_size_bucket", + "uptime_bucket", + "reprovider_strategy", + "routing_type", + "routing_accelerated_dht_client", + "routing_delegated_count", + "autonat_service_mode", + "autonat_reachability", + "swarm_enable_hole_punching", + "swarm_circuit_addresses", + "swarm_ipv4_public_addresses", + "swarm_ipv6_public_addresses", + "auto_tls_auto_wss", + "auto_tls_domain_suffix_custom", + "autoconf", + "autoconf_custom", + "discovery_mdns_enabled", + "platform_os", + "platform_arch", + "platform_containerized", + "platform_vm", + } + + // Channel to receive captured telemetry data + telemetryChan := make(chan map[string]interface{}, 1) + + // Create a mock HTTP server to capture telemetry + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "Failed to read body", http.StatusBadRequest) + return + } + + var telemetryData map[string]interface{} + if err := json.Unmarshal(body, &telemetryData); err != nil { + http.Error(w, "Invalid JSON", http.StatusBadRequest) + return + } + + // Send captured data through channel + select { + case telemetryChan <- telemetryData: + default: + } + + w.WriteHeader(http.StatusOK) + })) + defer mockServer.Close() + + // Create a new node + node := harness.NewT(t).NewNode().Init() + node.SetIPFSConfig("Plugins.Plugins.telemetry.Disabled", false) + + // Configure telemetry with a very short delay for testing + node.IPFS("config", "Plugins.Plugins.telemetry.Config.Delay", "100ms") + node.IPFS("config", "Plugins.Plugins.telemetry.Config.Endpoint", mockServer.URL) + + // Enable debug logging to see what's being sent + node.Runner.Env["GOLOG_LOG_LEVEL"] = "telemetry=debug" + + // Start daemon + node.StartDaemon() + defer node.StopDaemon() + + // Wait for telemetry to be sent (configured delay + buffer) + select { + case telemetryData := <-telemetryChan: + receivedFields := slices.Collect(maps.Keys(telemetryData)) + slices.Sort(expectedFields) + slices.Sort(receivedFields) + + // Fast path: check if fields match exactly + if !slices.Equal(expectedFields, receivedFields) { + var missingFields, unexpectedFields []string + for _, field := range expectedFields { + if _, ok := telemetryData[field]; !ok { + missingFields = append(missingFields, field) + } + } + + expectedSet := make(map[string]struct{}, len(expectedFields)) + for _, f := range expectedFields { + expectedSet[f] = struct{}{} + } + for field := range telemetryData { + if _, ok := expectedSet[field]; !ok { + unexpectedFields = append(unexpectedFields, field) + } + } + + t.Fatalf("Telemetry field mismatch:\n"+ + " Missing fields: %v\n"+ + " Unexpected fields: %v\n"+ + " Note: Update expectedFields list in this test when adding/removing telemetry fields", + missingFields, unexpectedFields) + } + + t.Logf("Telemetry field validation passed: %d fields verified", len(expectedFields)) + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for telemetry data to be sent") + } + }) +} diff --git a/test/cli/testutils/random.go b/test/cli/testutils/random.go deleted file mode 100644 index 6fa6528c3..000000000 --- a/test/cli/testutils/random.go +++ /dev/null @@ -1,16 +0,0 @@ -package testutils - -import "crypto/rand" - -func RandomBytes(n int) []byte { - bytes := make([]byte, n) - _, err := rand.Read(bytes) - if err != nil { - panic(err) - } - return bytes -} - -func RandomStr(n int) string { - return string(RandomBytes(n)) -} diff --git a/test/cli/testutils/random_files.go b/test/cli/testutils/random_files.go deleted file mode 100644 index 7991cad83..000000000 --- a/test/cli/testutils/random_files.go +++ /dev/null @@ -1,123 +0,0 @@ -package testutils - -import ( - "fmt" - "io" - "math/rand" - "os" - "path" - "time" -) - -var ( - AlphabetEasy = []rune("abcdefghijklmnopqrstuvwxyz01234567890-_") - AlphabetHard = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890!@#$%^&*()-_+= ;.,<>'\"[]{}() ") -) - -type RandFiles struct { - Rand *rand.Rand - FileSize int // the size per file. - FilenameSize int - Alphabet []rune // for filenames - - FanoutDepth int // how deep the hierarchy goes - FanoutFiles int // how many files per dir - FanoutDirs int // how many dirs per dir - - RandomSize bool // randomize file sizes - RandomNameSize bool // randomize filename lengths - RandomFanout bool // randomize fanout numbers -} - -func NewRandFiles() *RandFiles { - return &RandFiles{ - Rand: rand.New(rand.NewSource(time.Now().UnixNano())), - FileSize: 4096, - FilenameSize: 16, - Alphabet: AlphabetEasy, - FanoutDepth: 2, - FanoutDirs: 5, - FanoutFiles: 10, - RandomSize: true, - RandomNameSize: true, - } -} - -func (r *RandFiles) WriteRandomFiles(root string, depth int) error { - numfiles := r.FanoutFiles - if r.RandomFanout { - numfiles = rand.Intn(r.FanoutFiles) + 1 - } - - for i := 0; i < numfiles; i++ { - if err := r.WriteRandomFile(root); err != nil { - return err - } - } - - if depth+1 <= r.FanoutDepth { - numdirs := r.FanoutDirs - if r.RandomFanout { - numdirs = r.Rand.Intn(numdirs) + 1 - } - - for i := 0; i < numdirs; i++ { - if err := r.WriteRandomDir(root, depth+1); err != nil { - return err - } - } - } - - return nil -} - -func (r *RandFiles) RandomFilename(length int) string { - b := make([]rune, length) - for i := range b { - b[i] = r.Alphabet[r.Rand.Intn(len(r.Alphabet))] - } - return string(b) -} - -func (r *RandFiles) WriteRandomFile(root string) error { - filesize := int64(r.FileSize) - if r.RandomSize { - filesize = r.Rand.Int63n(filesize) + 1 - } - - n := r.FilenameSize - if r.RandomNameSize { - n = rand.Intn(r.FilenameSize-4) + 4 - } - name := r.RandomFilename(n) - filepath := path.Join(root, name) - f, err := os.Create(filepath) - if err != nil { - return fmt.Errorf("creating random file: %w", err) - } - - if _, err := io.CopyN(f, r.Rand, filesize); err != nil { - return fmt.Errorf("copying random file: %w", err) - } - - return f.Close() -} - -func (r *RandFiles) WriteRandomDir(root string, depth int) error { - if depth > r.FanoutDepth { - return nil - } - - n := rand.Intn(r.FilenameSize-4) + 4 - name := r.RandomFilename(n) - root = path.Join(root, name) - if err := os.MkdirAll(root, 0o755); err != nil { - return fmt.Errorf("creating random dir: %w", err) - } - - err := r.WriteRandomFiles(root, depth) - if err != nil { - return fmt.Errorf("writing random files in random dir: %w", err) - } - return nil -} diff --git a/test/cli/testutils/requires.go b/test/cli/testutils/requires.go index 1462b7fee..b0070e441 100644 --- a/test/cli/testutils/requires.go +++ b/test/cli/testutils/requires.go @@ -2,6 +2,7 @@ package testutils import ( "os" + "os/exec" "runtime" "testing" ) @@ -13,9 +14,48 @@ func RequiresDocker(t *testing.T) { } func RequiresFUSE(t *testing.T) { - if os.Getenv("TEST_FUSE") != "1" { - t.SkipNow() + // Skip if FUSE tests are explicitly disabled + if os.Getenv("TEST_FUSE") == "0" { + t.Skip("FUSE tests disabled via TEST_FUSE=0") } + + // If TEST_FUSE=1 is set, always run (for backwards compatibility) + if os.Getenv("TEST_FUSE") == "1" { + return + } + + // Auto-detect FUSE availability based on platform and tools + if !isFUSEAvailable(t) { + t.Skip("FUSE not available (no fusermount/umount found or unsupported platform)") + } +} + +// isFUSEAvailable checks if FUSE is available on the current system +func isFUSEAvailable(t *testing.T) bool { + t.Helper() + + // Check platform support + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "openbsd", "netbsd": + // These platforms potentially support FUSE + case "windows": + // Windows has limited FUSE support via WinFsp, but skip for now + return false + default: + // Unknown platform, assume no FUSE support + return false + } + + // Check for required unmount tools + var unmountCmd string + if runtime.GOOS == "linux" { + unmountCmd = "fusermount" + } else { + unmountCmd = "umount" + } + + _, err := exec.LookPath(unmountCmd) + return err == nil } func RequiresExpensive(t *testing.T) { diff --git a/test/cli/testutils/strings.go b/test/cli/testutils/strings.go index 110051e67..9bd73b379 100644 --- a/test/cli/testutils/strings.go +++ b/test/cli/testutils/strings.go @@ -13,6 +13,11 @@ import ( manet "github.com/multiformats/go-multiaddr/net" ) +var ( + AlphabetEasy = []rune("abcdefghijklmnopqrstuvwxyz01234567890-_") + AlphabetHard = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890!@#$%^&*()-_+= ;.,<>'\"[]{}() ") +) + // StrCat takes a bunch of strings or string slices // and concats them all together into one string slice. // If an arg is not one of those types, this panics. diff --git a/test/cli/transports_test.go b/test/cli/transports_test.go index ec27e00ba..43daa8ed4 100644 --- a/test/cli/transports_test.go +++ b/test/cli/transports_test.go @@ -6,9 +6,10 @@ import ( "path/filepath" "testing" + "github.com/ipfs/go-test/random" + "github.com/ipfs/go-test/random/files" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/test/cli/harness" - "github.com/ipfs/kubo/test/cli/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,7 +24,7 @@ func TestTransports(t *testing.T) { }) } checkSingleFile := func(nodes harness.Nodes) { - s := testutils.RandomStr(100) + s := string(random.Bytes(100)) hash := nodes[0].IPFSAddStr(s) nodes.ForEachPar(func(n *harness.Node) { val := n.IPFS("cat", hash).Stdout.String() @@ -33,10 +34,11 @@ func TestTransports(t *testing.T) { checkRandomDir := func(nodes harness.Nodes) { randDir := filepath.Join(nodes[0].Dir, "foobar") require.NoError(t, os.Mkdir(randDir, 0o777)) - rf := testutils.NewRandFiles() - rf.FanoutDirs = 3 - rf.FanoutFiles = 6 - require.NoError(t, rf.WriteRandomFiles(randDir, 4)) + rfCfg := files.DefaultConfig() + rfCfg.Dirs = 3 + rfCfg.Files = 6 + rfCfg.Depth = 4 + require.NoError(t, files.Create(rfCfg, randDir)) hash := nodes[1].IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed() nodes.ForEachPar(func(n *harness.Node) { @@ -60,6 +62,8 @@ func TestTransports(t *testing.T) { cfg.Swarm.Transports.Network.WebTransport = config.False cfg.Swarm.Transports.Network.WebRTCDirect = config.False cfg.Swarm.Transports.Network.Websocket = config.False + // Disable AutoTLS since we're disabling WebSocket transport + cfg.AutoTLS.Enabled = config.False }) }) disableRouting(nodes) diff --git a/test/dependencies/go.mod b/test/dependencies/go.mod index d641e1034..962e3093a 100644 --- a/test/dependencies/go.mod +++ b/test/dependencies/go.mod @@ -1,63 +1,65 @@ module github.com/ipfs/kubo/test/dependencies -go 1.24 +go 1.25 replace github.com/ipfs/kubo => ../../ require ( github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd - github.com/golangci/golangci-lint v1.60.2 + github.com/golangci/golangci-lint v1.64.8 github.com/ipfs/go-cidutil v0.1.0 - github.com/ipfs/go-log/v2 v2.6.0 - github.com/ipfs/go-test v0.2.2 + github.com/ipfs/go-log/v2 v2.8.1 + github.com/ipfs/go-test v0.2.3 github.com/ipfs/hang-fds v0.1.0 github.com/ipfs/iptb v1.4.1 github.com/ipfs/iptb-plugins v0.5.1 - github.com/multiformats/go-multiaddr v0.16.0 + github.com/multiformats/go-multiaddr v0.16.1 github.com/multiformats/go-multihash v0.2.3 - gotest.tools/gotestsum v1.12.0 + gotest.tools/gotestsum v1.12.3 ) require ( - 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect - 4d63.com/gochecknoglobals v0.2.1 // indirect - github.com/4meepo/tagalign v1.3.4 // indirect - github.com/Abirdcfly/dupword v0.0.14 // indirect - github.com/Antonboom/errname v0.1.13 // indirect - github.com/Antonboom/nilnil v0.1.9 // indirect - github.com/Antonboom/testifylint v1.4.3 // indirect + 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect + 4d63.com/gochecknoglobals v0.2.2 // indirect + github.com/4meepo/tagalign v1.4.2 // indirect + github.com/Abirdcfly/dupword v0.1.3 // indirect + github.com/Antonboom/errname v1.0.0 // indirect + github.com/Antonboom/nilnil v1.0.1 // indirect + github.com/Antonboom/testifylint v1.5.2 // indirect github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect - github.com/Crocmagnon/fatcontext v0.4.0 // indirect + github.com/Crocmagnon/fatcontext v0.7.1 // indirect github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect - github.com/alecthomas/go-check-sumtype v0.1.4 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect - github.com/alexkohler/nakedret/v2 v2.0.4 // indirect + github.com/alexkohler/nakedret/v2 v2.0.5 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.1.2 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect - github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/ashanbrown/makezero v1.2.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bitfield/gotestdox v0.2.2 // indirect - github.com/bkielbasa/cyclop v1.2.1 // indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v4 v4.4.1 // indirect - github.com/breml/bidichk v0.2.7 // indirect - github.com/breml/errchkjson v0.3.6 // indirect - github.com/butuzov/ireturn v0.3.0 // indirect - github.com/butuzov/mirror v1.2.0 // indirect - github.com/caddyserver/certmagic v0.21.6 // indirect + github.com/bombsimon/wsl/v4 v4.5.0 // indirect + github.com/breml/bidichk v0.3.2 // indirect + github.com/breml/errchkjson v0.4.0 // indirect + github.com/butuzov/ireturn v0.3.1 // indirect + github.com/butuzov/mirror v1.3.0 // indirect + github.com/caddyserver/certmagic v0.23.0 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect - github.com/catenacyber/perfsprint v0.7.1 // indirect + github.com/catenacyber/perfsprint v0.8.2 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/ckaznocha/intrange v0.1.2 // indirect + github.com/ckaznocha/intrange v0.3.0 // indirect github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect @@ -66,32 +68,34 @@ require ( github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect - github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.13.4 // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect github.com/dnephin/pflag v1.0.7 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/ettle/strcase v0.2.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect - github.com/fatih/color v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/firefart/nonamedreturns v1.0.5 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/gammazero/deque v1.0.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.9 // indirect + github.com/gammazero/deque v1.1.0 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/ghostiam/protogetter v0.3.6 // indirect - github.com/go-critic/go-critic v0.11.4 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/ghostiam/protogetter v0.3.9 // indirect + github.com/go-critic/go-critic v0.12.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.2.0 // indirect @@ -99,29 +103,29 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0 // indirect - github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect - github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect + github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect + github.com/golangci/go-printf-func-name v0.1.0 // indirect + github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect github.com/golangci/misspell v0.6.0 // indirect - github.com/golangci/modinfo v0.3.4 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect - github.com/golangci/revgrep v0.5.3 // indirect + github.com/golangci/revgrep v0.8.0 // indirect github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect - github.com/gostaticanalysis/comment v1.4.2 // indirect - github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/comment v1.5.0 // indirect + github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect @@ -130,66 +134,71 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/boxo v0.33.0 // indirect + github.com/ipfs/boxo v0.34.0 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format v0.2.2 // indirect github.com/ipfs/go-cid v0.5.0 // indirect - github.com/ipfs/go-datastore v0.8.2 // indirect + github.com/ipfs/go-datastore v0.8.3 // indirect + github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect + github.com/ipfs/go-ipld-cbor v0.2.1 // indirect github.com/ipfs/go-ipld-format v0.6.2 // indirect github.com/ipfs/go-ipld-legacy v0.2.2 // indirect github.com/ipfs/go-metrics-interface v0.3.0 // indirect + github.com/ipfs/go-unixfsnode v1.10.1 // indirect github.com/ipfs/kubo v0.31.0 // indirect + github.com/ipld/go-car/v2 v2.14.3 // indirect github.com/ipld/go-codec-dagpb v1.7.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect - github.com/ipshipyard/p2p-forge v0.6.0 // indirect + github.com/ipshipyard/p2p-forge v0.6.1 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jgautheron/goconst v1.7.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect - github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect - github.com/jjti/go-spancheck v0.6.2 // indirect - github.com/julz/importas v0.1.0 // indirect - github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect - github.com/kisielk/errcheck v1.7.0 // indirect - github.com/kkHAIKE/contextcheck v1.1.5 // indirect + github.com/jjti/go-spancheck v0.6.4 // indirect + github.com/julz/importas v0.2.0 // indirect + github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect + github.com/kisielk/errcheck v1.9.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.6 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/koron/go-ssdp v0.0.6 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.10 // indirect - github.com/kyoh86/exportloopref v0.1.11 // indirect - github.com/lasiar/canonicalheader v1.1.1 // indirect - github.com/ldez/gomoddirectives v0.2.4 // indirect - github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.4.2 // indirect + github.com/ldez/gomoddirectives v0.6.1 // indirect + github.com/ldez/grignotin v0.9.0 // indirect + github.com/ldez/tagliatelle v0.7.1 // indirect + github.com/ldez/usetesting v0.4.2 // indirect github.com/leonklingele/grouper v1.1.2 // indirect - github.com/libdns/libdns v0.2.2 // indirect + github.com/libdns/libdns v1.0.0-beta.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-doh-resolver v0.5.0 // indirect github.com/libp2p/go-flow-metrics v0.3.0 // indirect - github.com/libp2p/go-libp2p v0.42.0 // indirect + github.com/libp2p/go-libp2p v0.43.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.33.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.34.0 // indirect github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect github.com/libp2p/go-libp2p-record v0.3.1 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-netroute v0.2.2 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect - github.com/lufeee/execinquery v1.2.1 // indirect github.com/macabu/inamedparam v0.1.3 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect - github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/matoous/godox v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect - github.com/mgechev/revive v1.3.9 // indirect - github.com/mholt/acmez/v3 v3.0.0 // indirect - github.com/miekg/dns v1.1.66 // indirect + github.com/mgechev/revive v1.7.0 // indirect + github.com/mholt/acmez/v3 v3.1.2 // indirect + github.com/miekg/dns v1.1.68 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -207,11 +216,11 @@ require ( github.com/nakabonne/nestif v0.3.1 // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.16.2 // indirect + github.com/nunnatsa/ginkgolinter v0.19.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/onsi/ginkgo/v2 v2.23.4 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pion/datachannel v1.5.10 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect github.com/pion/dtls/v3 v3.0.6 // indirect @@ -234,61 +243,64 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/polyfloyd/go-errorlint v1.6.0 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect + github.com/polyfloyd/go-errorlint v1.7.1 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.64.0 // indirect - github.com/prometheus/procfs v0.16.1 // indirect - github.com/quasilyte/go-ruleguard v0.4.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.52.0 // indirect - github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect + github.com/quic-go/quic-go v0.54.0 // indirect + github.com/quic-go/webtransport-go v0.9.0 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryancurrah/gomodguard v1.3.3 // indirect + github.com/ryancurrah/gomodguard v1.3.5 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/samber/lo v1.47.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect - github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect - github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe // indirect - github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect + github.com/securego/gosec/v2 v2.22.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect - github.com/sivchari/tenv v1.10.0 // indirect - github.com/sonatard/noctx v0.0.2 // indirect + github.com/sivchari/tenv v1.12.1 // indirect + github.com/sonatard/noctx v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/spf13/viper v1.19.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/testify v1.10.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.16 // indirect - github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a // indirect - github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect + github.com/tdakkota/asciicheck v0.4.1 // indirect + github.com/tetafro/godot v1.5.0 // indirect + github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect + github.com/timonwong/loggercheck v0.10.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ultraware/funlen v0.1.0 // indirect - github.com/ultraware/whitespace v0.1.1 // indirect + github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect + github.com/ultraware/funlen v0.2.0 // indirect + github.com/ultraware/whitespace v0.2.0 // indirect github.com/urfave/cli v1.22.16 // indirect - github.com/uudashr/gocognit v1.1.3 // indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.3.1 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect + github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect + github.com/whyrusleeping/cbor-gen v0.3.1 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/wlynxg/anet v0.0.5 // indirect @@ -298,12 +310,13 @@ require ( github.com/ykadowak/zerologlint v0.1.5 // indirect github.com/zeebo/blake3 v0.2.4 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect - go-simpler.org/musttag v0.12.2 // indirect - go-simpler.org/sloglint v0.7.2 // indirect + go-simpler.org/musttag v0.13.0 // indirect + go-simpler.org/sloglint v0.9.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/dig v1.19.0 // indirect go.uber.org/fx v1.24.0 // indirect @@ -311,23 +324,24 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.uber.org/zap/exp v0.3.0 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.26.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect + golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.34.0 // indirect + golang.org/x/tools v0.36.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/protobuf v1.36.7 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.5.1 // indirect + honnef.co/go/tools v0.6.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect mvdan.cc/gofumpt v0.7.0 // indirect mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect diff --git a/test/dependencies/go.sum b/test/dependencies/go.sum index 28cf50f9c..a8018cc80 100644 --- a/test/dependencies/go.sum +++ b/test/dependencies/go.sum @@ -1,7 +1,7 @@ -4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= -4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= -4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= -4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A= +4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= +4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= +4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -11,55 +11,59 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8= -github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= -github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= -github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= -github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM= -github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= -github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ= -github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ= -github.com/Antonboom/testifylint v1.4.3 h1:ohMt6AHuHgttaQ1xb6SSnxCeK4/rnK7KKzbvs7DmEck= -github.com/Antonboom/testifylint v1.4.3/go.mod h1:+8Q9+AOLsz5ZiQiiYujJKs9mNz398+M6UgslP4qgJLA= +github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= +github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= +github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= +github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw= +github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA= +github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI= +github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs= +github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0= +github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk= +github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -github.com/Crocmagnon/fatcontext v0.4.0 h1:4ykozu23YHA0JB6+thiuEv7iT6xq995qS1vcuWZq0tg= -github.com/Crocmagnon/fatcontext v0.4.0/go.mod h1:ZtWrXkgyfsYPzS6K3O88va6t2GEglG93vnII/F94WC0= +github.com/Crocmagnon/fatcontext v0.7.1 h1:SC/VIbRRZQeQWj/TcQBS6JmrXcfA+BU4OGSVUt54PjM= +github.com/Crocmagnon/fatcontext v0.7.1/go.mod h1:1wMvv3NXEBJucFGfwOJBxSVWcoIO6emV215SMkW9MFU= github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 h1:Sz1JIXEcSfhz7fUi7xHnhpIE0thVASYjvosApmHuD2k= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1/go.mod h1:n/LSCXNuIYqVfBlVXyHfMQkZDdp1/mmxfSjADd3z1Zg= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd h1:HNhzThEtZW714v8Eda8sWWRcu9WSzJC+oCyjRjvZgRA= github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd/go.mod h1:bqoB8kInrTeEtYAwaIXoSRqdwnjQmFhsfusnzyui6yY= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= -github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= -github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= -github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= -github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= -github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= -github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= -github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg= -github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= +github.com/alexkohler/nakedret/v2 v2.0.5 h1:fP5qLgtwbx9EJE8dGEERT02YwS8En4r9nnZ71RK+EVU= +github.com/alexkohler/nakedret/v2 v2.0.5/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.1.2 h1:Yf8Iwm3z2hUUrP4muWfW83DF4nE3r1xZ26fGWUKCZlo= +github.com/alingse/nilnesserr v0.1.2/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= -github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= -github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= +github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -67,28 +71,28 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitfield/gotestdox v0.2.2 h1:x6RcPAbBbErKLnapz1QeAlf3ospg8efBsedU93CDsnE= github.com/bitfield/gotestdox v0.2.2/go.mod h1:D+gwtS0urjBrzguAkTM2wodsTQYFHdpx8eqRJ3N+9pY= -github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= -github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw= -github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A= +github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= -github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= -github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= -github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs= +github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos= +github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk= +github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= -github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= -github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs= -github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ= -github.com/caddyserver/certmagic v0.21.6 h1:1th6GfprVfsAtFNOu4StNMF5IxK5XiaI0yZhAHlZFPE= -github.com/caddyserver/certmagic v0.21.6/go.mod h1:n1sCo7zV1Ez2j+89wrzDxo4N/T1Ws/Vx8u5NvuBFabw= +github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY= +github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= +github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU= +github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4= github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= -github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc= -github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/catenacyber/perfsprint v0.8.2 h1:+o9zVmCSVa7M4MvabsWvESEhpsMkhfE7k0sHNGL95yw= +github.com/catenacyber/perfsprint v0.8.2/go.mod h1:q//VWC2fWbcdSLEY1R3l8n0zQCDPdE4IjZwyY1HMunM= github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -97,8 +101,8 @@ github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iy github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= -github.com/ckaznocha/intrange v0.1.2 h1:3Y4JAxcMntgb/wABQ6e8Q8leMd26JbX2790lIss9MTI= -github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= +github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY= +github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA= github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= @@ -122,18 +126,18 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAK github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4= github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= -github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/daixiang0/gci v0.13.4 h1:61UGkmpoAcxHM2hhNkZEf5SzwQtWJXTSws7jaPyqwlw= -github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= +github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -146,19 +150,23 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvw github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA= @@ -171,30 +179,32 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/gammazero/chanqueue v1.1.0 h1:yiwtloc1azhgGLFo2gMloJtQvkYD936Ai7tBfa+rYJw= -github.com/gammazero/chanqueue v1.1.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= -github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= -github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= +github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= +github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= +github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ= +github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo= +github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk= -github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= +github.com/ghostiam/protogetter v0.3.9 h1:j+zlLLWzqLay22Cz/aYwTHKQ88GE2DQ6GkWSYFOI4lQ= +github.com/ghostiam/protogetter v0.3.9/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU= -github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= +github.com/go-critic/go-critic v0.12.0 h1:iLosHZuye812wnkEz1Xu3aBwn5ocCPfc9yqmFG9pa6w= +github.com/go-critic/go-critic v0.12.0/go.mod h1:DpE0P6OVc6JzVYzmM5gq5jMU31zLr4am5mB/VfFK64w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= @@ -220,10 +230,10 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= -github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= -github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -240,20 +250,20 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME= -github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE= -github.com/golangci/golangci-lint v1.60.2 h1:Y8aWnZCMOLY5T7Ga5hcoemyKsZZJCUmIIK3xTD3jIhc= -github.com/golangci/golangci-lint v1.60.2/go.mod h1:4UvjLpOJoQSvmyWkmO1urDR3txhL9R9sn4oM/evJ95g= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= +github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= +github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= +github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= +github.com/golangci/golangci-lint v1.64.8 h1:y5TdeVidMtBGG32zgSC7ZXTFNHrsJkDnpO4ItB3Am+I= +github.com/golangci/golangci-lint v1.64.8/go.mod h1:5cEsUQBSr6zi8XI8OjmcY2Xmliqc4iYL7YoPrL+zLJ4= github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= -github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA= -github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM= github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= -github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs= -github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= +github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs= github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -263,8 +273,6 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -291,17 +299,22 @@ github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= -github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= +github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= +github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= +github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= -github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -319,8 +332,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.33.0 h1:9ow3chwkDzMj0Deq4AWRUEI7WnIIV7SZhPTzzG2mmfw= -github.com/ipfs/boxo v0.33.0/go.mod h1:3IPh7YFcCIcKp6o02mCHovrPntoT5Pctj/7j4syh/RM= +github.com/ipfs/boxo v0.34.0 h1:pMP9bAsTs4xVh8R0ZmxIWviV7kjDa60U24QrlGgHb1g= +github.com/ipfs/boxo v0.34.0/go.mod h1:kzdH/ewDybtO3+M8MCVkpwnIIc/d2VISX95DFrY4vQA= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-block-format v0.2.2 h1:uecCTgRwDIXyZPgYspaLXoMiMmxQpSx2aq34eNc4YvQ= @@ -329,38 +342,54 @@ github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= -github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= -github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= +github.com/ipfs/go-datastore v0.8.3 h1:z391GsQyGKUIUof2tPoaZVeDknbt7fNHs6Gqjcw5Jo4= +github.com/ipfs/go-datastore v0.8.3/go.mod h1:raxQ/CreIy9L6MxT71ItfMX12/ASN6EhXJoUFjICQ2M= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= +github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= +github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= +github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= +github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E= +github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= github.com/ipfs/go-ipld-format v0.6.2 h1:bPZQ+A05ol0b3lsJSl0bLvwbuQ+HQbSsdGTy4xtYUkU= github.com/ipfs/go-ipld-format v0.6.2/go.mod h1:nni2xFdHKx5lxvXJ6brt/pndtGxKAE+FPR1rg4jTkyk= github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= -github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg= -github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.8.1 h1:Y/X36z7ASoLJaYIJAL4xITXgwf7RVeqb1+/25aq/Xk0= +github.com/ipfs/go-log/v2 v2.8.1/go.mod h1:NyhTBcZmh2Y55eWVjOeKf8M7e4pnJYM3yDZNxQBWEEY= github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= -github.com/ipfs/go-test v0.2.2 h1:1yjYyfbdt1w93lVzde6JZ2einh3DIV40at4rVoyEcE8= -github.com/ipfs/go-test v0.2.2/go.mod h1:cmLisgVwkdRCnKu/CFZOk2DdhOcwghr5GsHeqwexoRA= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipfs/go-unixfsnode v1.10.1 h1:hGKhzuH6NSzZ4y621wGuDspkjXRNG3B+HqhlyTjSwSM= +github.com/ipfs/go-unixfsnode v1.10.1/go.mod h1:eguv/otvacjmfSbYvmamc9ssNAzLvRk0+YN30EYeOOY= github.com/ipfs/hang-fds v0.1.0 h1:deBiFlWHsVGzJ0ZMaqscEqRM1r2O1rFZ59UiQXb1Xko= github.com/ipfs/hang-fds v0.1.0/go.mod h1:29VLWOn3ftAgNNgXg/al7b11UzuQ+w7AwtCGcTaWkbM= github.com/ipfs/iptb v1.4.1 h1:faXd3TKGPswbHyZecqqg6UfbES7RDjTKQb+6VFPKDUo= github.com/ipfs/iptb v1.4.1/go.mod h1:nTsBMtVYFEu0FjC5DgrErnABm3OG9ruXkFXGJoTV5OA= github.com/ipfs/iptb-plugins v0.5.1 h1:11PNTNEt2+SFxjUcO5qpyCTXqDj6T8Tx9pU/G4ytCIQ= github.com/ipfs/iptb-plugins v0.5.1/go.mod h1:mscJAjRnu4g16QK6oUBn9RGpcp8ueJmLfmPxIG/At78= +github.com/ipld/go-car/v2 v2.14.3 h1:1Mhl82/ny8MVP+w1M4LXbj4j99oK3gnuZG2GmG1IhC8= +github.com/ipld/go-car/v2 v2.14.3/go.mod h1:/vpSvPngOX8UnvmdFJ3o/mDgXa9LuyXsn7wxOzHDYQE= github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0= github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= -github.com/ipshipyard/p2p-forge v0.6.0 h1:kNhYxgYGtqF3MLts/i0hw+7ygtgNB4Qv8h6fo7j6Iq4= -github.com/ipshipyard/p2p-forge v0.6.0/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= +github.com/ipshipyard/p2p-forge v0.6.1 h1:987/hUC1YxI56CcMX6iTB+9BLjFV0d2SJnig9Z1pf8A= +github.com/ipshipyard/p2p-forge v0.6.1/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= @@ -370,28 +399,26 @@ github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5 github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk= -github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA= +github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc= +github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= -github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos= -github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/karamaru-alpha/copyloopvar v1.2.1 h1:wmZaZYIjnJ0b5UoKDjUHrikcV0zuPyyxI4SVplLd2CI= +github.com/karamaru-alpha/copyloopvar v1.2.1/go.mod h1:nFmMlFNlClC2BPvNaHMdkirmTJxVCY0lhxBtlfOypMM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= -github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= +github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= +github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg= -github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= +github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE= +github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -407,30 +434,36 @@ github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= -github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= -github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= -github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I= -github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0= -github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg= -github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g= -github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= -github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.4.2 h1:l5pOzHBz8mFOlbcifTxzfyYbgEmoUqjxLFHZkjlbHXs= +github.com/ldez/exptostd v0.4.2/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ= +github.com/ldez/gomoddirectives v0.6.1 h1:Z+PxGAY+217f/bSGjNZr/b2KTXcyYLgiWI6geMBN2Qc= +github.com/ldez/gomoddirectives v0.6.1/go.mod h1:cVBiu3AHR9V31em9u2kwfMKD43ayN5/XDgr+cdaFaKs= +github.com/ldez/grignotin v0.9.0 h1:MgOEmjZIVNn6p5wPaGp/0OKWyvq42KnzAt/DAb8O4Ow= +github.com/ldez/grignotin v0.9.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= +github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= +github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA= +github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ= github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= -github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s= -github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= +github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ= +github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+skhePFdE= +github.com/libp2p/go-doh-resolver v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= -github.com/libp2p/go-libp2p v0.42.0 h1:A8foZk+ZEhZTv0Jb++7xUFlrFhBDv4j2Vh/uq4YX+KE= -github.com/libp2p/go-libp2p v0.42.0/go.mod h1:4NGcjbD9OIvFiSRb0XueCO19zJ4kSPK5vkyyOUYmMro= +github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU= +github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.33.1 h1:hKFhHMf7WH69LDjaxsJUWOU6qZm71uO47M/a5ijkiP0= -github.com/libp2p/go-libp2p-kad-dht v0.33.1/go.mod h1:CdmNk4VeGJa9EXM9SLNyNVySEvduKvb+5rSC/H4pLAo= +github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0= +github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o= github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs= github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g= github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= @@ -447,8 +480,6 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= -github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= -github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= @@ -461,30 +492,27 @@ github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1r github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0 h1:Ny7cm4KSWceJLYyI1sm+aFIVDWSGXLcOJ0O0UaS5wdU= -github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= +github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4= +github.com/matoous/godox v1.1.0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgechev/revive v1.3.9 h1:18Y3R4a2USSBF+QZKFQwVkBROUda7uoBlkEuBD+YD1A= -github.com/mgechev/revive v1.3.9/go.mod h1:+uxEIr5UH0TjXWHTno3xh4u7eg6jDpXKzQccA9UGhHU= -github.com/mholt/acmez/v3 v3.0.0 h1:r1NcjuWR0VaKP2BTjDK9LRFBw/WvURx3jlaEUl9Ht8E= -github.com/mholt/acmez/v3 v3.0.0/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= +github.com/mgechev/revive v1.7.0 h1:JyeQ4yO5K8aZhIKf5rec56u0376h8AlKNQEmjfkjKlY= +github.com/mgechev/revive v1.7.0/go.mod h1:qZnwcNhoguE58dfi96IJeSTPeZQejNeoMQLUZGi4SW4= +github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc= +github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= -github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= @@ -509,8 +537,8 @@ github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYg github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc= -github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -536,14 +564,17 @@ github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhK github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbnVSxfHJk= -github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= +github.com/nunnatsa/ginkgolinter v0.19.1 h1:mjwbOlDQxZi9Cal+KfbEJTCz327OLNfwNvoZ70NJ+c4= +github.com/nunnatsa/ginkgolinter v0.19.1/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= @@ -554,8 +585,10 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= @@ -609,24 +642,24 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/polyfloyd/go-errorlint v1.6.0 h1:tftWV9DE7txiFzPpztTAwyoRLKNj9gpVm2cg8/OwcYY= -github.com/polyfloyd/go-errorlint v1.6.0/go.mod h1:HR7u8wuP1kb1NeN1zqTd1ZMlqUKPPHF+Id4vIPvDqVw= +github.com/polyfloyd/go-errorlint v1.7.1 h1:RyLVXIbosq1gBdk/pChWA8zWYLsq9UEw7a1L5TVMCnA= +github.com/polyfloyd/go-errorlint v1.7.1/go.mod h1:aXjNb1x2TNhoLsk26iv1yl7a+zTnXPhwEMtEXukiLR8= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= -github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= -github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -637,44 +670,41 @@ github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4l github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.52.0 h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA= -github.com/quic-go/quic-go v0.52.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ= -github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= -github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= +github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg= +github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.3 h1:eiSQdJVNr9KTNxY2Niij8UReSwR8Xrte3exBrAZfqpg= -github.com/ryancurrah/gomodguard v1.3.3/go.mod h1:rsKQjj4l3LXe8N344Ow7agAy5p9yjsWOtRzUMYmA0QY= +github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU= +github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= -github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= -github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= -github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI= -github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= -github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe h1:exdneYmXwZ4+VaIWv9mQ47uIHkTQSN50DYdCjXJ1cdQ= -github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe/go.mod h1:iyeMMRw8QEmueUSZ2VqmkQMiDyDcobfPnG00CV/NWdE= +github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= +github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.22.2 h1:IXbuI7cJninj0nRpZSLCUlotsj8jGusohfONMrHoF6g= +github.com/securego/gosec/v2 v2.22.2/go.mod h1:UEBGA+dSKb+VqM6TdehR7lnQtIIMorYJ4/9CW1KVQBE= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -702,14 +732,14 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= -github.com/sivchari/tenv v1.10.0 h1:g/hzMA+dBCKqGXgW8AV/1xIWhAvDrx0zFKNR48NFMg0= -github.com/sivchari/tenv v1.10.0/go.mod h1:tdY24masnVoZFxYrHv/nD6Tc8FbkEtAQEEziXpyMgqY= +github.com/sivchari/tenv v1.12.1 h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY= +github.com/sivchari/tenv v1.12.1/go.mod h1:1LjSOUCc25snIr5n3DtGGrENhX3LuWefcplwVGC24mw= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= -github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM= +github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= @@ -718,20 +748,21 @@ github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag07 github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= -github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= +github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -752,31 +783,35 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= -github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tdakkota/asciicheck v0.4.1 h1:bm0tbcmi0jezRA2b5kg4ozmMuGAFotKI3RZfrhfovg8= +github.com/tdakkota/asciicheck v0.4.1/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= -github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= -github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a h1:A6uKudFIfAEpoPdaal3aSqGxBzLyU8TqyXImLwo6dIo= -github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= -github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= -github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= -github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4= -github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tetafro/godot v1.5.0 h1:aNwfVI4I3+gdxjMgYPus9eHmoBeJIbnajOyqZYStzuw= +github.com/tetafro/godot v1.5.0/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg= +github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg= +github.com/tomarrell/wrapcheck/v2 v2.10.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= -github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= -github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ= -github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ= github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po= -github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM= -github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.3.1 h1:bA51vmVx1UIhiIsQFSNq6GZ6VPTk3WNMZgRiCe9R29U= +github.com/uudashr/iface v1.3.1/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= @@ -785,6 +820,10 @@ github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSD github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= +github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= +github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -817,19 +856,27 @@ gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs= -go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM= -go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY= -go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo= +go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE= +go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM= +go-simpler.org/sloglint v0.9.0 h1:/40NQtjRx9txvsB/RN022KsUJU+zaaSb/9q9BSefSrE= +go-simpler.org/sloglint v0.9.0/go.mod h1:G/OrAF6uxj48sHahCzrbarVMptL2kjWTaUeC8+fOGww= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= @@ -857,24 +904,20 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE= +golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4= +golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -884,19 +927,15 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -915,21 +954,17 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -948,9 +983,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -961,7 +995,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -972,42 +1005,30 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1015,15 +1036,14 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= @@ -1033,9 +1053,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= @@ -1045,28 +1063,29 @@ golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -1085,8 +1104,8 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -1100,16 +1119,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/gotestsum v1.12.0 h1:CmwtaGDkHxrZm4Ib0Vob89MTfpc3GrEFMJKovliPwGk= -gotest.tools/gotestsum v1.12.0/go.mod h1:fAvqkSptospfSbQw26CTYzNwnsE/ztqLeyhP0h67ARY= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/gotestsum v1.12.3 h1:jFwenGJ0RnPkuKh2VzAYl1mDOJgbhobBDeL2W1iEycs= +gotest.tools/gotestsum v1.12.3/go.mod h1:Y1+e0Iig4xIRtdmYbEV7K7H6spnjc1fX4BOuUhWw2Wk= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= -honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= diff --git a/test/sharness/Rules.mk b/test/sharness/Rules.mk index 2f2e076ba..0ac3cf950 100644 --- a/test/sharness/Rules.mk +++ b/test/sharness/Rules.mk @@ -14,10 +14,10 @@ DEPS_$(d) += $(SHARNESS_$(d)) ifeq ($(OS),Linux) PLUGINS_DIR_$(d) := $(d)/plugins/ -ORGIN_PLUGINS_$(d) := $(plugin/plugins_plugins_so) -PLUGINS_$(d) := $(addprefix $(PLUGINS_DIR_$(d)),$(notdir $(ORGIN_PLUGINS_$(d)))) +ORIGIN_PLUGINS_$(d) := $(plugin/plugins_plugins_so) +PLUGINS_$(d) := $(addprefix $(PLUGINS_DIR_$(d)),$(notdir $(ORIGIN_PLUGINS_$(d)))) -$(PLUGINS_$(d)): $(ORGIN_PLUGINS_$(d)) +$(PLUGINS_$(d)): $(ORIGIN_PLUGINS_$(d)) @mkdir -p $(@D) cp -f plugin/plugins/$(@F) $@ diff --git a/test/sharness/lib/test-lib.sh b/test/sharness/lib/test-lib.sh index f9292cb23..413d0e92f 100644 --- a/test/sharness/lib/test-lib.sh +++ b/test/sharness/lib/test-lib.sh @@ -54,7 +54,7 @@ cur_test_pwd="$(pwd)" while true ; do echo -n > stuck_cwd_list - lsof -c ipfs -Ffn 2>/dev/null | grep -A1 '^fcwd$' | grep '^n' | cut -b 2- | while read -r pwd_of_stuck ; do + timeout 5 lsof -c ipfs -Ffn 2>/dev/null | grep -A1 '^fcwd$' | grep '^n' | cut -b 2- | while read -r pwd_of_stuck ; do case "$pwd_of_stuck" in "$cur_test_pwd"*) echo "$pwd_of_stuck" >> stuck_cwd_list @@ -205,6 +205,10 @@ test_init_ipfs() { ipfs init "${args[@]}" --profile=test > /dev/null ' + test_expect_success "disable telemetry" ' + test_config_set --bool Plugins.Plugins.telemetry.Disabled "true" + ' + test_expect_success "prepare config -- mounting" ' mkdir mountdir ipfs ipns mfs && test_config_set Mounts.IPFS "$(pwd)/ipfs" && @@ -227,6 +231,10 @@ test_init_ipfs_measure() { ipfs init "${args[@]}" --profile=test,flatfs-measure > /dev/null ' + test_expect_success "disable telemetry" ' + test_config_set --bool Plugins.Plugins.telemetry.Disabled "true" + ' + test_expect_success "prepare config -- mounting" ' mkdir mountdir ipfs ipns && test_config_set Mounts.IPFS "$(pwd)/ipfs" && @@ -309,10 +317,37 @@ test_launch_ipfs_daemon_without_network() { } do_umount() { + local mount_point="$1" + local max_retries=3 + local retry_delay=0.5 + + # Try normal unmount first (without lazy flag) + for i in $(seq 1 $max_retries); do + if [ "$(uname -s)" = "Linux" ]; then + # First attempt: standard unmount + if fusermount -u "$mount_point" 2>/dev/null; then + return 0 + fi + else + if umount "$mount_point" 2>/dev/null; then + return 0 + fi + fi + + # If not last attempt, wait before retry + if [ $i -lt $max_retries ]; then + go-sleep "${retry_delay}s" + fi + done + + # If normal unmount failed, try lazy unmount as last resort (Linux only) if [ "$(uname -s)" = "Linux" ]; then - fusermount -z -u "$1" + # Log that we're falling back to lazy unmount + test "$TEST_VERBOSE" = 1 && echo "# Warning: falling back to lazy unmount for $mount_point" + fusermount -z -u "$mount_point" 2>/dev/null else - umount "$1" + # On non-Linux, try force unmount + umount -f "$mount_point" 2>/dev/null || true fi } diff --git a/test/sharness/t0018-indent.sh b/test/sharness/t0018-indent.sh index 5fa398fd2..a6029d93f 100755 --- a/test/sharness/t0018-indent.sh +++ b/test/sharness/t0018-indent.sh @@ -5,6 +5,9 @@ test_description="Test sharness test indent" . lib/test-lib.sh for file in $(find .. -name 't*.sh' -type f); do + if [ "$(basename "$file")" = "t0290-cid.sh" ]; then + continue + fi test_expect_success "indent in $file is not using tabs" ' test_must_fail grep -P "^ *\t" $file ' diff --git a/test/sharness/t0066-migration.sh b/test/sharness/t0066-migration.sh index fa6a10e02..50ca3d17c 100755 --- a/test/sharness/t0066-migration.sh +++ b/test/sharness/t0066-migration.sh @@ -10,6 +10,10 @@ test_description="Test migrations auto update prompt" test_init_ipfs +# Remove explicit AutoConf.Enabled=false from test profile to use implicit default +# This allows daemon to work with 'auto' values added by v16-to-17 migration +ipfs config --json AutoConf.Enabled null >/dev/null 2>&1 + MIGRATION_START=7 IPFS_REPO_VER=$(<.ipfs/version) @@ -22,6 +26,12 @@ gen_mock_migrations() { j=$((i+1)) echo "#!/bin/bash" > bin/fs-repo-${i}-to-${j} echo "echo fake applying ${i}-to-${j} repo migration" >> bin/fs-repo-${i}-to-${j} + # Update version file to the target version for hybrid migration system + echo "if [ \"\$1\" = \"-path\" ] && [ -n \"\$2\" ]; then" >> bin/fs-repo-${i}-to-${j} + echo " echo $j > \"\$2/version\"" >> bin/fs-repo-${i}-to-${j} + echo "elif [ -n \"\$IPFS_PATH\" ]; then" >> bin/fs-repo-${i}-to-${j} + echo " echo $j > \"\$IPFS_PATH/version\"" >> bin/fs-repo-${i}-to-${j} + echo "fi" >> bin/fs-repo-${i}-to-${j} chmod +x bin/fs-repo-${i}-to-${j} ((i++)) done @@ -54,34 +64,42 @@ test_expect_success "manually reset repo version to $MIGRATION_START" ' ' test_expect_success "ipfs daemon --migrate=false fails" ' - test_expect_code 1 ipfs daemon --migrate=false > false_out + test_expect_code 1 ipfs daemon --migrate=false > false_out 2>&1 ' test_expect_success "output looks good" ' - grep "Please get fs-repo-migrations from https://dist.ipfs.tech" false_out + grep "Kubo repository at .* has version .* and needs to be migrated to version" false_out && + grep "Error: fs-repo requires migration" false_out ' -# The migrations will succeed, but the daemon will still exit with 1 because -# the fake migrations do not update the repo version number. -# -# If run with real migrations, the daemon continues running and must be killed. +# The migrations will succeed and the daemon will continue running +# since the mock migrations now properly update the repo version number. test_expect_success "ipfs daemon --migrate=true runs migration" ' - test_expect_code 1 ipfs daemon --migrate=true > true_out + ipfs daemon --migrate=true > true_out 2>&1 & + DAEMON_PID=$! + # Wait for daemon to be ready then shutdown gracefully + sleep 3 && ipfs shutdown 2>/dev/null || kill $DAEMON_PID 2>/dev/null || true + wait $DAEMON_PID 2>/dev/null || true ' test_expect_success "output looks good" ' check_migration_output true_out && - grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null + (grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null || + grep "Hybrid migration completed successfully: v$MIGRATION_START → v$IPFS_REPO_VER" true_out > /dev/null) +' + +test_expect_success "reset repo version for auto-migration test" ' + echo "$MIGRATION_START" > "$IPFS_PATH"/version ' test_expect_success "'ipfs daemon' prompts to auto migrate" ' - test_expect_code 1 ipfs daemon > daemon_out 2> daemon_err + test_expect_code 1 ipfs daemon > daemon_out 2>&1 ' test_expect_success "output looks good" ' - grep "Found outdated fs-repo" daemon_out > /dev/null && + grep "Kubo repository at .* has version .* and needs to be migrated to version" daemon_out > /dev/null && grep "Run migrations now?" daemon_out > /dev/null && - grep "Please get fs-repo-migrations from https://dist.ipfs.tech" daemon_out > /dev/null + grep "Error: fs-repo requires migration" daemon_out > /dev/null ' test_expect_success "ipfs repo migrate succeed" ' @@ -89,8 +107,9 @@ test_expect_success "ipfs repo migrate succeed" ' ' test_expect_success "output looks good" ' - grep "Found outdated fs-repo, starting migration." migrate_out > /dev/null && - grep "Success: fs-repo migrated to version $IPFS_REPO_VER" true_out > /dev/null + grep "Migrating repository from version" migrate_out > /dev/null && + (grep "Success: fs-repo migrated to version $IPFS_REPO_VER" migrate_out > /dev/null || + grep "Hybrid migration completed successfully: v$MIGRATION_START → v$IPFS_REPO_VER" migrate_out > /dev/null) ' test_expect_success "manually reset repo version to latest" ' @@ -102,7 +121,7 @@ test_expect_success "detect repo does not need migration" ' ' test_expect_success "output looks good" ' - grep "Repo does not require migration" migrate_out > /dev/null + grep "Repository is already at version" migrate_out > /dev/null ' # ensure that we get a lock error if we need to migrate and the daemon is running diff --git a/test/sharness/t0086-repo-verify.sh b/test/sharness/t0086-repo-verify.sh index 0f12fef8f..612d281ef 100755 --- a/test/sharness/t0086-repo-verify.sh +++ b/test/sharness/t0086-repo-verify.sh @@ -24,7 +24,10 @@ sort_rand() { } check_random_corruption() { - to_break=$(find "$IPFS_PATH/blocks" -type f -name '*.data' | sort_rand | head -n 1) + # Exclude well-known blocks from corruption as they cause test flakiness: + # - CIQL7TG2PB52XIZLLHDYIUFMHUQLMMZWBNBZSLDXFCPZ5VDNQQ2WDZQ.data: empty file block + # - CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data: empty directory block (has special handling, served from memory even when corrupted on disk) + to_break=$(find "$IPFS_PATH/blocks" -type f -name '*.data' | grep -v -E "CIQL7TG2PB52XIZLLHDYIUFMHUQLMMZWBNBZSLDXFCPZ5VDNQQ2WDZQ.data|CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y.data" | sort_rand | head -n 1) test_expect_success "back up file and overwrite it" ' cp "$to_break" backup_file && diff --git a/test/sharness/t0114-gateway-subdomains.sh b/test/sharness/t0114-gateway-subdomains.sh index 7d18ab265..ae1bc1a93 100755 --- a/test/sharness/t0114-gateway-subdomains.sh +++ b/test/sharness/t0114-gateway-subdomains.sh @@ -802,7 +802,7 @@ test_expect_success "request for http://fake.domain.com/ipfs/{CID} with X-Forwar # Kubo specific end-to-end test # (independent of gateway-conformance) -# test configuration beign wired up correctly end-to-end +# test configuration being wired up correctly end-to-end ## ============================================================================ ## Test support for wildcards in gateway config diff --git a/test/sharness/t0119-prometheus-data/prometheus_metrics b/test/sharness/t0119-prometheus-data/prometheus_metrics index fa257d9fc..12be12cb2 100644 --- a/test/sharness/t0119-prometheus-data/prometheus_metrics +++ b/test/sharness/t0119-prometheus-data/prometheus_metrics @@ -157,6 +157,7 @@ ipfs_fsrepo_datastore_sync_latency_seconds_bucket ipfs_fsrepo_datastore_sync_latency_seconds_count ipfs_fsrepo_datastore_sync_latency_seconds_sum ipfs_fsrepo_datastore_sync_total +ipfs_http_gw_concurrent_requests ipfs_http_request_duration_seconds ipfs_http_request_duration_seconds_count ipfs_http_request_duration_seconds_sum diff --git a/test/sharness/t0120-bootstrap.sh b/test/sharness/t0120-bootstrap.sh index 00141da1f..e4bbde78a 100755 --- a/test/sharness/t0120-bootstrap.sh +++ b/test/sharness/t0120-bootstrap.sh @@ -13,7 +13,10 @@ BP5="/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zk BP6="/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" BP7="/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" -test_description="Test ipfs repo operations" +test_description="Test ipfs bootstrap operations" + +# NOTE: For AutoConf bootstrap functionality (add default, --expand-auto, etc.) +# see test/cli/bootstrap_auto_test.go and test/cli/autoconf/expand_test.go . lib/test-lib.sh @@ -83,35 +86,12 @@ test_bootstrap_cmd() { test_bootstrap_list_cmd $BP2 - test_expect_success "'ipfs bootstrap add --default' succeeds" ' - ipfs bootstrap add --default >add2_actual - ' - - test_expect_success "'ipfs bootstrap add --default' output has default BP" ' - echo "added $BP1" >add2_expected && - echo "added $BP2" >>add2_expected && - echo "added $BP3" >>add2_expected && - echo "added $BP4" >>add2_expected && - echo "added $BP5" >>add2_expected && - echo "added $BP6" >>add2_expected && - echo "added $BP7" >>add2_expected && - test_cmp add2_expected add2_actual - ' - - test_bootstrap_list_cmd $BP1 $BP2 $BP3 $BP4 $BP5 $BP6 $BP7 - test_expect_success "'ipfs bootstrap rm --all' succeeds" ' ipfs bootstrap rm --all >rm2_actual ' test_expect_success "'ipfs bootstrap rm' output looks good" ' - echo "removed $BP1" >rm2_expected && - echo "removed $BP2" >>rm2_expected && - echo "removed $BP3" >>rm2_expected && - echo "removed $BP4" >>rm2_expected && - echo "removed $BP5" >>rm2_expected && - echo "removed $BP6" >>rm2_expected && - echo "removed $BP7" >>rm2_expected && + echo "removed $BP2" >rm2_expected && test_cmp rm2_expected rm2_actual ' diff --git a/test/sharness/t0181-private-network.sh b/test/sharness/t0181-private-network.sh index 5e566d317..efae18b15 100755 --- a/test/sharness/t0181-private-network.sh +++ b/test/sharness/t0181-private-network.sh @@ -10,6 +10,10 @@ test_description="Test private network feature" test_init_ipfs +test_expect_success "disable AutoConf for private network tests" ' + ipfs config --json AutoConf.Enabled false +' + export LIBP2P_FORCE_PNET=1 test_expect_success "daemon won't start with force pnet env but with no key" ' @@ -37,7 +41,8 @@ test_expect_success "set up iptb testbed" ' iptb testbed create -type localipfs -count 5 -force -init && iptb run -- ipfs config --json "Routing.LoopbackAddressesOnLanDHT" true && iptb run -- ipfs config --json "Swarm.Transports.Network.Websocket" false && - iptb run -- ipfs config --json Addresses.Swarm '"'"'["/ip4/127.0.0.1/tcp/0"]'"'"' + iptb run -- ipfs config --json Addresses.Swarm '"'"'["/ip4/127.0.0.1/tcp/0"]'"'"' && + iptb run -- ipfs config --json AutoConf.Enabled false ' set_key() { @@ -136,4 +141,23 @@ test_expect_success "stop testbed" ' test_kill_ipfs_daemon +# Test that AutoConf with default mainnet URL fails on private networks +test_expect_success "setup test repo with AutoConf enabled and private network" ' + export IPFS_PATH="$(pwd)/.ipfs-autoconf-test" && + ipfs init --profile=test > /dev/null && + ipfs config --json AutoConf.Enabled true && + pnet_key > "${IPFS_PATH}/swarm.key" +' + +test_expect_success "daemon fails with AutoConf + private network error" ' + export IPFS_PATH="$(pwd)/.ipfs-autoconf-test" && + test_expect_code 1 ipfs daemon > autoconf_stdout 2> autoconf_stderr +' + +test_expect_success "error message mentions AutoConf and private network conflict" ' + grep "AutoConf cannot use the default mainnet URL" autoconf_stderr > /dev/null && + grep "private network.*swarm.key" autoconf_stderr > /dev/null && + grep "AutoConf.Enabled=false" autoconf_stderr > /dev/null +' + test_done diff --git a/test/sharness/t0290-cid.sh b/test/sharness/t0290-cid.sh index e0c31a330..97ec0cd42 100755 --- a/test/sharness/t0290-cid.sh +++ b/test/sharness/t0290-cid.sh @@ -4,6 +4,11 @@ test_description="Test cid commands" . lib/test-lib.sh +# NOTE: Primary tests for "ipfs cid" commands are in test/cli/cid_test.go +# These sharness tests are kept for backward compatibility but new tests +# should be added to test/cli/cid_test.go instead. If any of these tests +# break, consider removing them and updating only the test/cli version. + # note: all "ipfs cid" commands should work without requiring a repo CIDv0="QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv" @@ -101,7 +106,7 @@ v 118 base32hex V 86 base32hexupper z 122 base58btc Z 90 base58flickr - 128640 base256emoji +🚀 128640 base256emoji EOF cat < codecs_expect @@ -240,13 +245,57 @@ cat < hashes_expect EOF test_expect_success "cid bases" ' - cut -c 12- bases_expect > expect && + cat <<-EOF > expect + identity + base2 + base32 + base32upper + base32pad + base32padupper + base16 + base16upper + base36 + base36upper + base64 + base64pad + base32hexpad + base32hexpadupper + base64url + base64urlpad + base32hex + base32hexupper + base58btc + base58flickr + base256emoji + EOF ipfs cid bases > actual && test_cmp expect actual ' test_expect_success "cid bases --prefix" ' - cut -c 1-3,12- bases_expect > expect && + cat <<-EOF > expect + identity + 0 base2 + b base32 + B base32upper + c base32pad + C base32padupper + f base16 + F base16upper + k base36 + K base36upper + m base64 + M base64pad + t base32hexpad + T base32hexpadupper + u base64url + U base64urlpad + v base32hex + V base32hexupper + z base58btc + Z base58flickr + 🚀 base256emoji + EOF ipfs cid bases --prefix > actual && test_cmp expect actual ' diff --git a/thirdparty/README.md b/thirdparty/README.md index a68b51c5d..a4774a4af 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -1,5 +1,2 @@ -thirdparty consists of Golang packages that contain no go-ipfs dependencies and -may be vendored ipfs/go-ipfs at a later date. - packages under this directory _must not_ import packages under -`ipfs/go-ipfs` that are not also under `thirdparty`. +`ipfs/kubo` that are not also under `thirdparty`. diff --git a/thirdparty/assert/assert.go b/thirdparty/assert/assert.go deleted file mode 100644 index f737d191e..000000000 --- a/thirdparty/assert/assert.go +++ /dev/null @@ -1,25 +0,0 @@ -package assert - -import "testing" - -func Nil(err error, t *testing.T, msgs ...string) { - if err != nil { - t.Fatal(msgs, "error:", err) - } -} - -func True(v bool, t *testing.T, msgs ...string) { - if !v { - t.Fatal(msgs) - } -} - -func False(v bool, t *testing.T, msgs ...string) { - True(!v, t, msgs...) -} - -func Err(err error, t *testing.T, msgs ...string) { - if err == nil { - t.Fatal(msgs, "error:", err) - } -} diff --git a/thirdparty/dir/dir.go b/thirdparty/dir/dir.go deleted file mode 100644 index 5aa93c329..000000000 --- a/thirdparty/dir/dir.go +++ /dev/null @@ -1,25 +0,0 @@ -package dir - -// TODO move somewhere generic - -import ( - "errors" - "os" - "path/filepath" -) - -// Writable ensures the directory exists and is writable. -func Writable(path string) error { - // Construct the path if missing - if err := os.MkdirAll(path, os.ModePerm); err != nil { - return err - } - // Check the directory is writable - if f, err := os.Create(filepath.Join(path, "._check_writable")); err == nil { - f.Close() - os.Remove(f.Name()) - } else { - return errors.New("'" + path + "' is not writable") - } - return nil -} diff --git a/thirdparty/notifier/notifier.go b/thirdparty/notifier/notifier.go deleted file mode 100644 index bb8860702..000000000 --- a/thirdparty/notifier/notifier.go +++ /dev/null @@ -1,142 +0,0 @@ -// Package notifier provides a simple notification dispatcher -// meant to be embedded in larger structures who wish to allow -// clients to sign up for event notifications. -package notifier - -import ( - "sync" - - process "github.com/jbenet/goprocess" - ratelimit "github.com/jbenet/goprocess/ratelimit" -) - -// Notifiee is a generic interface. Clients implement -// their own Notifiee interfaces to ensure type-safety -// of notifications: -// -// type RocketNotifiee interface{ -// Countdown(r Rocket, countdown time.Duration) -// LiftedOff(Rocket) -// ReachedOrbit(Rocket) -// Detached(Rocket, Capsule) -// Landed(Rocket) -// } -type Notifiee interface{} - -// Notifier is a notification dispatcher. It's meant -// to be composed, and its zero-value is ready to be used. -// -// type Rocket struct { -// notifier notifier.Notifier -// } -type Notifier struct { - mu sync.RWMutex // guards notifiees - nots map[Notifiee]struct{} - lim *ratelimit.RateLimiter -} - -// RateLimited returns a rate limited Notifier. only limit goroutines -// will be spawned. If limit is zero, no rate limiting happens. This -// is the same as `Notifier{}`. -func RateLimited(limit int) *Notifier { - n := &Notifier{} - if limit > 0 { - n.lim = ratelimit.NewRateLimiter(process.Background(), limit) - } - return n -} - -// Notify signs up Notifiee e for notifications. This function -// is meant to be called behind your own type-safe function(s): -// -// // generic function for pattern-following -// func (r *Rocket) Notify(n Notifiee) { -// r.notifier.Notify(n) -// } -// -// // or as part of other functions -// func (r *Rocket) Onboard(a Astronaut) { -// r.astronauts = append(r.austronauts, a) -// r.notifier.Notify(a) -// } -func (n *Notifier) Notify(e Notifiee) { - n.mu.Lock() - if n.nots == nil { // so that zero-value is ready to be used. - n.nots = make(map[Notifiee]struct{}) - } - n.nots[e] = struct{}{} - n.mu.Unlock() -} - -// StopNotify stops notifying Notifiee e. This function -// is meant to be called behind your own type-safe function(s): -// -// // generic function for pattern-following -// func (r *Rocket) StopNotify(n Notifiee) { -// r.notifier.StopNotify(n) -// } -// -// // or as part of other functions -// func (r *Rocket) Detach(c Capsule) { -// r.notifier.StopNotify(c) -// r.capsule = nil -// } -func (n *Notifier) StopNotify(e Notifiee) { - n.mu.Lock() - if n.nots != nil { // so that zero-value is ready to be used. - delete(n.nots, e) - } - n.mu.Unlock() -} - -// NotifyAll messages the notifier's notifiees with a given notification. -// This is done by calling the given function with each notifiee. It is -// meant to be called with your own type-safe notification functions: -// -// func (r *Rocket) Launch() { -// r.notifyAll(func(n Notifiee) { -// n.Launched(r) -// }) -// } -// -// // make it private so only you can use it. This function is necessary -// // to make sure you only up-cast in one place. You control who you added -// // to be a notifiee. If Go adds generics, maybe we can get rid of this -// // method but for now it is like wrapping a type-less container with -// // a type safe interface. -// func (r *Rocket) notifyAll(notify func(Notifiee)) { -// r.notifier.NotifyAll(func(n notifier.Notifiee) { -// notify(n.(Notifiee)) -// }) -// } -// -// Note well: each notification is launched in its own goroutine, so they -// can be processed concurrently, and so that whatever the notification does -// it _never_ blocks out the client. This is so that consumers _cannot_ add -// hooks into your object that block you accidentally. -func (n *Notifier) NotifyAll(notify func(Notifiee)) { - n.mu.Lock() - defer n.mu.Unlock() - - if n.nots == nil { // so that zero-value is ready to be used. - return - } - - // no rate limiting. - if n.lim == nil { - for notifiee := range n.nots { - go notify(notifiee) - } - return - } - - // with rate limiting. - n.lim.Go(func(worker process.Process) { - for notifiee := range n.nots { - notifiee := notifiee // rebind for loop data races - n.lim.LimitedGo(func(worker process.Process) { - notify(notifiee) - }) - } - }) -} diff --git a/thirdparty/notifier/notifier_test.go b/thirdparty/notifier/notifier_test.go deleted file mode 100644 index 401b3b02a..000000000 --- a/thirdparty/notifier/notifier_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package notifier - -import ( - "fmt" - "sync" - "testing" - "time" -) - -// test data structures. -type Router struct { - queue chan Packet - notifier Notifier -} - -type Packet struct{} - -type RouterNotifiee interface { - Enqueued(*Router, Packet) - Forwarded(*Router, Packet) - Dropped(*Router, Packet) -} - -func (r *Router) Notify(n RouterNotifiee) { - r.notifier.Notify(n) -} - -func (r *Router) StopNotify(n RouterNotifiee) { - r.notifier.StopNotify(n) -} - -func (r *Router) notifyAll(notify func(n RouterNotifiee)) { - r.notifier.NotifyAll(func(n Notifiee) { - notify(n.(RouterNotifiee)) - }) -} - -func (r *Router) Receive(p Packet) { - select { - case r.queue <- p: // enqueued - r.notifyAll(func(n RouterNotifiee) { - n.Enqueued(r, p) - }) - - default: // drop - r.notifyAll(func(n RouterNotifiee) { - n.Dropped(r, p) - }) - } -} - -func (r *Router) Forward() { - p := <-r.queue - r.notifyAll(func(n RouterNotifiee) { - n.Forwarded(r, p) - }) -} - -type Metrics struct { - enqueued int - forwarded int - dropped int - received chan struct{} - sync.Mutex -} - -func (m *Metrics) Enqueued(*Router, Packet) { - m.Lock() - m.enqueued++ - m.Unlock() - if m.received != nil { - m.received <- struct{}{} - } -} - -func (m *Metrics) Forwarded(*Router, Packet) { - m.Lock() - m.forwarded++ - m.Unlock() - if m.received != nil { - m.received <- struct{}{} - } -} - -func (m *Metrics) Dropped(*Router, Packet) { - m.Lock() - m.dropped++ - m.Unlock() - if m.received != nil { - m.received <- struct{}{} - } -} - -func (m *Metrics) String() string { - m.Lock() - defer m.Unlock() - return fmt.Sprintf("%d enqueued, %d forwarded, %d in queue, %d dropped", - m.enqueued, m.forwarded, m.enqueued-m.forwarded, m.dropped) -} - -func TestNotifies(t *testing.T) { - m := Metrics{received: make(chan struct{})} - r := Router{queue: make(chan Packet, 10)} - r.Notify(&m) - - for i := 0; i < 10; i++ { - r.Receive(Packet{}) - <-m.received - if m.enqueued != (1 + i) { - t.Error("not notifying correctly", m.enqueued, 1+i) - } - - } - - for i := 0; i < 10; i++ { - r.Receive(Packet{}) - <-m.received - if m.enqueued != 10 { - t.Error("not notifying correctly", m.enqueued, 10) - } - if m.dropped != (1 + i) { - t.Error("not notifying correctly", m.dropped, 1+i) - } - } -} - -func TestStopsNotifying(t *testing.T) { - m := Metrics{received: make(chan struct{})} - r := Router{queue: make(chan Packet, 10)} - r.Notify(&m) - - for i := 0; i < 5; i++ { - r.Receive(Packet{}) - <-m.received - if m.enqueued != (1 + i) { - t.Error("not notifying correctly") - } - } - - r.StopNotify(&m) - - for i := 0; i < 5; i++ { - r.Receive(Packet{}) - select { - case <-m.received: - t.Error("did not stop notifying") - default: - } - if m.enqueued != 5 { - t.Error("did not stop notifying") - } - } -} - -func TestThreadsafe(t *testing.T) { - N := 1000 - r := Router{queue: make(chan Packet, 10)} - m1 := Metrics{received: make(chan struct{})} - m2 := Metrics{received: make(chan struct{})} - m3 := Metrics{received: make(chan struct{})} - r.Notify(&m1) - r.Notify(&m2) - r.Notify(&m3) - - var n int - var wg sync.WaitGroup - for i := 0; i < N; i++ { - n++ - wg.Add(1) - go func() { - defer wg.Done() - r.Receive(Packet{}) - }() - - if i%3 == 0 { - n++ - wg.Add(1) - go func() { - defer wg.Done() - r.Forward() - }() - } - } - - // drain queues - for i := 0; i < (n * 3); i++ { - select { - case <-m1.received: - case <-m2.received: - case <-m3.received: - } - } - - wg.Wait() - - // counts should be correct and all agree. and this should - // run fine under `go test -race -cpu=5` - - t.Log("m1", m1.String()) - t.Log("m2", m2.String()) - t.Log("m3", m3.String()) - - if m1.String() != m2.String() || m2.String() != m3.String() { - t.Error("counts disagree") - } -} - -type highwatermark struct { - mu sync.Mutex - mark int - limit int - errs chan error -} - -func (m *highwatermark) incr() { - m.mu.Lock() - m.mark++ - // fmt.Println("incr", m.mark) - if m.mark > m.limit { - m.errs <- fmt.Errorf("went over limit: %d/%d", m.mark, m.limit) - } - m.mu.Unlock() -} - -func (m *highwatermark) decr() { - m.mu.Lock() - m.mark-- - // fmt.Println("decr", m.mark) - if m.mark < 0 { - m.errs <- fmt.Errorf("went under zero: %d/%d", m.mark, m.limit) - } - m.mu.Unlock() -} - -func TestLimited(t *testing.T) { - timeout := 10 * time.Second // huge timeout. - limit := 9 - - hwm := highwatermark{limit: limit, errs: make(chan error, 100)} - n := RateLimited(limit) // will stop after 3 rounds - n.Notify(1) - n.Notify(2) - n.Notify(3) - - entr := make(chan struct{}) - exit := make(chan struct{}) - done := make(chan struct{}) - go func() { - for i := 0; i < 10; i++ { - // fmt.Printf("round: %d\n", i) - n.NotifyAll(func(e Notifiee) { - hwm.incr() - entr <- struct{}{} - <-exit // wait - hwm.decr() - }) - } - done <- struct{}{} - }() - - for i := 0; i < 30; { - select { - case <-entr: - continue // let as many enter as possible - case <-time.After(1 * time.Millisecond): - } - - // let one exit - select { - case <-entr: - continue // in case of timing issues. - case exit <- struct{}{}: - case <-time.After(timeout): - t.Error("got stuck") - } - i++ - } - - select { - case <-done: // two parts done - case <-time.After(timeout): - t.Error("did not finish") - } - - close(hwm.errs) - for err := range hwm.errs { - t.Error(err) - } -} diff --git a/version.go b/version.go index af28b1aab..244fe6726 100644 --- a/version.go +++ b/version.go @@ -3,18 +3,19 @@ package ipfs import ( "fmt" "runtime" - - "github.com/ipfs/kubo/repo/fsrepo" ) // CurrentCommit is the current git commit, this is set as a ldflag in the Makefile. var CurrentCommit string // CurrentVersionNumber is the current application's version literal. -const CurrentVersionNumber = "0.36.0" +const CurrentVersionNumber = "0.37.0" const ApiVersion = "/kubo/" + CurrentVersionNumber + "/" //nolint +// RepoVersion is the version number that we are currently expecting to see. +const RepoVersion = 17 + // GetUserAgentVersion is the libp2p user agent used by go-ipfs. // // Note: This will end in `/` when no commit is available. This is expected. @@ -47,7 +48,7 @@ func GetVersionInfo() *VersionInfo { return &VersionInfo{ Version: CurrentVersionNumber, Commit: CurrentCommit, - Repo: fmt.Sprint(fsrepo.RepoVersion), + Repo: fmt.Sprint(RepoVersion), System: runtime.GOARCH + "/" + runtime.GOOS, // TODO: Precise version here Golang: runtime.Version(), }