Mirror of https://github.com/ipfs/kubo.git (synced 2026-02-21 10:27:46 +08:00)
Commit 6898472220
.codespell-ignore (deleted file, -21 lines; filename inferred from the spellcheck workflow change below, which drops `ignore_words_file: .codespell-ignore`)
@@ -1,21 +0,0 @@
-Adin
-nd
-Nd
-afile
-thirdparty
-receivedFrom
-origN
-hel
-TotalIn
-childs
-userA
-AssignT
-OT
-AssignT
-fo
-recusive
-raison
-Boddy
-ressource
-achin
-re-using
.cspell.yml (new file, +6 lines)
@@ -0,0 +1,6 @@
+ignoreWords:
+  - childs # This spelling is used in the files command
+  - NodeCreater # This spelling is used in the fuse dependency
+  - Boddy # One of the contributors to the project - Chris Boddy
+  - Botto # One of the contributors to the project - Santiago Botto
+  - cose # dag-cose
.gitattributes (+20 lines)
@@ -15,3 +15,23 @@ LICENSE text eol=auto
 # Binary assets
 assets/init-doc/* binary
 core/coreunix/test_data/** binary
+test/cli/migrations/testdata/** binary
+
+# Generated test data
+test/cli/migrations/testdata/** linguist-generated=true
+test/cli/autoconf/testdata/** linguist-generated=true
+test/cli/fixtures/** linguist-generated=true
+test/sharness/t0054-dag-car-import-export-data/** linguist-generated=true
+test/sharness/t0109-gateway-web-_redirects-data/** linguist-generated=true
+test/sharness/t0114-gateway-subdomains/** linguist-generated=true
+test/sharness/t0115-gateway-dir-listing/** linguist-generated=true
+test/sharness/t0116-gateway-cache/** linguist-generated=true
+test/sharness/t0119-prometheus-data/** linguist-generated=true
+test/sharness/t0165-keystore-data/** linguist-generated=true
+test/sharness/t0275-cid-security-data/** linguist-generated=true
+test/sharness/t0280-plugin-dag-jose-data/** linguist-generated=true
+test/sharness/t0280-plugin-data/** linguist-generated=true
+test/sharness/t0280-plugin-git-data/** linguist-generated=true
+test/sharness/t0400-api-no-gateway/** linguist-generated=true
+test/sharness/t0701-delegated-routing-reframe/** linguist-generated=true
+test/sharness/t0702-delegated-routing-http/** linguist-generated=true
.github/workflows/codeql-analysis.yml (4 changed lines)
@@ -29,12 +29,12 @@ jobs:
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v4
+      uses: actions/checkout@v5
 
     - name: Setup Go
       uses: actions/setup-go@v5
      with:
-        go-version: 1.24.x
+        go-version: 1.25.x
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
.github/workflows/docker-build.yml (deleted file, -34 lines; replaced by docker-check.yml below)
@@ -1,34 +0,0 @@
-# If we decide to run build-image.yml on every PR, we could deprecate this workflow.
-name: Docker Build
-
-on:
-  workflow_dispatch:
-  pull_request:
-    paths-ignore:
-      - '**/*.md'
-  push:
-    branches:
-      - 'master'
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  docker-build:
-    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
-    runs-on: ubuntu-latest
-    timeout-minutes: 10
-    env:
-      IMAGE_NAME: ipfs/kubo
-      WIP_IMAGE_TAG: wip
-    defaults:
-      run:
-        shell: bash
-    steps:
-      - uses: actions/setup-go@v5
-        with:
-          go-version: 1.24.x
-      - uses: actions/checkout@v4
-      - run: docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG .
-      - run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version
.github/workflows/docker-check.yml (new file, +62 lines)
@@ -0,0 +1,62 @@
+# This workflow performs a quick Docker build check on PRs and pushes to master.
+# It builds the Docker image and runs a basic smoke test to ensure the image works.
+# This is a lightweight check - for full multi-platform builds and publishing, see docker-image.yml
+name: Docker Check
+
+on:
+  workflow_dispatch:
+  pull_request:
+    paths-ignore:
+      - '**/*.md'
+  push:
+    branches:
+      - 'master'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  lint:
+    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - uses: actions/checkout@v5
+      - uses: hadolint/hadolint-action@v3.1.0
+        with:
+          dockerfile: Dockerfile
+          failure-threshold: warning
+          verbose: true
+          format: tty
+
+  build:
+    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    env:
+      IMAGE_NAME: ipfs/kubo
+      WIP_IMAGE_TAG: wip
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - uses: actions/checkout@v5
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Build Docker image with BuildKit
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          push: false
+          load: true
+          tags: ${{ env.IMAGE_NAME }}:${{ env.WIP_IMAGE_TAG }}
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max
+
+      - name: Test Docker image
+        run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version
.github/workflows/docker-image.yml (55 changed lines; the local buildx layer cache is replaced by the GitHub Actions cache backend plus a registry build cache)
@@ -1,3 +1,7 @@
+# This workflow builds and publishes official Docker images to Docker Hub.
+# It handles multi-platform builds (amd64, arm/v7, arm64/v8) and pushes tagged releases.
+# This workflow is triggered on tags, specific branches, and can be manually dispatched.
+# For quick build checks during development, see docker-check.yml
 name: Docker Push
 
 on:
@@ -38,7 +42,7 @@ jobs:
       LEGACY_IMAGE_NAME: ipfs/go-ipfs
     steps:
       - name: Check out the repo
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
@@ -46,13 +50,11 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
 
-      - name: Cache Docker layers
-        uses: actions/cache@v4
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v3
         with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
+          username: ${{ vars.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
 
       - name: Get tags
         id: tags
@@ -63,12 +65,6 @@ jobs:
           echo "EOF" >> $GITHUB_OUTPUT
         shell: bash
 
-      - name: Log in to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ vars.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-
       # We have to build each platform separately because when using multi-arch
       # builds, only one platform is being loaded into the cache. This would
       # prevent us from testing the other platforms.
@@ -81,8 +77,10 @@ jobs:
          load: true
          file: ./Dockerfile
          tags: ${{ env.IMAGE_NAME }}:linux-amd64
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max
 
       - name: Build Docker image (linux/arm/v7)
         uses: docker/build-push-action@v6
@@ -93,8 +91,10 @@ jobs:
          load: true
          file: ./Dockerfile
          tags: ${{ env.IMAGE_NAME }}:linux-arm-v7
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max
 
       - name: Build Docker image (linux/arm64/v8)
         uses: docker/build-push-action@v6
@@ -105,8 +105,10 @@ jobs:
          load: true
          file: ./Dockerfile
          tags: ${{ env.IMAGE_NAME }}:linux-arm64-v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max
 
       # We test all the images on amd64 host here. This uses QEMU to emulate
       # the other platforms.
@@ -132,12 +134,9 @@ jobs:
          push: true
          file: ./Dockerfile
          tags: "${{ github.event.inputs.tags || steps.tags.outputs.value }}"
-          cache-from: type=local,src=/tmp/.buildx-cache-new
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
-
-      # https://github.com/docker/build-push-action/issues/252
-      # https://github.com/moby/buildkit/issues/1896
-      - name: Move cache to limit growth
-        run: |
-          rm -rf /tmp/.buildx-cache
-          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: |
+            type=gha,mode=max
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache,mode=max
.github/workflows/gateway-conformance.yml (8 changed lines)
@@ -49,12 +49,12 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.24.x
+          go-version: 1.25.x
       - uses: protocol/cache-go-action@v1
         with:
           name: ${{ github.job }}
       - name: Checkout kubo-gateway
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           path: kubo-gateway
       - name: Build kubo-gateway
@@ -136,12 +136,12 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.24.x
+          go-version: 1.25.x
       - uses: protocol/cache-go-action@v1
         with:
           name: ${{ github.job }}
       - name: Checkout kubo-gateway
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           path: kubo-gateway
       - name: Build kubo-gateway
.github/workflows/gobuild.yml (4 changed lines)
@@ -30,8 +30,8 @@ jobs:
     steps:
       - uses: actions/setup-go@v5
         with:
-          go-version: 1.24.x
-      - uses: actions/checkout@v4
+          go-version: 1.25.x
+      - uses: actions/checkout@v5
       - run: make cmd/ipfs-try-build
         env:
           TEST_FUSE: 1
.github/workflows/golang-analysis.yml (4 changed lines)
@@ -22,12 +22,12 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 10
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          submodules: recursive
      - uses: actions/setup-go@v5
        with:
-          go-version: "1.24.x"
+          go-version: "1.25.x"
      - name: Check that go.mod is tidy
        uses: protocol/multiple-go-modules@v1.4
        with:
.github/workflows/golint.yml (4 changed lines)
@@ -31,6 +31,6 @@ jobs:
     steps:
       - uses: actions/setup-go@v5
         with:
-          go-version: 1.24.x
-      - uses: actions/checkout@v4
+          go-version: 1.25.x
+      - uses: actions/checkout@v5
       - run: make -O test_go_lint
.github/workflows/gotest.yml (4 changed lines)
@@ -32,9 +32,9 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.24.x
+          go-version: 1.25.x
       - name: Check out Kubo
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Install missing tools
         run: sudo apt update && sudo apt install -y zsh
       - name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
.github/workflows/interop.yml (12 changed lines)
@@ -10,7 +10,7 @@ on:
       - 'master'
 
 env:
-  GO_VERSION: 1.24.x
+  GO_VERSION: 1.25.x
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
@@ -39,7 +39,7 @@ jobs:
       - uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - run: make build
       - uses: actions/upload-artifact@v4
         with:
@@ -56,7 +56,7 @@ jobs:
       - uses: actions/setup-node@v4
         with:
           node-version: lts/*
-      - uses: actions/download-artifact@v4
+      - uses: actions/download-artifact@v5
         with:
           name: kubo
           path: cmd/ipfs
@@ -91,13 +91,13 @@ jobs:
     steps:
       - uses: actions/setup-node@v4
         with:
-          node-version: 18.14.0
-      - uses: actions/download-artifact@v4
+          node-version: 20.x
+      - uses: actions/download-artifact@v5
         with:
           name: kubo
           path: cmd/ipfs
       - run: chmod +x cmd/ipfs/ipfs
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           repository: ipfs/ipfs-webui
           path: ipfs-webui
.github/workflows/sharness.yml (4 changed lines)
@@ -25,9 +25,9 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.24.x
+          go-version: 1.25.x
       - name: Checkout Kubo
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           path: kubo
       - name: Install missing tools
.github/workflows/spellcheck.yml (rewritten to call a reusable workflow instead of running codespell directly)
@@ -1,21 +1,18 @@
 name: Spell Check
 
-on: [push, pull_request]
+on:
+  pull_request:
+  push:
+    branches: ["master"]
+  workflow_dispatch:
+
+permissions:
+  contents: read
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
+  cancel-in-progress: true
 
 jobs:
   spellcheck:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      - name: Install Codespell
-        run: pip install codespell==2.4.0
-
-      - name: Run Codespell
-        uses: codespell-project/actions-codespell@v2
-        with:
-          only_warn: 1
-          ignore_words_file: .codespell-ignore
-          skip: "*.mod,*.sum,*.pdf,./docs/AUTHORS,./test/sharness/t0275-cid-security-data,./test/sharness/t0280-plugin-dag-jose-data,./bin"
+    uses: ipdxco/unified-github-workflows/.github/workflows/reusable-spellcheck.yml@v1
.hadolint.yaml (new file, +13 lines)
@@ -0,0 +1,13 @@
+# Hadolint configuration for Kubo Docker image
+# https://github.com/hadolint/hadolint
+
+# Ignore specific rules
+ignored:
+  # DL3008: Pin versions in apt-get install
+  # We use stable base images and prefer smaller layers over version pinning
+  - DL3008
+
+# Trust base images from these registries
+trustedRegistries:
+  - docker.io
+  - gcr.io
CHANGELOG.md (filename inferred; the hunk adds the v0.37 entry to the changelog index)
@@ -1,5 +1,6 @@
 # Kubo Changelogs
 
+- [v0.37](docs/changelogs/v0.37.md)
 - [v0.36](docs/changelogs/v0.36.md)
 - [v0.35](docs/changelogs/v0.35.md)
 - [v0.34](docs/changelogs/v0.34.md)
Dockerfile (95 changed lines; moves to Go 1.25, BuildKit cache mounts, debian:bookworm-slim utilities stage, and consolidated directory setup)
@@ -1,13 +1,16 @@
-FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.24 AS builder
+# syntax=docker/dockerfile:1
+# Enables BuildKit with cache mounts for faster builds
+FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.25 AS builder
 
 ARG TARGETOS TARGETARCH
 
-ENV SRC_DIR /kubo
+ENV SRC_DIR=/kubo
 
-# Download packages first so they can be cached.
+# Cache go module downloads between builds for faster rebuilds
 COPY go.mod go.sum $SRC_DIR/
-RUN cd $SRC_DIR \
-  && go mod download
+WORKDIR $SRC_DIR
+RUN --mount=type=cache,target=/go/pkg/mod \
+    go mod download
 
 COPY . $SRC_DIR
 
@@ -18,92 +21,78 @@ ARG IPFS_PLUGINS
 # Allow for other targets to be built, e.g.: docker build --build-arg MAKE_TARGET="nofuse"
 ARG MAKE_TARGET=build
 
-# Build the thing.
-# Also: fix getting HEAD commit hash via git rev-parse.
-RUN cd $SRC_DIR \
-  && mkdir -p .git/objects \
+# Build ipfs binary with cached go modules and build cache.
+# mkdir .git/objects allows git rev-parse to read commit hash for version info
+RUN --mount=type=cache,target=/go/pkg/mod \
+    --mount=type=cache,target=/root/.cache/go-build \
+    mkdir -p .git/objects \
   && GOOS=$TARGETOS GOARCH=$TARGETARCH GOFLAGS=-buildvcs=false make ${MAKE_TARGET} IPFS_PLUGINS=$IPFS_PLUGINS
 
-# Using Debian Buster because the version of busybox we're using is based on it
-# and we want to make sure the libraries we're using are compatible. That's also
-# why we're running this for the target platform.
-FROM debian:stable-slim AS utilities
+# Extract required runtime tools from Debian.
+# We use Debian instead of Alpine because we need glibc compatibility
+# for the busybox base image we're using.
+FROM debian:bookworm-slim AS utilities
 RUN set -eux; \
   apt-get update; \
-  apt-get install -y \
+  apt-get install -y --no-install-recommends \
    tini \
    # Using gosu (~2MB) instead of su-exec (~20KB) because it's easier to
    # install on Debian. Useful links:
    # - https://github.com/ncopa/su-exec#why-reinvent-gosu
    # - https://github.com/tianon/gosu/issues/52#issuecomment-441946745
    gosu \
-    # This installs fusermount which we later copy over to the target image.
+    # fusermount enables IPFS mount commands
    fuse \
    ca-certificates \
  ; \
-  rm -rf /var/lib/apt/lists/*
+  apt-get clean; \
+  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
 
-# Now comes the actual target image, which aims to be as small as possible.
+# Final minimal image with shell for debugging (busybox provides sh)
 FROM busybox:stable-glibc
 
-# Get the ipfs binary, entrypoint script, and TLS CAs from the build container.
-ENV SRC_DIR /kubo
+# Copy ipfs binary, startup scripts, and runtime dependencies
+ENV SRC_DIR=/kubo
 COPY --from=utilities /usr/sbin/gosu /sbin/gosu
 COPY --from=utilities /usr/bin/tini /sbin/tini
 COPY --from=utilities /bin/fusermount /usr/local/bin/fusermount
 COPY --from=utilities /etc/ssl/certs /etc/ssl/certs
 COPY --from=builder $SRC_DIR/cmd/ipfs/ipfs /usr/local/bin/ipfs
-COPY --from=builder $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs
+COPY --from=builder --chmod=755 $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs
 COPY --from=builder $SRC_DIR/bin/container_init_run /usr/local/bin/container_init_run
 
-# Add suid bit on fusermount so it will run properly
+# Set SUID for fusermount to enable FUSE mounting by non-root user
 RUN chmod 4755 /usr/local/bin/fusermount
 
-# Fix permissions on start_ipfs (ignore the build machine's permissions)
-RUN chmod 0755 /usr/local/bin/start_ipfs
-
-# Swarm TCP; should be exposed to the public
-EXPOSE 4001
-# Swarm UDP; should be exposed to the public
-EXPOSE 4001/udp
-# Daemon API; must not be exposed publicly but to client services under you control
+# Swarm P2P port (TCP/UDP) - expose publicly for peer connections
+EXPOSE 4001 4001/udp
+# API port - keep private, only for trusted clients
 EXPOSE 5001
-# Web Gateway; can be exposed publicly with a proxy, e.g. as https://ipfs.example.org
+# Gateway port - can be exposed publicly via reverse proxy
 EXPOSE 8080
-# Swarm Websockets; must be exposed publicly when the node is listening using the websocket transport (/ipX/.../tcp/8081/ws).
+# Swarm WebSockets - expose publicly for browser-based peers
 EXPOSE 8081
 
-# Create the fs-repo directory and switch to a non-privileged user.
-ENV IPFS_PATH /data/ipfs
-RUN mkdir -p $IPFS_PATH \
+# Create ipfs user (uid 1000) and required directories with proper ownership
+ENV IPFS_PATH=/data/ipfs
+RUN mkdir -p $IPFS_PATH /ipfs /ipns /mfs /container-init.d \
   && adduser -D -h $IPFS_PATH -u 1000 -G users ipfs \
-  && chown ipfs:users $IPFS_PATH
-
-# Create mount points for `ipfs mount` command
-RUN mkdir /ipfs /ipns /mfs \
-  && chown ipfs:users /ipfs /ipns /mfs
-
-# Create the init scripts directory
-RUN mkdir /container-init.d \
-  && chown ipfs:users /container-init.d
+  && chown ipfs:users $IPFS_PATH /ipfs /ipns /mfs /container-init.d
 
-# Expose the fs-repo as a volume.
-# start_ipfs initializes an fs-repo if none is mounted.
-# Important this happens after the USER directive so permissions are correct.
+# Volume for IPFS repository data persistence
 VOLUME $IPFS_PATH
 
 # The default logging level
-ENV GOLOG_LOG_LEVEL ""
+ENV GOLOG_LOG_LEVEL=""
 
-# This just makes sure that:
-# 1. There's an fs-repo, and initializes one if there isn't.
-# 2. The API and Gateway are accessible from outside the container.
+# Entrypoint initializes IPFS repo if needed and configures networking.
+# tini ensures proper signal handling and zombie process cleanup
 ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start_ipfs"]
 
-# Healthcheck for the container
-# QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn is the CID of empty folder
+# Health check verifies IPFS daemon is responsive.
+# Uses empty directory CID (QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn) as test
 HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
   CMD ipfs --api=/ip4/127.0.0.1/tcp/5001 dag stat /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn || exit 1
 
-# Execute the daemon subcommand by default
+# Default: run IPFS daemon with auto-migration enabled
CMD ["daemon", "--migrate=true", "--agent-version-suffix=docker"]
bin/mkreleaselog (filename inferred from the statlog/release_log/ensure function names; release-changelog shell script)
@@ -79,7 +79,12 @@ msg() {
 
 statlog() {
     local module="$1"
-    local rpath="$GOPATH/src/$(strip_version "$module")"
+    local rpath
+    if [[ "$module" == "github.com/ipfs/kubo" ]]; then
+        rpath="$ROOT_DIR"
+    else
+        rpath="$GOPATH/src/$(strip_version "$module")"
+    fi
     local start="${2:-}"
     local end="${3:-HEAD}"
     local mailmap_file="$rpath/.mailmap"
@@ -166,7 +171,12 @@ release_log() {
     local start="$2"
     local end="${3:-HEAD}"
     local repo="$(strip_version "$1")"
-    local dir="$GOPATH/src/$repo"
+    local dir
+    if [[ "$module" == "github.com/ipfs/kubo" ]]; then
+        dir="$ROOT_DIR"
+    else
+        dir="$GOPATH/src/$repo"
+    fi
 
     local commit pr
     git -C "$dir" log \
@@ -203,8 +213,13 @@ mod_deps() {
 ensure() {
     local repo="$(strip_version "$1")"
     local commit="$2"
-    local rpath="$GOPATH/src/$repo"
-    if [[ ! -d "$rpath" ]]; then
+    local rpath
+    if [[ "$1" == "github.com/ipfs/kubo" ]]; then
+        rpath="$ROOT_DIR"
+    else
+        rpath="$GOPATH/src/$repo"
+    fi
+    if [[ "$1" != "github.com/ipfs/kubo" ]] && [[ ! -d "$rpath" ]]; then
        msg "Cloning $repo..."
        git clone "http://$repo" "$rpath" >&2
    fi
@@ -237,10 +252,7 @@ recursive_release_log() {
     local module="$(go list -m)"
     local dir="$(go list -m -f '{{.Dir}}')"
 
-    if [[ "${GOPATH}/${module}" -ef "${dir}" ]]; then
-        echo "This script requires the target module and all dependencies to live in a GOPATH."
-        return 1
-    fi
+    # Kubo can be run from any directory, dependencies still use GOPATH
 
     (
        local result=0
(package rpc API test; filename not shown in the capture)
@@ -2,9 +2,9 @@ package rpc
 
 import (
 	"context"
+	"errors"
 	"net/http"
 	"net/http/httptest"
-	"runtime"
 	"strconv"
 	"strings"
 	"sync"
@@ -12,11 +12,11 @@ import (
 	"time"
 
 	"github.com/ipfs/boxo/path"
+	"github.com/ipfs/kubo/config"
 	iface "github.com/ipfs/kubo/core/coreiface"
 	"github.com/ipfs/kubo/core/coreiface/tests"
 	"github.com/ipfs/kubo/test/cli/harness"
 	ma "github.com/multiformats/go-multiaddr"
-	"go.uber.org/multierr"
 )
 
 type NodeProvider struct{}
@@ -45,6 +45,9 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent
 
 		c := n.ReadConfig()
 		c.Experimental.FilestoreEnabled = true
+		// only provide things we pin. Allows to test
+		// provide operations.
+		c.Reprovider.Strategy = config.NewOptionalString("roots")
 		n.WriteConfig(c)
 		n.StartDaemon("--enable-pubsub-experiment", "--offline="+strconv.FormatBool(!online))
@@ -88,16 +91,12 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent
 
 	wg.Wait()
 
-	return apis, multierr.Combine(errs...)
+	return apis, errors.Join(errs...)
 }
 
 func TestHttpApi(t *testing.T) {
 	t.Parallel()
 
-	if runtime.GOOS == "windows" {
-		t.Skip("skipping due to #9905")
-	}
-
 	tests.TestApi(NodeProvider{})(t)
 }
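
The multierr → errors.Join swap above leans on stdlib semantics worth making explicit: errors.Join (Go 1.20+) discards nil entries and returns nil when every argument is nil, which is what makes `return apis, errors.Join(errs...)` a drop-in replacement for multierr.Combine here. A minimal standalone sketch (toy error values, not from the kubo test suite):

package main

import (
	"errors"
	"fmt"
)

func main() {
	errA := errors.New("node 1 failed to start")
	errB := errors.New("node 3 failed to start")

	// All-nil input joins to a nil error, so "no failures" stays nil.
	fmt.Println(errors.Join(nil, nil) == nil) // true

	// Non-nil entries are kept and remain matchable with errors.Is.
	joined := errors.Join(errA, nil, errB)
	fmt.Println(errors.Is(joined, errB)) // true
	fmt.Println(joined)                  // both messages, newline-separated
}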
(addMigrationFiles helper; filename not shown in the capture — the Unixfs pin option gains a second argument)
@@ -86,7 +86,7 @@ func addMigrationFiles(ctx context.Context, node *core.IpfsNode, paths []string,
 		return err
 	}
 
-	ipfsPath, err := ufs.Add(ctx, files.NewReaderStatFile(f, fi), options.Unixfs.Pin(pin))
+	ipfsPath, err := ufs.Add(ctx, files.NewReaderStatFile(f, fi), options.Unixfs.Pin(pin, ""))
 	if err != nil {
 		return err
 	}
(daemon command — daemonFunc and helpers; filename not shown in the capture; continues through the next two blocks)
@@ -34,8 +34,6 @@ import (
 	nodeMount "github.com/ipfs/kubo/fuse/node"
 	fsrepo "github.com/ipfs/kubo/repo/fsrepo"
 	"github.com/ipfs/kubo/repo/fsrepo/migrations"
-	"github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher"
-	goprocess "github.com/jbenet/goprocess"
 	p2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
 	pnet "github.com/libp2p/go-libp2p/core/pnet"
 	"github.com/libp2p/go-libp2p/core/protocol"
@@ -45,7 +43,6 @@ import (
 	manet "github.com/multiformats/go-multiaddr/net"
 	prometheus "github.com/prometheus/client_golang/prometheus"
 	promauto "github.com/prometheus/client_golang/prometheus/promauto"
-	"go.uber.org/multierr"
 )
 
 const (
@@ -67,6 +64,7 @@ const (
 	routingOptionDHTServerKwd = "dhtserver"
 	routingOptionNoneKwd      = "none"
 	routingOptionCustomKwd    = "custom"
+	routingOptionDelegatedKwd = "delegated"
 	routingOptionDefaultKwd   = "default"
 	routingOptionAutoKwd      = "auto"
 	routingOptionAutoClientKwd = "autoclient"
@@ -277,7 +275,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 	}
 
 	var cacheMigrations, pinMigrations bool
-	var fetcher migrations.Fetcher
+	var externalMigrationFetcher migrations.Fetcher
 
 	// acquire the repo lock _before_ constructing a node. we need to make
 	// sure we are permitted to access the resources (datastore, etc.)
@@ -287,74 +285,39 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 		return err
 	case fsrepo.ErrNeedMigration:
 		domigrate, found := req.Options[migrateKwd].(bool)
-		fmt.Println("Found outdated fs-repo, migrations need to be run.")
+
+		// Get current repo version for more informative message
+		currentVersion, verErr := migrations.RepoVersion(cctx.ConfigRoot)
+		if verErr != nil {
+			// Fallback to generic message if we can't read version
+			fmt.Printf("Kubo repository at %s requires migration.\n", cctx.ConfigRoot)
+		} else {
+			fmt.Printf("Kubo repository at %s has version %d and needs to be migrated to version %d.\n",
+				cctx.ConfigRoot, currentVersion, version.RepoVersion)
+		}
 
 		if !found {
 			domigrate = YesNoPrompt("Run migrations now? [y/N]")
 		}
 
 		if !domigrate {
-			fmt.Println("Not running migrations of fs-repo now.")
-			fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.tech")
+			fmt.Printf("Not running migrations on repository at %s. Re-run daemon with --migrate or see 'ipfs repo migrate --help'\n", cctx.ConfigRoot)
 			return errors.New("fs-repo requires migration")
 		}
 
-		// Read Migration section of IPFS config
-		configFileOpt, _ := req.Options[commands.ConfigFileOption].(string)
-		migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot, configFileOpt)
+		// Use hybrid migration strategy that intelligently combines external and embedded migrations
+		err = migrations.RunHybridMigrations(cctx.Context(), version.RepoVersion, cctx.ConfigRoot, false)
 		if err != nil {
-			return err
-		}
-
-		// Define function to create IPFS fetcher. Do not supply an
-		// already-constructed IPFS fetcher, because this may be expensive and
-		// not needed according to migration config. Instead, supply a function
-		// to construct the particular IPFS fetcher implementation used here,
-		// which is called only if an IPFS fetcher is needed.
-		newIpfsFetcher := func(distPath string) migrations.Fetcher {
-			return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot, configFileOpt)
-		}
-
-		// Fetch migrations from current distribution, or location from environ
-		fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist)
-
-		// Create fetchers according to migrationCfg.DownloadSources
-		fetcher, err = migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher)
-		if err != nil {
-			return err
-		}
-		defer fetcher.Close()
-
-		if migrationCfg.Keep == "cache" {
-			cacheMigrations = true
-		} else if migrationCfg.Keep == "pin" {
-			pinMigrations = true
-		}
-
-		if cacheMigrations || pinMigrations {
-			// Create temp directory to store downloaded migration archives
-			migrations.DownloadDirectory, err = os.MkdirTemp("", "migrations")
-			if err != nil {
-				return err
-			}
-			// Defer cleanup of download directory so that it gets cleaned up
-			// if daemon returns early due to error
-			defer func() {
-				if migrations.DownloadDirectory != "" {
-					os.RemoveAll(migrations.DownloadDirectory)
-				}
-			}()
-		}
-
-		err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", false)
-		if err != nil {
-			fmt.Println("The migrations of fs-repo failed:")
+			fmt.Println("Repository migration failed:")
 			fmt.Printf("  %s\n", err)
 			fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
-			fmt.Println("  https://github.com/ipfs/fs-repo-migrations")
+			fmt.Println("  https://github.com/ipfs/kubo")
 			return err
 		}
 
+		// Note: Migration caching/pinning functionality has been deprecated
+		// The hybrid migration system handles legacy migrations more efficiently
+
 		repo, err = fsrepo.Open(cctx.ConfigRoot)
 		if err != nil {
 			return err
@@ -381,6 +344,27 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 		return err
 	}
 
+	// Validate autoconf setup - check for private network conflict
+	swarmKey, _ := repo.SwarmKey()
+	isPrivateNetwork := swarmKey != nil || pnet.ForcePrivateNetwork
+	if err := config.ValidateAutoConfWithRepo(cfg, isPrivateNetwork); err != nil {
+		return err
+	}
+
+	// Start background AutoConf updater if enabled
+	if cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) {
+		// Start autoconf client for background updates
+		client, err := config.GetAutoConfClient(cfg)
+		if err != nil {
+			log.Errorf("failed to create autoconf client: %v", err)
+		} else {
+			// Start primes cache and starts background updater
+			if _, err := client.Start(cctx.Context()); err != nil {
+				log.Errorf("failed to start autoconf updater: %v", err)
+			}
+		}
+	}
+
 	fmt.Printf("PeerID: %s\n", cfg.Identity.PeerID)
 
 	if !psSet {
@@ -404,8 +388,8 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 	}
 
 	routingOption, _ := req.Options[routingOptionKwd].(string)
-	if routingOption == routingOptionDefaultKwd {
-		routingOption = cfg.Routing.Type.WithDefault(routingOptionAutoKwd)
+	if routingOption == routingOptionDefaultKwd || routingOption == "" {
+		routingOption = cfg.Routing.Type.WithDefault(config.DefaultRoutingType)
 		if routingOption == "" {
 			routingOption = routingOptionAutoKwd
 		}
@@ -435,6 +419,8 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 		}
 	}
 
+	// Use config for routing construction
+
 	switch routingOption {
 	case routingOptionSupernodeKwd:
 		return errors.New("supernode routing was never fully implemented and has been removed")
@@ -450,6 +436,8 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 		ncfg.Routing = libp2p.DHTServerOption
 	case routingOptionNoneKwd:
 		ncfg.Routing = libp2p.NilRouterOption
+	case routingOptionDelegatedKwd:
+		ncfg.Routing = libp2p.ConstructDelegatedOnlyRouting(cfg)
 	case routingOptionCustomKwd:
 		if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) {
 			return errors.New("Routing.AcceleratedDHTClient option is set even tho Routing.Type is custom, using custom .AcceleratedDHTClient needs to be set on DHT routers individually")
@@ -491,11 +479,24 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
 	if cfg.Provider.Strategy.WithDefault("") != "" && cfg.Reprovider.Strategy.IsDefault() {
 		log.Fatal("Invalid config. Remove unused Provider.Strategy and set Reprovider.Strategy instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy")
 	}
+	// Check for deprecated "flat" strategy
+	if cfg.Reprovider.Strategy.WithDefault("") == "flat" {
+		log.Error("Reprovider.Strategy='flat' is deprecated and will be removed in the next release. Please update your config to use 'all' instead.")
+	}
 	if cfg.Experimental.StrategicProviding {
 		log.Error("Experimental.StrategicProviding was removed. Remove it from your config and set Provider.Enabled=false to remove this message. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
 		cfg.Experimental.StrategicProviding = false
 		cfg.Provider.Enabled = config.False
 	}
+	if routingOption == routingOptionDelegatedKwd {
+		// Delegated routing is read-only mode - content providing must be disabled
+		if cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
+			log.Fatal("Routing.Type=delegated does not support content providing. Set Provider.Enabled=false in your config.")
+		}
+		if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0 {
+			log.Fatal("Routing.Type=delegated does not support content providing. Set Reprovider.Interval='0' in your config.")
+		}
+	}
 
 	printLibp2pPorts(node)
 
@@ -527,6 +528,9 @@ take effect.
 		}
 	}()
 
+	// Clear any cached offline node and set the online daemon node
+	// This ensures HTTP RPC server uses the online node, not any cached offline node
+	cctx.ClearCachedNode()
 	cctx.ConstructNode = func() (*core.IpfsNode, error) {
 		return node, nil
 	}
@@ -537,10 +541,19 @@ take effect.
 	if err != nil {
 		return err
 	}
 
+	pluginErrc := make(chan error, 1)
 	select {
-	case <-node.Process.Closing():
+	case <-node.Context().Done():
+		close(pluginErrc)
 	default:
-		node.Process.AddChild(goprocess.WithTeardown(cctx.Plugins.Close))
+		context.AfterFunc(node.Context(), func() {
+			err := cctx.Plugins.Close()
+			if err != nil {
+				pluginErrc <- fmt.Errorf("closing plugins: %w", err)
+			}
+			close(pluginErrc)
+		})
 	}
 
 	// construct api endpoint - every time
@@ -558,6 +571,11 @@ take effect.
 		if err := mountFuse(req, cctx); err != nil {
 			return err
 		}
+		defer func() {
+			if _err != nil {
+				nodeMount.Unmount(node)
+			}
+		}()
 	}
 
 	// repo blockstore GC - if --enable-gc flag is present
@@ -566,9 +584,9 @@ take effect.
 		return err
 	}
 
-	// Add any files downloaded by migration.
-	if cacheMigrations || pinMigrations {
-		err = addMigrations(cctx.Context(), node, fetcher, pinMigrations)
+	// Add any files downloaded by external migrations (embedded migrations don't download files)
+	if externalMigrationFetcher != nil && (cacheMigrations || pinMigrations) {
+		err = addMigrations(cctx.Context(), node, externalMigrationFetcher, pinMigrations)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "Could not add migration to IPFS:", err)
 		}
@@ -577,10 +595,10 @@ take effect.
 		os.RemoveAll(migrations.DownloadDirectory)
 		migrations.DownloadDirectory = ""
 	}
-	if fetcher != nil {
+	if externalMigrationFetcher != nil {
 		// If there is an error closing the IpfsFetcher, then print error, but
 		// do not fail because of it.
-		err = fetcher.Close()
+		err = externalMigrationFetcher.Close()
 		if err != nil {
 			log.Errorf("error closing IPFS fetcher: %s", err)
 		}
@@ -646,6 +664,17 @@ take effect.
 ⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
 ⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h'
 
 `)
 	}
 
+	// Inform user about Routing.AcceleratedDHTClient when enabled
+	if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) {
+		fmt.Print(`
+
+ℹ️ Routing.AcceleratedDHTClient is enabled for faster content discovery
+ℹ️ and DHT provides. Routing table is initializing. IPFS is ready to use,
+ℹ️ but performance will improve over time as more peers are discovered
+
+`)
+	}
+
@@ -692,16 +721,26 @@ take effect.
 		log.Fatal("Support for IPFS_REUSEPORT was removed. Use LIBP2P_TCP_REUSEPORT instead.")
 	}
 
+	unmountErrc := make(chan error)
+	context.AfterFunc(node.Context(), func() {
+		<-node.Context().Done()
+		nodeMount.Unmount(node)
+		close(unmountErrc)
+	})
+
 	// collect long-running errors and block for shutdown
 	// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
-	var errs error
-	for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc) {
+	var errs []error
+	for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc, pluginErrc, unmountErrc) {
 		if err != nil {
-			errs = multierr.Append(errs, err)
+			errs = append(errs, err)
 		}
 	}
+	if len(errs) != 0 {
+		return errors.Join(errs...)
+	}
 
-	return errs
+	return nil
 }
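
Both shutdown paths added above (pluginErrc and unmountErrc) use context.AfterFunc, a Go 1.21 stdlib primitive that replaces the goprocess teardown hooks: it registers a callback that runs in its own goroutine once the context is done, and returns a stop function (unused here) that can cancel the registration. A self-contained sketch of the pattern, with a hypothetical closePlugins standing in for cctx.Plugins.Close:

package main

import (
	"context"
	"errors"
	"fmt"
)

func closePlugins() error { return errors.New("plugin teardown failed") }

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Buffered so the callback never blocks even if nobody reads promptly.
	pluginErrc := make(chan error, 1)

	// AfterFunc runs the callback in a new goroutine after ctx is done.
	context.AfterFunc(ctx, func() {
		if err := closePlugins(); err != nil {
			pluginErrc <- fmt.Errorf("closing plugins: %w", err)
		}
		close(pluginErrc)
	})

	cancel() // simulate daemon shutdown

	// As in the daemon's merge loop, drain until the channel closes.
	for err := range pluginErrc {
		fmt.Println(err)
	}
}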
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests.
|
||||
@ -851,6 +890,12 @@ func printLibp2pPorts(node *core.IpfsNode) {
|
||||
return
|
||||
}
|
||||
|
||||
if node.PeerHost == nil {
|
||||
log.Error("PeerHost is nil - this should not happen and likely indicates an FX dependency injection issue or race condition")
|
||||
fmt.Println("Swarm not properly initialized - node PeerHost is nil.")
|
||||
return
|
||||
}
|
||||
|
||||
ifaceAddrs, err := node.PeerHost.Network().InterfaceListenAddresses()
|
||||
if err != nil {
|
||||
log.Errorf("failed to read listening addresses: %s", err)
|
||||
@ -1032,6 +1077,10 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if node.PeerHost == nil {
|
||||
return nil, fmt.Errorf("cannot create libp2p gateway: node PeerHost is nil (this should not happen and likely indicates an FX dependency injection issue or race condition)")
|
||||
}
|
||||
|
||||
h := p2phttp.Host{
|
||||
StreamHost: node.PeerHost,
|
||||
}
|
||||
@ -1042,14 +1091,13 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error
|
||||
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
defer close(errc)
|
||||
errc <- h.Serve()
|
||||
close(errc)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
<-node.Process.Closing()
|
||||
context.AfterFunc(node.Context(), func() {
|
||||
h.Close()
|
||||
}()
|
||||
})
|
||||
|
||||
return errc, nil
|
||||
}
|
||||
@ -1134,14 +1182,14 @@ func maybeRunGC(req *cmds.Request, node *core.IpfsNode) (<-chan error, error) {
|
||||
return errc, nil
|
||||
}
|
||||
|
||||
// merge does fan-in of multiple read-only error channels
|
||||
// taken from http://blog.golang.org/pipelines
|
||||
// merge does fan-in of multiple read-only error channels.
|
||||
func merge(cs ...<-chan error) <-chan error {
|
||||
var wg sync.WaitGroup
|
||||
out := make(chan error)
|
||||
|
||||
// Start an output goroutine for each input channel in cs. output
|
||||
// copies values from c to out until c is closed, then calls wg.Done.
|
||||
// Start a goroutine for each input channel in cs, that copies values from
|
||||
// the input channel to the output channel until the input channel is
|
||||
// closed.
|
||||
output := func(c <-chan error) {
|
||||
for n := range c {
|
||||
out <- n
|
||||
@ -1155,8 +1203,8 @@ func merge(cs ...<-chan error) <-chan error {
|
||||
}
|
||||
}
|
||||
|
||||
// Start a goroutine to close out once all the output goroutines are
|
||||
// done. This must start after the wg.Add call.
|
||||
// Start a goroutine to close out once all the output goroutines, and other
|
||||
// things to wait on, are done.
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(out)
|
||||
@ -1227,8 +1275,6 @@ Visit https://github.com/ipfs/kubo/releases or https://dist.ipfs.tech/#kubo and
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-nd.Process.Closing():
|
||||
return
|
||||
case <-ticker.C:
|
||||
continue
|
||||
}
|
||||
|
||||
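
The merge helper above is the classic pipelines fan-in, now multiplexing six channels (API, gateway, GC, p2p gateway, plugin close, unmount) into the single shutdown loop. A standalone sketch of the same shape, with toy producers in place of the daemon's error sources:

package main

import (
	"fmt"
	"sync"
)

// merge fans several read-only error channels into one; out closes only
// after every input channel has closed, mirroring the helper above.
func merge(cs ...<-chan error) <-chan error {
	var wg sync.WaitGroup
	out := make(chan error)
	for _, c := range cs {
		wg.Add(1)
		go func(c <-chan error) {
			defer wg.Done()
			for err := range c {
				out <- err
			}
		}(c)
	}
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}

func main() {
	apiErrc := make(chan error, 1)
	gwErrc := make(chan error, 1)
	apiErrc <- fmt.Errorf("api: listener closed")
	close(apiErrc)
	close(gwErrc)

	var errs []error
	for err := range merge(apiErrc, gwErrc) { // loop ends when all inputs close
		errs = append(errs, err)
	}
	fmt.Println(len(errs), errs[0]) // 1 api: listener closed
}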
(CLI entrypoint — checkDebug; filename not shown in the capture)
@@ -214,8 +214,8 @@ func insideGUI() bool {
 func checkDebug(req *cmds.Request) {
 	// check if user wants to debug. option OR env var.
 	debug, _ := req.Options["debug"].(bool)
-	ipfsLogLevel, _ := logging.LevelFromString(os.Getenv("IPFS_LOGGING")) // IPFS_LOGGING is deprecated
-	goLogLevel, _ := logging.LevelFromString(os.Getenv("GOLOG_LOG_LEVEL"))
+	ipfsLogLevel, _ := logging.Parse(os.Getenv("IPFS_LOGGING")) // IPFS_LOGGING is deprecated
+	goLogLevel, _ := logging.Parse(os.Getenv("GOLOG_LOG_LEVEL"))
 
 	if debug || goLogLevel == logging.LevelDebug || ipfsLogLevel == logging.LevelDebug {
 		u.Debug = true
(TestIsHidden test, package main; filename not shown in the capture — migrates from the thirdparty assert helper to testify)
@@ -6,11 +6,11 @@ package main
 import (
 	"testing"
 
-	"github.com/ipfs/kubo/thirdparty/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestIsHidden(t *testing.T) {
-	assert.True(IsHidden("bar/.git"), t, "dirs beginning with . should be recognized as hidden")
-	assert.False(IsHidden("."), t, ". for current dir should not be considered hidden")
-	assert.False(IsHidden("bar/baz"), t, "normal dirs should not be hidden")
+	require.True(t, IsHidden("bar/.git"), "dirs beginning with . should be recognized as hidden")
+	require.False(t, IsHidden("."), ". for current dir should not be considered hidden")
+	require.False(t, IsHidden("bar/baz"), "normal dirs should not be hidden")
 }
(ipfswatch main; filename not shown in the capture — goprocess goroutines replaced by plain go statements)
@@ -21,7 +21,6 @@ import (
 
 	fsnotify "github.com/fsnotify/fsnotify"
 	"github.com/ipfs/boxo/files"
-	process "github.com/jbenet/goprocess"
 )
 
 var (
@@ -54,7 +53,6 @@ func main() {
 }
 
 func run(ipfsPath, watchPath string) error {
-	proc := process.WithParent(process.Background())
 	log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
 
 	ipfsPath, err := fsutil.ExpandHome(ipfsPath)
@@ -99,11 +97,11 @@ func run(ipfsPath, watchPath string) error {
 			corehttp.WebUIOption,
 			corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
 		}
-		proc.Go(func(p process.Process) {
+		go func() {
 			if err := corehttp.ListenAndServe(node, addr, opts...); err != nil {
 				return
 			}
-		})
+		}()
 	}
 
 	interrupts := make(chan os.Signal, 1)
@@ -137,7 +135,7 @@ func run(ipfsPath, watchPath string) error {
 				}
 			}
 		}
-			proc.Go(func(p process.Process) {
+			go func() {
 				file, err := os.Open(e.Name)
 				if err != nil {
 					log.Println(err)
@@ -162,7 +160,7 @@ func run(ipfsPath, watchPath string) error {
 					log.Println(err)
 				}
 				log.Printf("added %s... key: %s", e.Name, k)
-			})
+			}()
 		}
 	case err := <-watcher.Errors:
 		log.Println(err)
(commands Context — GetNode/ClearCachedNode; filename not shown in the capture)
@@ -53,6 +53,23 @@ func (c *Context) GetNode() (*core.IpfsNode, error) {
 	return c.node, err
 }
 
+// ClearCachedNode clears any cached node, forcing GetNode to construct a new one.
+//
+// This method is critical for mitigating racy FX dependency injection behavior
+// that can occur during daemon startup. The daemon may create multiple IpfsNode
+// instances during initialization - first an offline node during early init, then
+// the proper online daemon node. Without clearing the cache, HTTP RPC handlers may
+// end up using the first (offline) cached node instead of the intended online daemon node.
+//
+// This behavior was likely present forever in go-ipfs, but recent changes made it more
+// prominent and forced us to proactively mitigate FX shortcomings. The daemon calls
+// this method immediately before setting its ConstructNode function to ensure that
+// subsequent GetNode() calls use the correct online daemon node rather than any
+// stale cached offline node from initialization.
+func (c *Context) ClearCachedNode() {
+	c.node = nil
+}
+
 // GetAPI returns CoreAPI instance backed by ipfs node.
 // It may construct the node with the provided function.
 func (c *Context) GetAPI() (coreiface.CoreAPI, error) {
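
The fix ClearCachedNode enables is easiest to see in isolation: a lazily constructed, memoized value plus an explicit invalidation hook. This sketch is illustrative only (a toy node type, not the real Context, which carries plugins, config, and more):

package main

import "fmt"

type node struct{ online bool }

// lazyCache mimics the Context above: GetNode constructs once and memoizes;
// ClearCachedNode drops the memo so the next GetNode rebuilds.
type lazyCache struct {
	cached    *node
	construct func() (*node, error)
}

func (c *lazyCache) GetNode() (*node, error) {
	if c.cached != nil {
		return c.cached, nil
	}
	n, err := c.construct()
	if err == nil {
		c.cached = n
	}
	return n, err
}

func (c *lazyCache) ClearCachedNode() { c.cached = nil }

func main() {
	c := &lazyCache{construct: func() (*node, error) { return &node{online: false}, nil }}
	early, _ := c.GetNode()   // offline node cached during early init
	fmt.Println(early.online) // false

	// Daemon startup: drop the stale offline node, then pin the online one,
	// mirroring cctx.ClearCachedNode() followed by setting ConstructNode.
	c.ClearCachedNode()
	c.construct = func() (*node, error) { return &node{online: true}, nil }
	n, _ := c.GetNode()
	fmt.Println(n.online) // true
}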
config/autoconf.go (new file, +319 lines; the capture below ends mid-file)
@@ -0,0 +1,319 @@
+package config
+
+import (
+	"maps"
+	"math/rand"
+	"strings"
+
+	"github.com/ipfs/boxo/autoconf"
+	logging "github.com/ipfs/go-log/v2"
+	peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+var log = logging.Logger("config")
+
+// AutoConf contains the configuration for the autoconf subsystem
+type AutoConf struct {
+	// URL is the HTTP(S) URL to fetch the autoconf.json from
+	// Default: see boxo/autoconf.MainnetAutoConfURL
+	URL *OptionalString `json:",omitempty"`
+
+	// Enabled determines whether to use autoconf
+	// Default: true
+	Enabled Flag `json:",omitempty"`
+
+	// RefreshInterval is how often to refresh autoconf data
+	// Default: 24h
+	RefreshInterval *OptionalDuration `json:",omitempty"`
+
+	// TLSInsecureSkipVerify allows skipping TLS verification (for testing only)
+	// Default: false
+	TLSInsecureSkipVerify Flag `json:",omitempty"`
+}
+
+const (
+	// AutoPlaceholder is the string used as a placeholder for autoconf values
+	AutoPlaceholder = "auto"
+
+	// DefaultAutoConfEnabled is the default value for AutoConf.Enabled
+	DefaultAutoConfEnabled = true
+
+	// DefaultAutoConfURL is the default URL for fetching autoconf
+	DefaultAutoConfURL = autoconf.MainnetAutoConfURL
+
+	// DefaultAutoConfRefreshInterval is the default interval for refreshing autoconf data
+	DefaultAutoConfRefreshInterval = autoconf.DefaultRefreshInterval
+
+	// AutoConf client configuration constants
+	DefaultAutoConfCacheSize = autoconf.DefaultCacheSize
+	DefaultAutoConfTimeout   = autoconf.DefaultTimeout
+)
+
+// getNativeSystems returns the list of systems that should be used natively based on routing type
+func getNativeSystems(routingType string) []string {
+	switch routingType {
+	case "dht", "dhtclient", "dhtserver":
+		return []string{autoconf.SystemAminoDHT} // Only native DHT
+	case "auto", "autoclient":
+		return []string{autoconf.SystemAminoDHT} // Native DHT, delegated others
+	case "delegated":
+		return []string{} // Everything delegated
+	case "none":
+		return []string{} // No native systems
+	default:
+		return []string{} // Custom mode
+	}
+}
+
+// selectRandomResolver picks a random resolver from a list for load balancing
+func selectRandomResolver(resolvers []string) string {
+	if len(resolvers) == 0 {
+		return ""
+	}
+	return resolvers[rand.Intn(len(resolvers))]
+}
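
The routing-type mapping and the resolver picker above have small, testable contracts. This standalone sketch re-implements both to make the expected outputs concrete (a local string constant stands in for autoconf.SystemAminoDHT; the bodies are otherwise copied from the file above):

package main

import (
	"fmt"
	"math/rand"
)

const systemAminoDHT = "AminoDHT" // stand-in for autoconf.SystemAminoDHT

func getNativeSystems(routingType string) []string {
	switch routingType {
	case "dht", "dhtclient", "dhtserver", "auto", "autoclient":
		return []string{systemAminoDHT} // the DHT stays native
	default: // "delegated", "none", custom
		return []string{}
	}
}

func selectRandomResolver(resolvers []string) string {
	if len(resolvers) == 0 {
		return ""
	}
	// math/rand is auto-seeded since Go 1.20, so no Seed call is needed.
	return resolvers[rand.Intn(len(resolvers))]
}

func main() {
	fmt.Println(getNativeSystems("auto"))      // [AminoDHT]
	fmt.Println(getNativeSystems("delegated")) // []
	fmt.Println(selectRandomResolver([]string{"https://a.example/dns-query", "https://b.example/dns-query"}))
	fmt.Println(selectRandomResolver(nil) == "") // true
}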
+// DNSResolversWithAutoConf returns DNS resolvers with "auto" values replaced by autoconf values
+func (c *Config) DNSResolversWithAutoConf() map[string]string {
+	if c.DNS.Resolvers == nil {
+		return nil
+	}
+
+	resolved := make(map[string]string)
+	autoConf := c.getAutoConf()
+	autoExpanded := 0
+
+	// Process each configured resolver
+	for domain, resolver := range c.DNS.Resolvers {
+		if resolver == AutoPlaceholder {
+			// Try to resolve from autoconf
+			if autoConf != nil && autoConf.DNSResolvers != nil {
+				if resolvers, exists := autoConf.DNSResolvers[domain]; exists && len(resolvers) > 0 {
+					resolved[domain] = selectRandomResolver(resolvers)
+					autoExpanded++
+				}
+			}
+			// If autoConf is disabled or domain not found, skip this "auto" resolver
+		} else {
+			// Keep custom resolver as-is
+			resolved[domain] = resolver
+		}
+	}
+
+	// Add default resolvers from autoconf that aren't already configured
+	if autoConf != nil && autoConf.DNSResolvers != nil {
+		for domain, resolvers := range autoConf.DNSResolvers {
+			if _, exists := resolved[domain]; !exists && len(resolvers) > 0 {
+				resolved[domain] = selectRandomResolver(resolvers)
+			}
+		}
+	}
+
+	// Log expansion statistics
+	if autoExpanded > 0 {
+		log.Debugf("expanded %d 'auto' DNS.Resolvers from autoconf", autoExpanded)
+	}
+
+	return resolved
+}
+
+// expandAutoConfSlice is a generic helper for expanding "auto" placeholders in string slices
+// It handles the common pattern of: iterate through slice, expand "auto" once, keep custom values
+func expandAutoConfSlice(sourceSlice []string, autoConfData []string) []string {
+	var resolved []string
+	autoExpanded := false
+
+	for _, item := range sourceSlice {
+		if item == AutoPlaceholder {
+			// Replace with autoconf data (only once)
+			if autoConfData != nil && !autoExpanded {
+				resolved = append(resolved, autoConfData...)
+				autoExpanded = true
+			}
+			// If autoConfData is nil or already expanded, skip redundant "auto" entries silently
+		} else {
+			// Keep custom item
+			resolved = append(resolved, item)
+		}
+	}
+
+	return resolved
+}
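
expandAutoConfSlice has a contract worth pinning down with a worked example: only the first "auto" expands, later "auto" entries are dropped silently, custom entries survive in order, and a nil autoconf payload removes the placeholder entirely. A standalone sketch (constant and function body copied from the file above; the peer addresses are illustrative):

package main

import "fmt"

const AutoPlaceholder = "auto"

// Copy of expandAutoConfSlice above: expand "auto" once, keep custom values.
func expandAutoConfSlice(sourceSlice []string, autoConfData []string) []string {
	var resolved []string
	autoExpanded := false
	for _, item := range sourceSlice {
		if item == AutoPlaceholder {
			if autoConfData != nil && !autoExpanded {
				resolved = append(resolved, autoConfData...)
				autoExpanded = true
			}
		} else {
			resolved = append(resolved, item)
		}
	}
	return resolved
}

func main() {
	defaults := []string{"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"}

	// First "auto" expands; the duplicate is skipped; the custom entry stays.
	fmt.Println(expandAutoConfSlice([]string{"auto", "/ip4/192.0.2.1/tcp/4001", "auto"}, defaults))

	// With autoconf unavailable (nil), "auto" simply disappears.
	fmt.Println(expandAutoConfSlice([]string{"auto"}, nil)) // []
}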
+// BootstrapWithAutoConf returns bootstrap config with "auto" values replaced by autoconf values
+func (c *Config) BootstrapWithAutoConf() []string {
+	autoConf := c.getAutoConf()
+	var autoConfData []string
+
+	if autoConf != nil {
+		routingType := c.Routing.Type.WithDefault(DefaultRoutingType)
+		nativeSystems := getNativeSystems(routingType)
+		autoConfData = autoConf.GetBootstrapPeers(nativeSystems...)
+		log.Debugf("BootstrapWithAutoConf: processing with routing type: %s", routingType)
+	} else {
+		log.Debugf("BootstrapWithAutoConf: autoConf disabled, using original config")
+	}
+
+	result := expandAutoConfSlice(c.Bootstrap, autoConfData)
+	log.Debugf("BootstrapWithAutoConf: final result contains %d peers", len(result))
+	return result
+}
+
+// getAutoConf is a helper to get autoconf data with fallbacks
+func (c *Config) getAutoConf() *autoconf.Config {
+	if !c.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) {
+		log.Debugf("getAutoConf: AutoConf disabled, returning nil")
+		return nil
+	}
+
+	// Create or get cached client with config
+	client, err := GetAutoConfClient(c)
+	if err != nil {
+		log.Debugf("getAutoConf: client creation failed - %v", err)
+		return nil
+	}
+
+	// Use GetCached to avoid network I/O during config operations
+	// This ensures config retrieval doesn't block on network operations
+	result := client.GetCached()
+
+	log.Debugf("getAutoConf: returning autoconf data")
+	return result
+}
+
+// BootstrapPeersWithAutoConf returns bootstrap peers with "auto" values replaced by autoconf values
+// and parsed into peer.AddrInfo structures
+func (c *Config) BootstrapPeersWithAutoConf() ([]peer.AddrInfo, error) {
+	bootstrapStrings := c.BootstrapWithAutoConf()
+	return ParseBootstrapPeers(bootstrapStrings)
+}
+
+// DelegatedRoutersWithAutoConf returns delegated router URLs without trailing slashes
+func (c *Config) DelegatedRoutersWithAutoConf() []string {
+	autoConf := c.getAutoConf()
+
+	// Use autoconf to expand the endpoints with supported paths for read operations
+	routingType := c.Routing.Type.WithDefault(DefaultRoutingType)
+	nativeSystems := getNativeSystems(routingType)
+	return autoconf.ExpandDelegatedEndpoints(
+		c.Routing.DelegatedRouters,
+		autoConf,
+		nativeSystems,
+		// Kubo supports all read paths
+		autoconf.RoutingV1ProvidersPath,
+		autoconf.RoutingV1PeersPath,
+		autoconf.RoutingV1IPNSPath,
+	)
+}
+
+// DelegatedPublishersWithAutoConf returns delegated publisher URLs without trailing slashes
+func (c *Config) DelegatedPublishersWithAutoConf() []string {
+	autoConf := c.getAutoConf()
+
+	// Use autoconf to expand the endpoints with IPNS write path
+	routingType := c.Routing.Type.WithDefault(DefaultRoutingType)
+	nativeSystems := getNativeSystems(routingType)
+	return autoconf.ExpandDelegatedEndpoints(
+		c.Ipns.DelegatedPublishers,
+		autoConf,
+		nativeSystems,
+		autoconf.RoutingV1IPNSPath, // Only IPNS operations (for write)
+	)
+}
+
+// expandConfigField expands a specific config field with autoconf values
+// Handles both top-level fields ("Bootstrap") and nested fields ("DNS.Resolvers")
+func (c *Config) expandConfigField(expandedCfg map[string]any, fieldPath string) {
+	// Check if this field supports autoconf expansion
+	expandFunc, supported := supportedAutoConfFields[fieldPath]
+	if !supported {
+		return
+	}
+
+	// Handle top-level fields (no dot in path)
+	if !strings.Contains(fieldPath, ".") {
+		if _, exists := expandedCfg[fieldPath]; exists {
+			expandedCfg[fieldPath] = expandFunc(c)
+		}
+		return
+	}
+
+	// Handle nested fields (section.field format)
+	parts := strings.SplitN(fieldPath, ".", 2)
+	if len(parts) != 2 {
+		return
+	}
+
+	sectionName, fieldName := parts[0], parts[1]
+	if section, exists := expandedCfg[sectionName]; exists {
+		if sectionMap, ok := section.(map[string]any); ok {
+			if _, exists := sectionMap[fieldName]; exists {
+				sectionMap[fieldName] = expandFunc(c)
+				expandedCfg[sectionName] = sectionMap
+			}
+		}
+	}
+}
+
+// ExpandAutoConfValues expands "auto" placeholders in config with their actual values using the same methods as the daemon
+func (c *Config) ExpandAutoConfValues(cfg map[string]any) (map[string]any, error) {
+	// Create a deep copy of the config map to avoid modifying the original
+	expandedCfg := maps.Clone(cfg)
+
+	// Use the same expansion methods that the daemon uses - ensures runtime consistency
+	// Unified expansion for all supported autoconf fields
+	c.expandConfigField(expandedCfg, "Bootstrap")
+	c.expandConfigField(expandedCfg, "DNS.Resolvers")
+	c.expandConfigField(expandedCfg, "Routing.DelegatedRouters")
+	c.expandConfigField(expandedCfg, "Ipns.DelegatedPublishers")
+
+	return expandedCfg, nil
+}
+
+// supportedAutoConfFields maps field keys to their expansion functions
+var supportedAutoConfFields = map[string]func(*Config) any{
+	"Bootstrap": func(c *Config) any {
+		expanded := c.BootstrapWithAutoConf()
+		return stringSliceToInterfaceSlice(expanded)
+	},
+	"DNS.Resolvers": func(c *Config) any {
+		expanded := c.DNSResolversWithAutoConf()
+		return stringMapToInterfaceMap(expanded)
+	},
+	"Routing.DelegatedRouters": func(c *Config) any {
+		expanded := c.DelegatedRoutersWithAutoConf()
+		return stringSliceToInterfaceSlice(expanded)
+	},
+	"Ipns.DelegatedPublishers": func(c *Config) any {
+		expanded := c.DelegatedPublishersWithAutoConf()
+		return stringSliceToInterfaceSlice(expanded)
+	},
+}
+
+// ExpandConfigField expands auto values for a specific config field using the same methods as the daemon
+func (c *Config) ExpandConfigField(key string, value any) any {
+	if expandFunc, supported := supportedAutoConfFields[key]; supported {
+		return expandFunc(c)
+	}
+
+	// Return original value if no expansion needed (not a field that supports auto values)
+	return value
+}
+
+// Helper functions for type conversion between string types and any types for JSON compatibility
+
+func stringSliceToInterfaceSlice(slice []string) []any {
+	result := make([]any, len(slice))
+	for i, v := range slice {
+		result[i] = v
+	}
+	return result
+}
return result
|
||||
}
|
||||
|
||||
func stringMapToInterfaceMap(m map[string]string) map[string]any {
|
||||
result := make(map[string]any)
|
||||
for k, v := range m {
|
||||
result[k] = v
|
||||
}
|
||||
return result
|
||||
}
|
||||
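A minimal usage sketch of the entry points above (hypothetical caller, not part of this commit; assumes a *config.Config already loaded from the repo):

package autoconfexample

import (
	"fmt"

	config "github.com/ipfs/kubo/config"
)

// printExpandedBootstrap resolves any "auto" placeholders in cfg.Bootstrap
// through the cached autoconf data, the same path the daemon uses at runtime.
func printExpandedBootstrap(cfg *config.Config) {
	for _, peer := range cfg.BootstrapWithAutoConf() {
		fmt.Println(peer)
	}
}
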
136
config/autoconf_client.go
Normal file
@ -0,0 +1,136 @@
package config

import (
	"fmt"
	"path/filepath"
	"sync"

	"github.com/ipfs/boxo/autoconf"
	logging "github.com/ipfs/go-log/v2"
	version "github.com/ipfs/kubo"
)

var autoconfLog = logging.Logger("autoconf")

// Singleton state for autoconf client
var (
	clientOnce  sync.Once
	clientCache *autoconf.Client
	clientErr   error
)

// GetAutoConfClient returns a cached autoconf client or creates a new one.
// This is thread-safe and uses a singleton pattern.
func GetAutoConfClient(cfg *Config) (*autoconf.Client, error) {
	clientOnce.Do(func() {
		clientCache, clientErr = newAutoConfClient(cfg)
	})
	return clientCache, clientErr
}

// newAutoConfClient creates a new autoconf client with the given config
func newAutoConfClient(cfg *Config) (*autoconf.Client, error) {
	// Get repo path for cache directory
	repoPath, err := PathRoot()
	if err != nil {
		return nil, fmt.Errorf("failed to get repo path: %w", err)
	}

	// Prepare refresh interval with nil check
	refreshInterval := cfg.AutoConf.RefreshInterval
	if refreshInterval == nil {
		refreshInterval = &OptionalDuration{}
	}

	// Use default URL if not specified
	url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)

	// Build client options
	options := []autoconf.Option{
		autoconf.WithCacheDir(filepath.Join(repoPath, "autoconf")),
		autoconf.WithUserAgent(version.GetUserAgentVersion()),
		autoconf.WithCacheSize(DefaultAutoConfCacheSize),
		autoconf.WithTimeout(DefaultAutoConfTimeout),
		autoconf.WithRefreshInterval(refreshInterval.WithDefault(DefaultAutoConfRefreshInterval)),
		autoconf.WithFallback(autoconf.GetMainnetFallbackConfig),
		autoconf.WithURL(url),
	}

	return autoconf.NewClient(options...)
}

// ValidateAutoConfWithRepo validates that autoconf setup is correct at daemon startup with repo access
func ValidateAutoConfWithRepo(cfg *Config, swarmKeyExists bool) error {
	if !cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) {
		// AutoConf is disabled, check for "auto" values and warn
		return validateAutoConfDisabled(cfg)
	}

	// Check for private network with default mainnet URL
	url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)
	if swarmKeyExists && url == DefaultAutoConfURL {
		return fmt.Errorf("AutoConf cannot use the default mainnet URL (%s) on a private network (swarm.key or LIBP2P_FORCE_PNET detected). Either disable AutoConf by setting AutoConf.Enabled=false, or configure AutoConf.URL to point to a configuration service specific to your private swarm", DefaultAutoConfURL)
	}

	// Further validation will happen lazily when config is accessed
	return nil
}

// validateAutoConfDisabled checks for "auto" values when AutoConf is disabled and logs errors
func validateAutoConfDisabled(cfg *Config) error {
	hasAutoValues := false
	var errors []string

	// Check Bootstrap
	for _, peer := range cfg.Bootstrap {
		if peer == AutoPlaceholder {
			hasAutoValues = true
			errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
			break
		}
	}

	// Check DNS.Resolvers
	if cfg.DNS.Resolvers != nil {
		for _, resolver := range cfg.DNS.Resolvers {
			if resolver == AutoPlaceholder {
				hasAutoValues = true
				errors = append(errors, "DNS.Resolvers contains 'auto' but AutoConf.Enabled=false")
				break
			}
		}
	}

	// Check Routing.DelegatedRouters
	for _, router := range cfg.Routing.DelegatedRouters {
		if router == AutoPlaceholder {
			hasAutoValues = true
			errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
			break
		}
	}

	// Check Ipns.DelegatedPublishers
	for _, publisher := range cfg.Ipns.DelegatedPublishers {
		if publisher == AutoPlaceholder {
			hasAutoValues = true
			errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
			break
		}
	}

	// Log all errors
	for _, errMsg := range errors {
		autoconfLog.Error(errMsg)
	}

	// If only auto values exist and no static ones, fail to start
	if hasAutoValues {
		if len(cfg.Bootstrap) == 1 && cfg.Bootstrap[0] == AutoPlaceholder {
			autoconfLog.Error("Kubo cannot start with only 'auto' Bootstrap values when AutoConf.Enabled=false")
			return fmt.Errorf("no usable bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false) but 'auto' placeholder is used in Bootstrap config. Either set AutoConf.Enabled=true to enable automatic configuration, or replace 'auto' with specific Bootstrap peer addresses")
		}
	}

	return nil
}

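One consequence of the sync.Once guard worth noting: the first config passed in wins for the lifetime of the process. A hedged illustration (hypothetical snippet, not in this diff):

package autoconfexample

import config "github.com/ipfs/kubo/config"

// demonstrateSingleton shows that a second call returns the client built
// from the first config: clientOnce has already fired, so cfgB is ignored.
func demonstrateSingleton(cfgA, cfgB *config.Config) bool {
	clientA, _ := config.GetAutoConfClient(cfgA)
	clientB, _ := config.GetAutoConfClient(cfgB)
	return clientA == clientB // true once initialized
}
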
92
config/autoconf_test.go
Normal file
@ -0,0 +1,92 @@
package config

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAutoConfDefaults(t *testing.T) {
	// Test that AutoConf has the correct default values
	cfg := &Config{
		AutoConf: AutoConf{
			URL:     NewOptionalString(DefaultAutoConfURL),
			Enabled: True,
		},
	}

	assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL))
	assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled))

	// Test default refresh interval
	if cfg.AutoConf.RefreshInterval == nil {
		// This is expected - nil means use default
		duration := (*OptionalDuration)(nil).WithDefault(DefaultAutoConfRefreshInterval)
		assert.Equal(t, DefaultAutoConfRefreshInterval, duration)
	}
}

func TestAutoConfProfile(t *testing.T) {
	cfg := &Config{
		Bootstrap: []string{"some", "existing", "peers"},
		DNS: DNS{
			Resolvers: map[string]string{
				"eth.": "https://example.com",
			},
		},
		Routing: Routing{
			DelegatedRouters: []string{"https://existing.router"},
		},
		Ipns: Ipns{
			DelegatedPublishers: []string{"https://existing.publisher"},
		},
		AutoConf: AutoConf{
			Enabled: False,
		},
	}

	// Apply autoconf profile
	profile, ok := Profiles["autoconf-on"]
	require.True(t, ok, "autoconf-on profile not found")

	err := profile.Transform(cfg)
	require.NoError(t, err)

	// Check that values were set to "auto"
	assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap)
	assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."])
	assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters)
	assert.Equal(t, []string{AutoPlaceholder}, cfg.Ipns.DelegatedPublishers)

	// Check that AutoConf was enabled
	assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled))

	// Check that URL was set
	assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL))
}

func TestInitWithAutoValues(t *testing.T) {
	identity := Identity{
		PeerID: "QmTest",
	}

	cfg, err := InitWithIdentity(identity)
	require.NoError(t, err)

	// Check that Bootstrap is set to "auto"
	assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap)

	// Check that DNS resolver is set to "auto"
	assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."])

	// Check that DelegatedRouters is set to "auto"
	assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters)

	// Check that DelegatedPublishers is set to "auto"
	assert.Equal(t, []string{AutoPlaceholder}, cfg.Ipns.DelegatedPublishers)

	// Check that AutoConf is enabled with correct URL
	assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled))
	assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL))
}

@ -2,28 +2,11 @@ package config

import (
	"errors"
	"fmt"

	peer "github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// DefaultBootstrapAddresses are the hardcoded bootstrap addresses
// for IPFS. they are nodes run by the IPFS team. docs on these later.
// As with all p2p networks, bootstrap is an important security concern.
//
// NOTE: This is here -- and not inside cmd/ipfs/init.go -- because of an
// import dependency issue. TODO: move this into a config/default/ package.
var DefaultBootstrapAddresses = []string{
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", // rust-libp2p-server
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
	"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
	"/dnsaddr/va1.bootstrap.libp2p.io/p2p/12D3KooWKnDdG3iXw9eTFijk3EWSunZcFi54Zka4wmtqtt6rPxc8", // js-libp2p-amino-dht-bootstrapper
	"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
	"/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
}

// ErrInvalidPeerAddr signals an address is not a valid peer address.
var ErrInvalidPeerAddr = errors.New("invalid peer address")

@ -31,18 +14,6 @@ func (c *Config) BootstrapPeers() ([]peer.AddrInfo, error) {
	return ParseBootstrapPeers(c.Bootstrap)
}

// DefaultBootstrapPeers returns the (parsed) set of default bootstrap peers.
// if it fails, it returns a meaningful error for the user.
// This is here (and not inside cmd/ipfs/init) because of module dependency problems.
func DefaultBootstrapPeers() ([]peer.AddrInfo, error) {
	ps, err := ParseBootstrapPeers(DefaultBootstrapAddresses)
	if err != nil {
		return nil, fmt.Errorf(`failed to parse hardcoded bootstrap peers: %w
This is a problem with the ipfs codebase. Please report it to the dev team`, err)
	}
	return ps, nil
}

func (c *Config) SetBootstrapPeers(bps []peer.AddrInfo) {
	c.Bootstrap = BootstrapPeerStrings(bps)
}

@ -1,24 +1,28 @@
package config

import (
	"sort"
	"testing"

	"github.com/ipfs/boxo/autoconf"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestBoostrapPeerStrings(t *testing.T) {
	parsed, err := ParseBootstrapPeers(DefaultBootstrapAddresses)
	if err != nil {
		t.Fatal(err)
	}
func TestBootstrapPeerStrings(t *testing.T) {
	// Test round-trip: string -> parse -> format -> string
	// This ensures that parsing and formatting are inverse operations

	formatted := BootstrapPeerStrings(parsed)
	sort.Strings(formatted)
	expected := append([]string{}, DefaultBootstrapAddresses...)
	sort.Strings(expected)
	// Start with the default bootstrap peer multiaddr strings
	originalStrings := autoconf.FallbackBootstrapPeers

	for i, s := range formatted {
		if expected[i] != s {
			t.Fatalf("expected %s, %s", expected[i], s)
		}
	}
	// Parse multiaddr strings into structured peer data
	parsed, err := ParseBootstrapPeers(originalStrings)
	require.NoError(t, err, "parsing bootstrap peers should succeed")

	// Format the parsed data back into multiaddr strings
	formattedStrings := BootstrapPeerStrings(parsed)

	// Verify round-trip: we should get back exactly what we started with
	assert.ElementsMatch(t, originalStrings, formattedStrings,
		"round-trip through parse/format should preserve all bootstrap peers")
}

@ -31,7 +31,9 @@ type Config struct {
	Pubsub  PubsubConfig
	Peering Peering
	DNS     DNS

	Migration Migration
	AutoConf  AutoConf

	Provider   Provider
	Reprovider Reprovider

@ -10,7 +10,7 @@ type DNS struct {
	//
	// Example:
	// - Custom resolver for ENS: `eth.` → `https://dns.eth.limo/dns-query`
	// - Override the default OS resolver: `.` → `https://doh.applied-privacy.net/query`
	// - Override the default OS resolver: `.` → `https://1.1.1.1/dns-query`
	Resolvers map[string]string
	// MaxCacheTTL is the maximum duration DNS entries are valid in the cache.
	MaxCacheTTL *OptionalDuration `json:",omitempty"`

@ -1,10 +1,18 @@
package config

import (
	"github.com/ipfs/boxo/gateway"
)

const (
	DefaultInlineDNSLink         = false
	DefaultDeserializedResponses = true
	DefaultDisableHTMLErrors     = false
	DefaultExposeRoutingAPI      = false

	// Gateway limit defaults from boxo
	DefaultRetrievalTimeout      = gateway.DefaultRetrievalTimeout
	DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
)

type GatewaySpec struct {
@ -73,4 +81,21 @@ type Gateway struct {
	// ExposeRoutingAPI configures the gateway port to expose
	// routing system as HTTP API at /routing/v1 (https://specs.ipfs.tech/routing/http-routing-v1/).
	ExposeRoutingAPI Flag

	// RetrievalTimeout enforces a maximum duration for content retrieval:
	// - Time to first byte: If the gateway cannot start writing the response within
	//   this duration (e.g., stuck searching for providers), a 504 Gateway Timeout
	//   is returned.
	// - Time between writes: After the first byte, the timeout resets each time new
	//   bytes are written to the client. If the gateway cannot write additional data
	//   within this duration after the last successful write, the response is terminated.
	// This helps free resources when the gateway gets stuck looking for providers
	// or cannot retrieve the requested content.
	// A value of 0 disables this timeout.
	RetrievalTimeout *OptionalDuration `json:",omitempty"`

	// MaxConcurrentRequests limits concurrent HTTP requests handled by the gateway.
	// Requests beyond this limit receive 429 Too Many Requests with Retry-After header.
	// A value of 0 disables the limit.
	MaxConcurrentRequests *OptionalInteger `json:",omitempty"`
}

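As an illustration (values arbitrary), both new limits can be set with the stock config command:

> ipfs config Gateway.RetrievalTimeout 2m
> ipfs config --json Gateway.MaxConcurrentRequests 4096
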
@ -21,7 +21,6 @@ const (
	// write-batch. The total size of the batch is limited by
	// BatchMaxNodes and BatchMaxSize.
	DefaultBatchMaxSize = 100 << 20 // 100MiB

)

var (

@ -23,11 +23,6 @@ func Init(out io.Writer, nBitsForKeypair int) (*Config, error) {
}

func InitWithIdentity(identity Identity) (*Config, error) {
	bootstrapPeers, err := DefaultBootstrapPeers()
	if err != nil {
		return nil, err
	}

	datastore := DefaultDatastoreConfig()

	conf := &Config{
@ -40,7 +35,7 @@ func InitWithIdentity(identity Identity) (*Config, error) {
		Addresses: addressesConfig(),

		Datastore: datastore,
		Bootstrap: BootstrapPeerStrings(bootstrapPeers),
		Bootstrap: []string{AutoPlaceholder},
		Identity:  identity,
		Discovery: Discovery{
			MDNS: MDNS{
@ -56,7 +51,8 @@ func InitWithIdentity(identity Identity) (*Config, error) {
		},

		Ipns: Ipns{
			ResolveCacheSize: 128,
			ResolveCacheSize:    128,
			DelegatedPublishers: []string{AutoPlaceholder},
		},

		Gateway: Gateway{
@ -72,11 +68,12 @@ func InitWithIdentity(identity Identity) (*Config, error) {
			RemoteServices: map[string]RemotePinningService{},
		},
		DNS: DNS{
			Resolvers: map[string]string{},
			Resolvers: map[string]string{
				".": AutoPlaceholder,
			},
		},
		Migration: Migration{
			DownloadSources: []string{},
			Keep:            "",
		Routing: Routing{
			DelegatedRouters: []string{AutoPlaceholder},
		},
	}

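Against a fresh repo the new defaults are directly observable; an illustrative session (output abridged and approximate):

> ipfs init
> ipfs config Bootstrap
["auto"]
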
@ -20,4 +20,7 @@ type Ipns struct {

	// Enable namesys pubsub (--enable-namesys-pubsub)
	UsePubsub Flag `json:",omitempty"`

	// Simplified configuration for delegated IPNS publishers
	DelegatedPublishers []string
}

@ -2,16 +2,18 @@ package config

const DefaultMigrationKeep = "cache"

var DefaultMigrationDownloadSources = []string{"HTTPS", "IPFS"}
// DefaultMigrationDownloadSources defines the default download sources for legacy migrations (repo versions <16).
// Only HTTPS is supported for legacy migrations. IPFS downloads are not supported.
var DefaultMigrationDownloadSources = []string{"HTTPS"}

// Migration configures how migrations are downloaded and if the downloads are
// added to IPFS locally.
// Migration configures how legacy migrations are downloaded (repo versions <16).
//
// DEPRECATED: This configuration only applies to legacy external migrations for repository
// versions below 16. Modern repositories (v16+) use embedded migrations that do not require
// external downloads. These settings will be ignored for modern repository versions.
type Migration struct {
	// Sources in order of preference, where "IPFS" means use IPFS and "HTTPS"
	// means use default gateways. Any other values are interpreted as
	// hostnames for custom gateways. Empty list means "use default sources".
	DownloadSources []string
	// Whether or not to keep the migration after downloading it.
	// Options are "discard", "cache", "pin". Empty string for default.
	Keep string
	// DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16).
	DownloadSources []string `json:",omitempty"`
	// DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16).
	Keep string `json:",omitempty"`
}

@ -7,5 +7,5 @@ type Plugins struct {

type Plugin struct {
	Disabled bool
	Config interface{}
	Config   interface{} `json:",omitempty"`
}

@ -87,6 +87,12 @@ is useful when using the daemon in test environments.`,
			c.Bootstrap = []string{}
			c.Discovery.MDNS.Enabled = false
			c.AutoTLS.Enabled = False
			c.AutoConf.Enabled = False

			// Explicitly set autoconf-controlled fields to empty when autoconf is disabled
			c.DNS.Resolvers = map[string]string{}
			c.Routing.DelegatedRouters = []string{}
			c.Ipns.DelegatedPublishers = []string{}
			return nil
		},
	},
@ -97,11 +103,10 @@ Inverse profile of the test profile.`,
		Transform: func(c *Config) error {
			c.Addresses = addressesConfig()

			bootstrapPeers, err := DefaultBootstrapPeers()
			if err != nil {
				return err
			}
			c.Bootstrap = appendSingle(c.Bootstrap, BootstrapPeerStrings(bootstrapPeers))
			// Use AutoConf system for bootstrap peers
			c.Bootstrap = []string{AutoPlaceholder}
			c.AutoConf.Enabled = Default
			c.AutoConf.URL = nil // Clear URL to use implicit default

			c.Swarm.DisableNatPortMap = false
			c.Discovery.MDNS.Enabled = true
@ -349,6 +354,39 @@ fetching may be degraded.
			return nil
		},
	},
	"autoconf-on": {
		Description: `Sets configuration to use implicit defaults from remote autoconf service.
Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to "auto".
This profile requires AutoConf to be enabled and configured.`,

		Transform: func(c *Config) error {
			c.Bootstrap = []string{AutoPlaceholder}
			c.DNS.Resolvers = map[string]string{
				".": AutoPlaceholder,
			}
			c.Routing.DelegatedRouters = []string{AutoPlaceholder}
			c.Ipns.DelegatedPublishers = []string{AutoPlaceholder}
			c.AutoConf.Enabled = True
			if c.AutoConf.URL == nil {
				c.AutoConf.URL = NewOptionalString(DefaultAutoConfURL)
			}
			return nil
		},
	},
	"autoconf-off": {
		Description: `Disables AutoConf and sets networking fields to empty for manual configuration.
Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to empty.
Use this when you want normal networking but prefer manual control over all endpoints.`,

		Transform: func(c *Config) error {
			c.Bootstrap = nil
			c.DNS.Resolvers = nil
			c.Routing.DelegatedRouters = nil
			c.Ipns.DelegatedPublishers = nil
			c.AutoConf.Enabled = False
			return nil
		},
	},
}

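Either mode can be applied after the fact with the existing profile machinery (illustrative):

> ipfs config profile apply autoconf-on
> ipfs config profile apply autoconf-off
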
func getAvailablePort() (port int, err error) {

@ -1,15 +1,44 @@
package config

import "time"
import (
	"strings"
	"time"
)

const (
	DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326
	DefaultReproviderStrategy = "all"
)

type ReproviderStrategy int

const (
	ReproviderStrategyAll ReproviderStrategy = 1 << iota
	ReproviderStrategyPinned
	ReproviderStrategyRoots
	ReproviderStrategyMFS
)

// Reprovider configuration describes how CIDs from the local datastore are periodically re-announced to routing systems.
// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provider.*
type Reprovider struct {
	Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network
	Strategy *OptionalString   `json:",omitempty"` // Which keys to announce
}

func ParseReproviderStrategy(s string) ReproviderStrategy {
	var strategy ReproviderStrategy
	for _, part := range strings.Split(s, "+") {
		switch part {
		case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
			return ReproviderStrategyAll
		case "pinned":
			strategy |= ReproviderStrategyPinned
		case "roots":
			strategy |= ReproviderStrategyRoots
		case "mfs":
			strategy |= ReproviderStrategyMFS
		}
	}
	return strategy
}

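The parsed value is a bitmask, so consumers can test individual strategies with a bitwise AND. A small sketch (hypothetical helper, not part of the diff):

package reproviderexample

import config "github.com/ipfs/kubo/config"

// wantsPinned reports whether a Reprovider.Strategy string covers pinned CIDs,
// either via the "pinned" component or via "all", which short-circuits.
func wantsPinned(s string) bool {
	st := config.ParseReproviderStrategy(s)
	return st&(config.ReproviderStrategyAll|config.ReproviderStrategyPinned) != 0
}
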
27
config/reprovider_test.go
Normal file
@ -0,0 +1,27 @@
package config

import "testing"

func TestParseReproviderStrategy(t *testing.T) {
	tests := []struct {
		input  string
		expect ReproviderStrategy
	}{
		{"all", ReproviderStrategyAll},
		{"pinned", ReproviderStrategyPinned},
		{"mfs", ReproviderStrategyMFS},
		{"pinned+mfs", ReproviderStrategyPinned | ReproviderStrategyMFS},
		{"invalid", 0},
		{"all+invalid", ReproviderStrategyAll},
		{"", ReproviderStrategyAll},
		{"flat", ReproviderStrategyAll}, // deprecated, maps to "all"
		{"flat+all", ReproviderStrategyAll},
	}

	for _, tt := range tests {
		result := ParseReproviderStrategy(tt.input)
		if result != tt.expect {
			t.Errorf("ParseReproviderStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
		}
	}
}
@ -11,6 +11,7 @@ import (
const (
	DefaultAcceleratedDHTClient     = false
	DefaultLoopbackAddressesOnLanDHT = false
	DefaultRoutingType              = "auto"
	CidContactRoutingURL            = "https://cid.contact"
	PublicGoodDelegatedRoutingURL   = "https://delegated-ipfs.dev" // cid.contact + amino dht (incl. IPNS PUTs)
	EnvHTTPRouters                  = "IPFS_HTTP_ROUTERS"
@ -18,11 +19,6 @@ const (
)

var (
	// Default HTTP routers used in parallel to DHT when Routing.Type = "auto"
	DefaultHTTPRouters = getEnvOrDefault(EnvHTTPRouters, []string{
		CidContactRoutingURL, // https://github.com/ipfs/kubo/issues/9422#issuecomment-1338142084
	})

	// Default filter-protocols to pass along with delegated routing requests (as defined in IPIP-484)
	// and also filter out locally
	DefaultHTTPRoutersFilterProtocols = getEnvOrDefault(EnvHTTPRoutersFilterProtocols, []string{
@ -37,8 +33,9 @@ var (
type Routing struct {
	// Type sets default daemon routing mode.
	//
	// Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", or "custom".
	// Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", "delegated", or "custom".
	// When unset or set to "auto", DHT and implicit routers are used.
	// When "delegated" is set, only HTTP delegated routers and IPNS publishers are used (no DHT).
	// When "custom" is set, user-provided Routing.Routers is used.
	Type *OptionalString `json:",omitempty"`

@ -49,7 +46,7 @@ type Routing struct {
	IgnoreProviders []string `json:",omitempty"`

	// Simplified configuration used by default when Routing.Type=auto|autoclient
	DelegatedRouters []string `json:",omitempty"`
	DelegatedRouters []string

	// Advanced configuration used when Routing.Type=custom
	Routers Routers `json:",omitempty"`

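The new routing mode is opt-in via the usual knob (illustrative):

> ipfs config Routing.Type delegated
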
@ -37,6 +37,7 @@ type AddEvent struct {
}

const (
	pinNameOptionName = "pin-name"
	quietOptionName   = "quiet"
	quieterOptionName = "quieter"
	silentOptionName  = "silent"
@ -75,13 +76,15 @@ Adds the content of <path> to IPFS. Use -r to add directories (recursively).
`,
		LongDescription: `
Adds the content of <path> to IPFS. Use -r to add directories.
Note that directories are added recursively, to form the IPFS
MerkleDAG.
Note that directories are added recursively, and big files are chunked,
to form the IPFS MerkleDAG. Learn more: https://docs.ipfs.tech/concepts/merkle-dag/

If the daemon is not running, it will just add locally.
If the daemon is not running, it will just add locally to the repo at $IPFS_PATH.
If the daemon is started later, it will be advertised after a few
seconds when the reprovider runs.

BASIC EXAMPLES:

The wrap option, '-w', wraps the file (or files, if using the
recursive option) in a directory. This directory contains only
the files which have been added, and means that the file retains
@ -100,6 +103,12 @@ You can now refer to the added file in a gateway, like so:
Files imported with 'ipfs add' are protected from GC (implicit '--pin=true'),
but it is up to you to remember the returned CID to get the data back later.

If you need to back up or transport content-addressed data using a non-IPFS
medium, CID can be preserved with CAR files.
See 'dag export' and 'dag import' for more information.

MFS INTEGRATION:

Passing '--to-files' creates a reference in Files API (MFS), making it easier
to find it in the future:

@ -111,6 +120,8 @@ to find it in the future:
See 'ipfs files --help' to learn more about using MFS
for keeping track of added files and directories.

CHUNKING EXAMPLES:

The chunker option, '-s', specifies the chunking strategy that dictates
how to break files into blocks. Blocks with same content can
be deduplicated. Different chunking strategies will produce different
@ -131,14 +142,16 @@ want to use a 1024 times larger chunk sizes for most files.

You can now check what blocks have been created by:

> ipfs object links QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87
> ipfs ls QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87
QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
Qmf7ZQeSxq2fJVJbCmgTrLLVN9tDR9Wy5k75DxQKuz5Gyt 1195
> ipfs object links Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn
> ipfs ls Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn
QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
QmerURi9k4XzKCaaPbsK6BL5pMEjF7PGphjDvkkjDtsVf3 868
QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338

ADVANCED CONFIGURATION:

Finally, a note on hash (CID) determinism and 'ipfs add' command.

Almost all the flags provided by this command will change the final CID, and
@ -146,12 +159,11 @@ new flags may be added in the future. It is not guaranteed for the implicit
defaults of 'ipfs add' to remain the same in future Kubo releases, or for other
IPFS software to use the same import parameters as Kubo.

Note: CIDv1 is automatically used when using non-default options like custom
hash functions or when raw-leaves is explicitly enabled.

Use Import.* configuration options to override global implicit defaults:
https://github.com/ipfs/kubo/blob/master/docs/config.md#import

If you need to back up or transport content-addressed data using a non-IPFS
medium, CID can be preserved with CAR files.
See 'dag export' and 'dag import' for more information.
`,
	},

@ -159,36 +171,45 @@ See 'dag export' and 'dag import' for more information.
		cmds.FileArg("path", true, true, "The path to a file to be added to IPFS.").EnableRecursive().EnableStdin(),
	},
	Options: []cmds.Option{
		// Input Processing
		cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
		cmds.OptionDerefArgs,     // a builtin option that resolves passed in filesystem links (--dereference-args)
		cmds.OptionStdinName,     // a builtin option that optionally allows wrapping stdin into a named file
		cmds.OptionHidden,
		cmds.OptionIgnore,
		cmds.OptionIgnoreRules,
		// Output Control
		cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
		cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),
		cmds.BoolOption(silentOptionName, "Write no output."),
		cmds.BoolOption(progressOptionName, "p", "Stream progress data."),
		cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
		// Basic Add Behavior
		cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."),
		cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."),
		cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Default: Import.UnixFSChunker"),
		cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. Default: Import.UnixFSRawLeaves"),
		cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. (experimental) Default: Import.UnixFSFileMaxLinks"),
		cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. Default: Import.UnixFSDirectoryMaxLinks. WARNING: experimental, Import.UnixFSHAMTThreshold is a safer alternative."),
		cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). Default: Import.UnixFSHAMTDirectoryMaxFanout WARNING: experimental, see Import.UnixFSHAMTDirectorySizeThreshold as well."),
		cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. (experimental)"),
		cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
		cmds.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true. Default: Import.CidVersion"),
		cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. Default: Import.HashFunction"),
		cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. (experimental)"),
		cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. (experimental)").WithDefault(32),
		cmds.BoolOption(pinOptionName, "Pin locally to protect added files from garbage collection.").WithDefault(true),
		cmds.StringOption(pinNameOptionName, "Name to use for the pin. Requires explicit value (e.g., --pin-name=myname)."),
		// MFS Integration
		cmds.StringOption(toFilesOptionName, "Add reference to Files API (MFS) at the provided path."),
		cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. Disables raw-leaves. (experimental)"),
		cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. Disables raw-leaves. (experimental)"),
		cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. Disables raw-leaves. (experimental)"),
		cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). Disables raw-leaves. (experimental)"),
		// CID & Hashing
		cmds.IntOption(cidVersionOptionName, "CID version (0 or 1). CIDv1 automatically enables raw-leaves and is required for non-sha2-256 hashes. Default: Import.CidVersion"),
		cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. Default: Import.HashFunction"),
		cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. Note: CIDv1 automatically enables raw-leaves. Default: false for CIDv0, true for CIDv1 (Import.UnixFSRawLeaves)"),
		// Chunking & DAG Structure
		cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Files larger than chunk size are split into multiple blocks. Default: Import.UnixFSChunker"),
		cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
		// Advanced UnixFS Limits
		cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. WARNING: experimental. Default: Import.UnixFSFileMaxLinks"),
		cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSDirectoryMaxLinks"),
		cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"),
		// Experimental Features
		cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. WARNING: experimental"),
		cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. WARNING: experimental").WithDefault(32),
		cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. WARNING: experimental"),
		cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. WARNING: experimental"),
		cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
		cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
		cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
		cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
		cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"),
	},
	PreRun: func(req *cmds.Request, env cmds.Environment) error {
@ -230,6 +251,7 @@ See 'dag export' and 'dag import' for more information.
		silent, _ := req.Options[silentOptionName].(bool)
		chunker, _ := req.Options[chunkerOptionName].(string)
		dopin, _ := req.Options[pinOptionName].(bool)
		pinName, pinNameSet := req.Options[pinNameOptionName].(string)
		rawblks, rbset := req.Options[rawLeavesOptionName].(bool)
		maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int)
		maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int)
@ -260,6 +282,8 @@ See 'dag export' and 'dag import' for more information.
			cidVer = int(cfg.Import.CidVersion.WithDefault(config.DefaultCidVersion))
		}

		// Pin names are only used when explicitly provided via --pin-name=value

		if !rbset && cfg.Import.UnixFSRawLeaves != config.Default {
			rbset = true
			rawblks = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
@ -296,7 +320,9 @@ See 'dag export' and 'dag import' for more information.
		if onlyHash && toFilesSet {
			return fmt.Errorf("%s and %s options are not compatible", onlyHashOptionName, toFilesOptionName)
		}

		if !dopin && pinNameSet {
			return fmt.Errorf("%s option requires %s to be set", pinNameOptionName, pinOptionName)
		}
		if wrap && toFilesSet {
			return fmt.Errorf("%s and %s options are not compatible", wrapOptionName, toFilesOptionName)
		}
@ -326,7 +352,7 @@ See 'dag export' and 'dag import' for more information.

			options.Unixfs.Chunker(chunker),

			options.Unixfs.Pin(dopin),
			options.Unixfs.Pin(dopin, pinName),
			options.Unixfs.HashOnly(onlyHash),
			options.Unixfs.FsCache(fscache),
			options.Unixfs.Nocopy(nocopy),

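For instance (hypothetical invocation), the new pin-name option takes an explicit value and pairs with the default --pin=true:

> ipfs add --pin-name=website-backup -r ./public
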
@ -41,15 +41,15 @@ Running 'ipfs bootstrap' with no arguments will run 'ipfs bootstrap list'.
	},
}

const (
	defaultOptionName = "default"
)

var bootstrapAddCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Add peers to the bootstrap list.",
		ShortDescription: `Outputs a list of peers that were added (that weren't already
in the bootstrap list).

The special values 'default' and 'auto' can be used to add the default
bootstrap peers. Both are equivalent and will add the 'auto' placeholder to
the bootstrap list, which gets resolved using the AutoConf system.
` + bootstrapSecurityWarning,
	},

@ -57,29 +57,23 @@ in the bootstrap list).
		cmds.StringArg("peer", false, true, peerOptionDesc).EnableStdin(),
	},

	Options: []cmds.Option{
		cmds.BoolOption(defaultOptionName, "Add default bootstrap nodes. (Deprecated, use 'default' subcommand instead)"),
	},
	Subcommands: map[string]*cmds.Command{
		"default": bootstrapAddDefaultCmd,
	},

	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		deflt, _ := req.Options[defaultOptionName].(bool)

		inputPeers := config.DefaultBootstrapAddresses
		if !deflt {
			if err := req.ParseBodyArgs(); err != nil {
				return err
			}

			inputPeers = req.Arguments
		if err := req.ParseBodyArgs(); err != nil {
			return err
		}
		inputPeers := req.Arguments

		if len(inputPeers) == 0 {
			return errors.New("no bootstrap peers to add")
		}

		// Convert "default" to "auto" for backward compatibility
		for i, peer := range inputPeers {
			if peer == "default" {
				inputPeers[i] = "auto"
			}
		}

		cfgRoot, err := cmdenv.GetConfigRoot(env)
		if err != nil {
			return err
@ -95,6 +89,13 @@ in the bootstrap list).
			return err
		}

		// Check if trying to add "auto" when AutoConf is disabled
		for _, peer := range inputPeers {
			if peer == config.AutoPlaceholder && !cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) {
				return errors.New("cannot add default bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false). Enable AutoConf by setting AutoConf.Enabled=true in your config, or add specific peer addresses instead")
			}
		}

		added, err := bootstrapAdd(r, cfg, inputPeers)
		if err != nil {
			return err
@ -110,44 +111,6 @@ in the bootstrap list).
	},
}

var bootstrapAddDefaultCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Add default peers to the bootstrap list.",
		ShortDescription: `Outputs a list of peers that were added (that weren't already
in the bootstrap list).`,
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		cfgRoot, err := cmdenv.GetConfigRoot(env)
		if err != nil {
			return err
		}

		r, err := fsrepo.Open(cfgRoot)
		if err != nil {
			return err
		}

		defer r.Close()
		cfg, err := r.Config()
		if err != nil {
			return err
		}

		added, err := bootstrapAdd(r, cfg, config.DefaultBootstrapAddresses)
		if err != nil {
			return err
		}

		return cmds.EmitOnce(res, &BootstrapOutput{added})
	},
	Type: BootstrapOutput{},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *BootstrapOutput) error {
			return bootstrapWritePeers(w, "added ", out.Peers)
		}),
	},
}

const (
	bootstrapAllOptionName = "all"
)
@ -251,6 +214,9 @@ var bootstrapListCmd = &cmds.Command{
		Tagline:          "Show peers in the bootstrap list.",
		ShortDescription: "Peers are output in the format '<multiaddr>/<peerID>'.",
	},
	Options: []cmds.Option{
		cmds.BoolOption(configExpandAutoName, "Expand 'auto' placeholders from AutoConf service."),
	},

	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		cfgRoot, err := cmdenv.GetConfigRoot(env)
@ -268,12 +234,16 @@ var bootstrapListCmd = &cmds.Command{
			return err
		}

		peers, err := cfg.BootstrapPeers()
		if err != nil {
			return err
		// Check if user wants to expand auto values
		expandAuto, _ := req.Options[configExpandAutoName].(bool)
		if expandAuto {
			// Use the same expansion method as the daemon
			expandedBootstrap := cfg.BootstrapWithAutoConf()
			return cmds.EmitOnce(res, &BootstrapOutput{expandedBootstrap})
		}

		return cmds.EmitOnce(res, &BootstrapOutput{config.BootstrapPeerStrings(peers)})
		// Simply return the bootstrap config as-is, including any "auto" values
		return cmds.EmitOnce(res, &BootstrapOutput{cfg.Bootstrap})
	},
	Type: BootstrapOutput{},
	Encoders: cmds.EncoderMap{
@ -297,7 +267,11 @@ func bootstrapWritePeers(w io.Writer, prefix string, peers []string) error {
}

func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, error) {
	// Validate peers - skip validation for "auto" placeholder
	for _, p := range peers {
		if p == config.AutoPlaceholder {
			continue // Skip validation for "auto" placeholder
		}
		m, err := ma.NewMultiaddr(p)
		if err != nil {
			return nil, err
@ -347,6 +321,16 @@ func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, er
}

func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]string, error) {
	// Check if bootstrap contains "auto"
	hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder)

	if hasAuto && cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) {
		// Cannot selectively remove peers when using "auto" bootstrap.
		// Users should either disable AutoConf or replace "auto" with specific peers.
		return nil, fmt.Errorf("cannot remove individual bootstrap peers when using 'auto' placeholder: the 'auto' value is managed by AutoConf. Either disable AutoConf by setting AutoConf.Enabled=false and replace 'auto' with specific peer addresses, or use 'ipfs bootstrap rm --all' to remove all peers")
	}

	// Original logic for non-auto bootstrap
	removed := make([]peer.AddrInfo, 0, len(toRemove))
	keep := make([]peer.AddrInfo, 0, len(cfg.Bootstrap))

@ -406,16 +390,28 @@ func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]stri
}

func bootstrapRemoveAll(r repo.Repo, cfg *config.Config) ([]string, error) {
	removed, err := cfg.BootstrapPeers()
	if err != nil {
		return nil, err
	// Check if bootstrap contains "auto" - if so, we need special handling
	hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder)

	var removed []string
	if hasAuto {
		// When "auto" is present, we can't parse it as peer.AddrInfo.
		// Just return the raw bootstrap list as strings for display.
		removed = slices.Clone(cfg.Bootstrap)
	} else {
		// Original logic for configs without "auto"
		removedPeers, err := cfg.BootstrapPeers()
		if err != nil {
			return nil, err
		}
		removed = config.BootstrapPeerStrings(removedPeers)
	}

	cfg.Bootstrap = nil
	if err := r.SetConfig(cfg); err != nil {
		return nil, err
	}
	return config.BootstrapPeerStrings(removed), nil
	return removed, nil
}

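Taken together (illustrative commands; output elided), the new behavior looks like:

> ipfs bootstrap add auto
> ipfs bootstrap list --expand-auto
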
const bootstrapSecurityWarning = `

@ -121,7 +121,8 @@ The optional format string is a printf style format string:
			return ""
		}),
	},
	Type: CidFormatRes{},
	Type:  CidFormatRes{},
	Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}

type CidFormatRes struct {
@ -151,6 +152,7 @@ Useful when processing third-party CIDs which could come with arbitrary formats.
	},
	PostRun: cidFmtCmd.PostRun,
	Type:    cidFmtCmd.Type,
	Extra:   CreateCmdExtras(SetDoesNotUseRepo(true)),
}

type cidFormatOpts struct {
@ -291,7 +293,7 @@ var basesCmd = &cmds.Command{
			multibaseSorter{val}.Sort()
			for _, v := range val {
				code := v.Code
				if code < 32 || code >= 127 {
				if !unicode.IsPrint(rune(code)) {
					// don't display non-printable prefixes
					code = ' '
				}
@ -309,7 +311,8 @@ var basesCmd = &cmds.Command{
			return nil
		}),
	},
	Type: []CodeAndName{},
	Type:  []CodeAndName{},
	Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}

const (
@ -369,7 +372,8 @@ var codecsCmd = &cmds.Command{
			return nil
		}),
	},
	Type: []CodeAndName{},
	Type:  []CodeAndName{},
	Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}

var hashesCmd = &cmds.Command{
@ -393,6 +397,7 @@ var hashesCmd = &cmds.Command{
	},
	Encoders: codecsCmd.Encoders,
	Type:     codecsCmd.Type,
	Extra:    CreateCmdExtras(SetDoesNotUseRepo(true)),
}

type multibaseSorter struct {
@ -404,7 +409,7 @@ func (s multibaseSorter) Sort() {
		if n := cmp.Compare(unicode.ToLower(rune(a.Code)), unicode.ToLower(rune(b.Code))); n != 0 {
			return n
		}
		// lowecase letters should come before uppercase
		// lowercase letters should come before uppercase
		return cmp.Compare(b.Code, a.Code)
	})
}

@ -233,12 +233,11 @@ type nonFatalError string
// contain non-fatal errors. The helper function is allowed to panic
// on internal errors.
func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error {
	return func(res cmds.Response, re cmds.ResponseEmitter) (err error) {
	return func(res cmds.Response, re cmds.ResponseEmitter) (rerr error) {
		defer func() {
			if r := recover(); r != nil {
				err = fmt.Errorf("internal error: %v", r)
				rerr = fmt.Errorf("internal error: %v", r)
			}
			re.Close()
		}()

		var errors bool
@ -248,7 +247,8 @@ func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.
			if err == io.EOF {
				break
			}
			return err
			rerr = err
			return
		}

		errorMsg := procVal(v, os.Stdout)
@ -260,8 +260,8 @@ func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.
		}

		if errors {
			return fmt.Errorf("errors while displaying some entries")
			rerr = fmt.Errorf("errors while displaying some entries")
		}
		return nil
		return
	}
}

@ -30,7 +30,6 @@ func TestCommands(t *testing.T) {
		"/block/stat",
		"/bootstrap",
		"/bootstrap/add",
		"/bootstrap/add/default",
		"/bootstrap/list",
		"/bootstrap/rm",
		"/bootstrap/rm/all",
@ -163,6 +162,9 @@ func TestCommands(t *testing.T) {
		"/pin/update",
		"/pin/verify",
		"/ping",
		"/provide",
		"/provide/clear",
		"/provide/stat",
		"/pubsub",
		"/pubsub/ls",
		"/pubsub/peers",

@ -5,8 +5,10 @@ import (
"errors"
"fmt"
"io"
"maps"
"os"
"os/exec"
"slices"
"strings"

"github.com/anmitsu/go-shlex"
@ -33,6 +35,7 @@ const (
configBoolOptionName = "bool"
configJSONOptionName = "json"
configDryRunOptionName = "dry-run"
configExpandAutoName = "expand-auto"
)

var ConfigCmd = &cmds.Command{
@ -75,6 +78,7 @@ Set multiple values in the 'Addresses.AppendAnnounce' array:
Options: []cmds.Option{
cmds.BoolOption(configBoolOptionName, "Set a boolean value."),
cmds.BoolOption(configJSONOptionName, "Parse stringified JSON."),
cmds.BoolOption(configExpandAutoName, "Expand 'auto' placeholders to their expanded values from AutoConf service."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
args := req.Arguments
@ -105,6 +109,11 @@ Set multiple values in the 'Addresses.AppendAnnounce' array:
}
defer r.Close()
if len(args) == 2 {
// Check if user is trying to write config with expand flag
if expandAuto, _ := req.Options[configExpandAutoName].(bool); expandAuto {
return fmt.Errorf("--expand-auto can only be used for reading config values, not for setting them")
}

value := args[1]

if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON {
@ -121,7 +130,13 @@ Set multiple values in the 'Addresses.AppendAnnounce' array:
output, err = setConfig(r, key, value)
}
} else {
output, err = getConfig(r, key)
// Check if user wants to expand auto values for getter
expandAuto, _ := req.Options[configExpandAutoName].(bool)
if expandAuto {
output, err = getConfigWithAutoExpand(r, key)
} else {
output, err = getConfig(r, key)
}
}

if err != nil {
@ -208,6 +223,23 @@ NOTE: For security reasons, this command will omit your private key and remote s
return err
}

// Check if user wants to expand auto values
expandAuto, _ := req.Options[configExpandAutoName].(bool)
if expandAuto {
// Load full config to use resolution methods
var fullCfg config.Config
err = json.Unmarshal(data, &fullCfg)
if err != nil {
return err
}

// Expand auto values and update the map
cfg, err = fullCfg.ExpandAutoConfValues(cfg)
if err != nil {
return err
}
}

cfg, err = scrubValue(cfg, []string{config.IdentityTag, config.PrivKeyTag})
if err != nil {
return err
@ -417,7 +449,8 @@ var configProfileApplyCmd = &cmds.Command{
func buildProfileHelp() string {
var out string

for name, profile := range config.Profiles {
for _, name := range slices.Sorted(maps.Keys(config.Profiles)) {
profile := config.Profiles[name]
dlines := strings.Split(profile.Description, "\n")
for i := range dlines {
dlines[i] = " " + dlines[i]
@ -498,6 +531,28 @@ func getConfig(r repo.Repo, key string) (*ConfigField, error) {
}, nil
}

func getConfigWithAutoExpand(r repo.Repo, key string) (*ConfigField, error) {
// First get the current value
value, err := r.GetConfigKey(key)
if err != nil {
return nil, fmt.Errorf("failed to get config value: %q", err)
}

// Load full config for resolution
fullCfg, err := r.Config()
if err != nil {
return nil, fmt.Errorf("failed to load config: %q", err)
}

// Expand auto values based on the key
expandedValue := fullCfg.ExpandConfigField(key, value)

return &ConfigField{
Key: key,
Value: expandedValue,
}, nil
}

func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) {
err := r.SetConfigKey(key, value)
if err != nil {

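To make the read-only nature of --expand-auto concrete, here is a minimal sketch of the getter branching above, using a plain map as a hypothetical stand-in for the repo config (Kubo's real resolution goes through Config.ExpandConfigField; the URL below is a placeholder):

package main

import "fmt"

// getValue mirrors the flow: writes always reject the flag (not shown),
// reads choose between the plain getter and the expanding getter.
func getValue(cfg map[string]string, key string, expandAuto bool) (string, error) {
	v, ok := cfg[key]
	if !ok {
		return "", fmt.Errorf("key %q not found", key)
	}
	if expandAuto && v == "auto" {
		// Stand-in for expanding the "auto" placeholder to a concrete value.
		return "https://example-autoconf.invalid/resolved", nil
	}
	return v, nil
}

func main() {
	cfg := map[string]string{"Routing.DelegatedRouters": "auto"}
	plain, _ := getValue(cfg, "Routing.DelegatedRouters", false)
	expanded, _ := getValue(cfg, "Routing.DelegatedRouters", true)
	fmt.Println(plain)    // auto
	fmt.Println(expanded) // https://example-autoconf.invalid/resolved
}
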
@ -3,16 +3,37 @@ package commands
import (
"fmt"
"io"
"slices"

cmds "github.com/ipfs/go-ipfs-cmds"
logging "github.com/ipfs/go-log/v2"
)

// Golang os.Args overrides * and replaces the character argument with
// an array which includes every file in the user's CWD. As a
// workaround, we use 'all' instead. The util library still uses * so
// we convert it at this step.
var logAllKeyword = "all"
const (
// allLogSubsystems is used to specify all log subsystems when setting the
// log level.
allLogSubsystems = "*"
// allLogSubsystemsAlias is a convenience alias for allLogSubsystems that
// doesn't require shell escaping.
allLogSubsystemsAlias = "all"
// defaultLogLevel is used to request and to identify the default log
// level.
defaultLogLevel = "default"
// defaultSubsystemKey is the subsystem name that is used to denote the
// default log level. We use parentheses for UI clarity to distinguish it
// from regular subsystem names.
defaultSubsystemKey = "(default)"
// logLevelOption is an option for the tail subcommand to select the log
// level to output.
logLevelOption = "log-level"
// noSubsystemSpecified is used when no subsystem argument is provided
noSubsystemSpecified = ""
)

type logLevelOutput struct {
Levels map[string]string `json:",omitempty"`
Message string `json:",omitempty"`
}

var LogCmd = &cmds.Command{
Helptext: cmds.HelpText{
@ -39,46 +60,161 @@ system (not just for the daemon logs, but all commands):

var logLevelCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Change the logging level.",
Tagline: "Change or get the logging level.",
ShortDescription: `
Change the verbosity of one or all subsystems log output. This does not affect
the event log.
Get or change the logging level of one or all logging subsystems.

This command provides a runtime alternative to the GOLOG_LOG_LEVEL
environment variable for debugging and troubleshooting.

UNDERSTANDING DEFAULT vs '*':

The "default" level is the fallback used by unconfigured subsystems.
You cannot set the default level directly - it only changes when you use '*'.

The '*' wildcard represents ALL subsystems including the default level.
Setting '*' changes everything at once, including the default.

EXAMPLES - Getting levels:

ipfs log level # Show only the default fallback level
ipfs log level all # Show all subsystem levels (100+ lines)
ipfs log level core # Show level for 'core' subsystem only

EXAMPLES - Setting levels:

ipfs log level core debug # Set 'core' to 'debug' (default unchanged)
ipfs log level all info # Set ALL to 'info' (including default)
ipfs log level core default # Reset 'core' to use current default level

WILDCARD OPTIONS:

Use 'all' (convenient) or '*' (requires escaping) to affect all subsystems:
ipfs log level all debug # Convenient - no shell escaping needed
ipfs log level '*' debug # Equivalent but needs quotes: '*' or "*" or \*

BEHAVIOR EXAMPLES:

Initial state (all using default 'error'):
$ ipfs log level => error
$ ipfs log level core => error

After setting one subsystem:
$ ipfs log level core debug
$ ipfs log level => error (default unchanged!)
$ ipfs log level core => debug (explicitly set)
$ ipfs log level dht => error (still uses default)

After setting everything with 'all':
$ ipfs log level all info
$ ipfs log level => info (default changed!)
$ ipfs log level core => info (all changed)
$ ipfs log level dht => info (all changed)

The 'default' keyword always refers to the current default level:
$ ipfs log level => error
$ ipfs log level core default # Sets core to 'error'
$ ipfs log level all info # Changes default to 'info'
$ ipfs log level core default # Now sets core to 'info'
`,
},

Arguments: []cmds.Argument{
// TODO use a different keyword for 'all' because all can theoretically
// clash with a subsystem name
cmds.StringArg("subsystem", true, false, fmt.Sprintf("The subsystem logging identifier. Use '%s' for all subsystems.", logAllKeyword)),
cmds.StringArg("level", true, false, `The log level, with 'debug' the most verbose and 'fatal' the least verbose.
One of: debug, info, warn, error, dpanic, panic, fatal.
`),
cmds.StringArg("subsystem", false, false, fmt.Sprintf("The subsystem logging identifier. Use '%s' or '%s' to get or set the log level of all subsystems including the default. If not specified, only show the default log level.", allLogSubsystemsAlias, allLogSubsystems)),
cmds.StringArg("level", false, false, fmt.Sprintf("The log level, with 'debug' as the most verbose and 'fatal' the least verbose. Use '%s' to set to the current default level. One of: debug, info, warn, error, dpanic, panic, fatal, %s", defaultLogLevel, defaultLogLevel)),
},
NoLocal: true,
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
args := req.Arguments
subsystem, level := args[0], args[1]
var level, subsystem string

if subsystem == logAllKeyword {
subsystem = "*"
if len(req.Arguments) > 0 {
subsystem = req.Arguments[0]
if len(req.Arguments) > 1 {
level = req.Arguments[1]
}

// Normalize aliases to the canonical "*" form
if subsystem == allLogSubsystems || subsystem == allLogSubsystemsAlias {
subsystem = "*"
}
}

if err := logging.SetLogLevel(subsystem, level); err != nil {
return err
// If a level is specified, then set the log level.
if level != "" {
if level == defaultLogLevel {
level = logging.DefaultLevel().String()
}

if err := logging.SetLogLevel(subsystem, level); err != nil {
return err
}

s := fmt.Sprintf("Changed log level of '%s' to '%s'\n", subsystem, level)
log.Info(s)

return cmds.EmitOnce(res, &logLevelOutput{Message: s})
}

s := fmt.Sprintf("Changed log level of '%s' to '%s'\n", subsystem, level)
log.Info(s)
// Get the level for the requested subsystem.
switch subsystem {
case noSubsystemSpecified:
// Return the default log level
levelMap := map[string]string{logging.DefaultName: logging.DefaultLevel().String()}
return cmds.EmitOnce(res, &logLevelOutput{Levels: levelMap})
case allLogSubsystems, allLogSubsystemsAlias:
// Return levels for all subsystems (default behavior)
levels := logging.SubsystemLevelNames()

// Replace default subsystem key with defaultSubsystemKey.
levels[defaultSubsystemKey] = levels[logging.DefaultName]
delete(levels, logging.DefaultName)
return cmds.EmitOnce(res, &logLevelOutput{Levels: levels})
default:
// Return level for a specific subsystem.
level, err := logging.SubsystemLevelName(subsystem)
if err != nil {
return err
}
levelMap := map[string]string{subsystem: level}
return cmds.EmitOnce(res, &logLevelOutput{Levels: levelMap})
}

return cmds.EmitOnce(res, &MessageOutput{s})
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *MessageOutput) error {
fmt.Fprint(w, out.Message)
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *logLevelOutput) error {
if out.Message != "" {
fmt.Fprint(w, out.Message)
return nil
}

// Check if this is an RPC call by looking for the encoding option
encoding, _ := req.Options["encoding"].(string)
isRPC := encoding == "json"

// Determine whether to show subsystem names in output.
// Show subsystem names when:
// 1. It's an RPC call (needs JSON structure with named fields)
// 2. Multiple subsystems are displayed (for clarity when showing many levels)
showNames := isRPC || len(out.Levels) > 1

levelNames := make([]string, 0, len(out.Levels))
for subsystem, level := range out.Levels {
if showNames {
// Show subsystem name when it's RPC or when showing multiple subsystems
levelNames = append(levelNames, fmt.Sprintf("%s: %s", subsystem, level))
} else {
// For CLI calls with single subsystem, only show the level
levelNames = append(levelNames, level)
}
}
slices.Sort(levelNames)
for _, ln := range levelNames {
fmt.Fprintln(w, ln)
}
return nil
}),
},
Type: MessageOutput{},
Type: logLevelOutput{},
}

var logLsCmd = &cmds.Command{
@ -103,12 +239,10 @@ subsystems of a running daemon.
Type: stringList{},
}

const logLevelOption = "log-level"

var logTailCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Read and outpt log messages.",
Tagline: "Read and output log messages.",
ShortDescription: `
Outputs log messages as they are generated.

@ -130,7 +264,7 @@ This will only return 'info' logs from bitswap and skip 'debug'.
var pipeReader *logging.PipeReader
logLevelString, _ := req.Options[logLevelOption].(string)
if logLevelString != "" {
logLevel, err := logging.LevelFromString(logLevelString)
logLevel, err := logging.Parse(logLevelString)
if err != nil {
return fmt.Errorf("setting log level %s: %w", logLevelString, err)
}

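The get/set dispatch the reworked Run function implements can be condensed into a standalone sketch (simplified types, hypothetical logLevel helper). It mirrors the documented behavior that setting one subsystem leaves the default untouched, while "all"/"*" changes everything including the default:

package main

import "fmt"

// logLevel: both arguments optional; "all" normalizes to "*";
// a missing level turns the command into a getter.
func logLevel(levels map[string]string, args ...string) string {
	var subsystem, level string
	if len(args) > 0 {
		subsystem = args[0]
		if len(args) > 1 {
			level = args[1]
		}
		if subsystem == "all" {
			subsystem = "*"
		}
	}
	if level != "" { // setter
		if subsystem == "*" {
			for k := range levels { // includes "(default)"
				levels[k] = level
			}
		} else {
			levels[subsystem] = level
		}
		return fmt.Sprintf("Changed log level of '%s' to '%s'", subsystem, level)
	}
	switch subsystem { // getter
	case "":
		return levels["(default)"]
	case "*":
		return fmt.Sprint(levels)
	default:
		return levels[subsystem]
	}
}

func main() {
	levels := map[string]string{"(default)": "error", "core": "error"}
	fmt.Println(logLevel(levels))                  // error
	fmt.Println(logLevel(levels, "core", "debug")) // Changed log level of 'core' to 'debug'
	fmt.Println(logLevel(levels, "core"))          // debug
	fmt.Println(logLevel(levels))                  // error (default unchanged)
}
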
@ -16,17 +16,19 @@ import (
options "github.com/ipfs/kubo/core/coreiface/options"
)

var errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override")
var errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override or `--allow-delegated` if Ipns.DelegatedPublishers are set up")

const (
ipfsPathOptionName = "ipfs-path"
resolveOptionName = "resolve"
allowOfflineOptionName = "allow-offline"
lifeTimeOptionName = "lifetime"
ttlOptionName = "ttl"
keyOptionName = "key"
quieterOptionName = "quieter"
v1compatOptionName = "v1compat"
ipfsPathOptionName = "ipfs-path"
resolveOptionName = "resolve"
allowOfflineOptionName = "allow-offline"
allowDelegatedOptionName = "allow-delegated"
lifeTimeOptionName = "lifetime"
ttlOptionName = "ttl"
keyOptionName = "key"
quieterOptionName = "quieter"
v1compatOptionName = "v1compat"
sequenceOptionName = "sequence"
)

var PublishCmd = &cmds.Command{
@ -47,6 +49,14 @@ which is the hash of its public key.
You can use the 'ipfs key' commands to list and generate more names and their
respective keys.

Publishing Modes:

By default, IPNS records are published to both the DHT and any configured
HTTP delegated publishers. You can control this behavior with the following flags:

--allow-offline Allow publishing when offline (publishes to local datastore, network operations are optional)
--allow-delegated Allow publishing without DHT connectivity (local + HTTP delegated publishers only)

Examples:

Publish an <ipfs-path> with your default name:
@ -54,18 +64,33 @@ Publish an <ipfs-path> with your default name:
> ipfs name publish /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy

Publish an <ipfs-path> with another name, added by an 'ipfs key' command:
Publish without DHT (HTTP delegated publishers only):

> ipfs key gen --type=rsa --size=2048 mykey
> ipfs name publish --key=mykey /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmSrPmbaUKA3ZodhzPWZnpFgcPMFWF4QsxXbkWfEptTBJd: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy

Alternatively, publish an <ipfs-path> using a valid PeerID (as listed by
'ipfs key list -l'):

> ipfs name publish --key=QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
> ipfs name publish --allow-delegated /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy

Publish when offline (local publish, network optional):

> ipfs name publish --allow-offline /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy

Notes:

The --ttl option specifies the time duration for caching IPNS records.
Lower values like '1m' enable faster updates but increase network load,
while the default of 1 hour reduces traffic but may delay propagation.
Gateway operators may override this with Ipns.MaxCacheTTL configuration.

The --sequence option sets a custom sequence number for the IPNS record.
The sequence number must be monotonically increasing (greater than the
current record's sequence). This is useful for manually coordinating
updates across multiple writers. If not specified, the sequence number
increments automatically.

For faster IPNS updates, consider:
- Using a lower --ttl value (e.g., '1m' for quick updates)
- Enabling PubSub via Ipns.UsePubsub in the config

`,
},

@ -79,7 +104,9 @@ Alternatively, publish an <ipfs-path> using a valid PeerID (as listed by
cmds.StringOption(ttlOptionName, "Time duration hint, akin to --lifetime, indicating how long to cache this record before checking for updates.").WithDefault(ipns.DefaultRecordTTL.String()),
cmds.BoolOption(quieterOptionName, "Q", "Write only final IPNS Name encoded as CIDv1 (for use in /ipns content paths)."),
cmds.BoolOption(v1compatOptionName, "Produce a backward-compatible IPNS Record by including fields for both V1 and V2 signatures.").WithDefault(true),
cmds.BoolOption(allowOfflineOptionName, "When --offline, save the IPNS record to the local datastore without broadcasting to the network (instead of failing)."),
cmds.BoolOption(allowOfflineOptionName, "Allow publishing when offline - publishes to local datastore without requiring network connectivity."),
cmds.BoolOption(allowDelegatedOptionName, "Allow publishing without DHT connectivity - uses local datastore and HTTP delegated publishers only."),
cmds.Uint64Option(sequenceOptionName, "Set a custom sequence number for the IPNS record (must be higher than current)."),
ke.OptionIPNSBase,
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
@ -89,9 +116,15 @@ Alternatively, publish an <ipfs-path> using a valid PeerID (as listed by
}

allowOffline, _ := req.Options[allowOfflineOptionName].(bool)
allowDelegated, _ := req.Options[allowDelegatedOptionName].(bool)
compatibleWithV1, _ := req.Options[v1compatOptionName].(bool)
kname, _ := req.Options[keyOptionName].(string)

// Validate flag combinations
if allowOffline && allowDelegated {
return errors.New("cannot use both --allow-offline and --allow-delegated flags")
}

validTimeOpt, _ := req.Options[lifeTimeOptionName].(string)
validTime, err := time.ParseDuration(validTimeOpt)
if err != nil {
@ -100,6 +133,7 @@ Alternatively, publish an <ipfs-path> using a valid PeerID (as listed by

opts := []options.NamePublishOption{
options.Name.AllowOffline(allowOffline),
options.Name.AllowDelegated(allowDelegated),
options.Name.Key(kname),
options.Name.ValidTime(validTime),
options.Name.CompatibleWithV1(compatibleWithV1),
@ -114,6 +148,10 @@ Alternatively, publish an <ipfs-path> using a valid PeerID (as listed by
opts = append(opts, options.Name.TTL(d))
}

if sequence, found := req.Options[sequenceOptionName].(uint64); found {
opts = append(opts, options.Name.Sequence(sequence))
}

p, err := cmdutils.PathOrCidPath(req.Arguments[0])
if err != nil {
return err

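A compact sketch of the publish-mode selection added above (assumed pickPublishMode helper and placeholder URL, not Kubo's API): the two flags are mutually exclusive, and delegated mode additionally requires configured publishers.

package main

import (
	"errors"
	"fmt"
)

func pickPublishMode(allowOffline, allowDelegated bool, delegatedPublishers []string) (string, error) {
	if allowOffline && allowDelegated {
		return "", errors.New("cannot use both --allow-offline and --allow-delegated flags")
	}
	switch {
	case allowDelegated && len(delegatedPublishers) == 0:
		return "", errors.New("no delegated publishers configured")
	case allowDelegated:
		return "delegated", nil // local datastore + HTTP delegated publishers
	case allowOffline:
		return "offline", nil // local datastore only, network optional
	default:
		return "online", nil // DHT + any configured delegated publishers
	}
}

func main() {
	mode, err := pickPublishMode(false, true, []string{"https://delegated.example.invalid"})
	fmt.Println(mode, err) // delegated <nil>
}
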
178
core/commands/provide.go
Normal file
@ -0,0 +1,178 @@
package commands

import (
"fmt"
"io"
"text/tabwriter"
"time"

humanize "github.com/dustin/go-humanize"
"github.com/ipfs/boxo/provider"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
"golang.org/x/exp/constraints"
)

const (
provideQuietOptionName = "quiet"
)

var ProvideCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Control providing operations",
ShortDescription: `
Control providing operations.

NOTE: This command is experimental and not all provide-related commands have
been migrated to this namespace yet. For example, 'ipfs routing
provide|reprovide' are still under the routing namespace, 'ipfs stats
reprovide' provides statistics. Additionally, 'ipfs bitswap reprovide' and
'ipfs stats provide' are deprecated.
`,
},

Subcommands: map[string]*cmds.Command{
"clear": provideClearCmd,
"stat": provideStatCmd,
},
}

var provideClearCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Clear all CIDs from the provide queue.",
ShortDescription: `
Clear all CIDs from the reprovide queue.

Note: Kubo will automatically clear the queue when it detects a change of
Reprovider.Strategy upon a restart. For more information about reprovider
strategies, see:
https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy
`,
},
Options: []cmds.Option{
cmds.BoolOption(provideQuietOptionName, "q", "Do not write output."),
},
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
n, err := cmdenv.GetNode(env)
if err != nil {
return err
}

quiet, _ := req.Options[provideQuietOptionName].(bool)
if n.Provider == nil {
return nil
}

cleared := n.Provider.Clear()
if quiet {
return nil
}
_ = re.Emit(cleared)

return nil
},
Type: int(0),
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, cleared int) error {
quiet, _ := req.Options[provideQuietOptionName].(bool)
if quiet {
return nil
}

_, err := fmt.Fprintf(w, "removed %d items from provide queue\n", cleared)
return err
}),
},
}

type provideStats struct {
provider.ReproviderStats
fullRT bool
}

var provideStatCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Returns statistics about the node's provider system.",
ShortDescription: `
Returns statistics about the content the node is reproviding every
Reprovider.Interval according to Reprovider.Strategy:
https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider

This interface is not stable and may change from release to release.

`,
},
Arguments: []cmds.Argument{},
Options: []cmds.Option{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
return err
}

if !nd.IsOnline {
return ErrNotOnline
}

stats, err := nd.Provider.Stat()
if err != nil {
return err
}
_, fullRT := nd.DHTClient.(*fullrt.FullRT)

if err := res.Emit(provideStats{stats, fullRT}); err != nil {
return err
}

return nil
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s provideStats) error {
wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0)
defer wtr.Flush()

fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.TotalReprovides))
fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration))
fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration))
if !s.LastRun.IsZero() {
fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.LastRun))
if s.fullRT {
fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval)))
}
}
return nil
}),
},
Type: provideStats{},
}

func humanDuration(val time.Duration) string {
return val.Truncate(time.Microsecond).String()
}

func humanTime(val time.Time) string {
return val.Format("2006-01-02 15:04:05")
}

func humanNumber[T constraints.Float | constraints.Integer](n T) string {
nf := float64(n)
str := humanSI(nf, 0)
fullStr := humanFull(nf, 0)
if str != fullStr {
return fmt.Sprintf("%s\t(%s)", str, fullStr)
}
return str
}

func humanSI(val float64, decimals int) string {
v, unit := humanize.ComputeSI(val)
return fmt.Sprintf("%s%s", humanFull(v, decimals), unit)
}

func humanFull(val float64, decimals int) string {
return humanize.CommafWithDigits(val, decimals)
}
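The humanNumber helper above prints the SI-scaled value plus the exact comma-separated count when the two differ. A quick runnable illustration built on the same go-humanize calls:

package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	// ComputeSI scales the value and returns an SI unit suffix;
	// CommafWithDigits adds thousands separators.
	v, unit := humanize.ComputeSI(123456789)
	fmt.Printf("%s%s\t(%s)\n", humanize.CommafWithDigits(v, 0), unit, humanize.CommafWithDigits(123456789, 0))
	// Output: 123M	(123,456,789)
}
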
@ -16,7 +16,6 @@ import (
corerepo "github.com/ipfs/kubo/core/corerepo"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/kubo/repo/fsrepo/migrations"
"github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher"

humanize "github.com/dustin/go-humanize"
bstore "github.com/ipfs/boxo/blockstore"
@ -57,6 +56,7 @@ const (
repoQuietOptionName = "quiet"
repoSilentOptionName = "silent"
repoAllowDowngradeOptionName = "allow-downgrade"
repoToVersionOptionName = "to"
)

var repoGcCmd = &cmds.Command{
@ -283,8 +283,7 @@ var repoVerifyCmd = &cmds.Command{
return err
}

bs := bstore.NewBlockstore(nd.Repo.Datastore())
bs.HashOnRead(true)
bs := &bstore.ValidatingBlockstore{Blockstore: bstore.NewBlockstore(nd.Repo.Datastore())}

keys, err := bs.AllKeysChan(req.Context)
if err != nil {
@ -374,63 +373,81 @@ var repoVersionCmd = &cmds.Command{

var repoMigrateCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Apply any outstanding migrations to the repo.",
Tagline: "Apply repository migrations to a specific version.",
ShortDescription: `
'ipfs repo migrate' applies repository migrations to bring the repository
to a specific version. By default, migrates to the latest version supported
by this IPFS binary.

Examples:
ipfs repo migrate # Migrate to latest version
ipfs repo migrate --to=17 # Migrate to version 17
ipfs repo migrate --to=16 --allow-downgrade # Downgrade to version 16

WARNING: Downgrading a repository may cause data loss and requires using
an older IPFS binary that supports the target version. After downgrading,
you must use an IPFS implementation compatible with that repository version.

Repository versions 16+ use embedded migrations for faster, more reliable
migration. Versions below 16 require external migration tools.
`,
},
Options: []cmds.Option{
cmds.IntOption(repoToVersionOptionName, "Target repository version").WithDefault(fsrepo.RepoVersion),
cmds.BoolOption(repoAllowDowngradeOptionName, "Allow downgrading to a lower repo version"),
},
NoRemote: true,
// SetDoesNotUseRepo(true) might seem counter-intuitive since migrations
// do access the repo, but it's correct - we need direct filesystem access
// without going through the daemon. Migrations handle their own locking.
Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cctx := env.(*oldcmds.Context)
allowDowngrade, _ := req.Options[repoAllowDowngradeOptionName].(bool)
targetVersion, _ := req.Options[repoToVersionOptionName].(int)

_, err := fsrepo.Open(cctx.ConfigRoot)
// Get current repo version
currentVersion, err := migrations.RepoVersion(cctx.ConfigRoot)
if err != nil {
return fmt.Errorf("could not get current repo version: %w", err)
}

if err == nil {
fmt.Println("Repo does not require migration.")
// Check if migration is needed
if currentVersion == targetVersion {
fmt.Printf("Repository is already at version %d.\n", targetVersion)
return nil
} else if err != fsrepo.ErrNeedMigration {
return err
}

fmt.Println("Found outdated fs-repo, starting migration.")
// Validate downgrade request
if targetVersion < currentVersion && !allowDowngrade {
return fmt.Errorf("downgrade from version %d to %d requires --allow-downgrade flag", currentVersion, targetVersion)
}

// Read Migration section of IPFS config
configFileOpt, _ := req.Options[ConfigFileOption].(string)
migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot, configFileOpt)
// Check if repo is locked by daemon before running migration
locked, err := fsrepo.LockedByOtherProcess(cctx.ConfigRoot)
if err != nil {
return err
return fmt.Errorf("could not check repo lock: %w", err)
}
if locked {
return fmt.Errorf("cannot run migration while daemon is running (repo.lock exists)")
}

// Define function to create IPFS fetcher. Do not supply an
// already-constructed IPFS fetcher, because this may be expensive and
// not needed according to migration config. Instead, supply a function
// to construct the particular IPFS fetcher implementation used here,
// which is called only if an IPFS fetcher is needed.
newIpfsFetcher := func(distPath string) migrations.Fetcher {
return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot, configFileOpt)
}
fmt.Printf("Migrating repository from version %d to %d...\n", currentVersion, targetVersion)

// Fetch migrations from current distribution, or location from environ
fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist)

// Create fetchers according to migrationCfg.DownloadSources
fetcher, err := migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher)
// Use hybrid migration strategy that intelligently combines external and embedded migrations
err = migrations.RunHybridMigrations(cctx.Context(), targetVersion, cctx.ConfigRoot, allowDowngrade)
if err != nil {
return err
}
defer fetcher.Close()

err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", allowDowngrade)
if err != nil {
fmt.Println("The migrations of fs-repo failed:")
fmt.Println("Repository migration failed:")
fmt.Printf(" %s\n", err)
fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
fmt.Println(" https://github.com/ipfs/fs-repo-migrations")
fmt.Println(" https://github.com/ipfs/kubo")
return err
}

fmt.Printf("Success: fs-repo has been migrated to version %d.\n", fsrepo.RepoVersion)
fmt.Printf("Repository successfully migrated to version %d.\n", targetVersion)
if targetVersion < fsrepo.RepoVersion {
fmt.Println("WARNING: After downgrading, you must use an IPFS binary compatible with this repository version.")
}
return nil
},
}

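The rewritten migrate command gates all work behind two version checks. A minimal sketch of that guard logic (hypothetical checkMigration helper; the real command also verifies the repo lock and then delegates to migrations.RunHybridMigrations):

package main

import "fmt"

func checkMigration(current, target int, allowDowngrade bool) error {
	if current == target {
		return nil // nothing to do
	}
	if target < current && !allowDowngrade {
		return fmt.Errorf("downgrade from version %d to %d requires --allow-downgrade flag", current, target)
	}
	return nil
}

func main() {
	fmt.Println(checkMigration(17, 16, false)) // refused without --allow-downgrade
	fmt.Println(checkMigration(17, 16, true))  // <nil>
	fmt.Println(checkMigration(16, 17, false)) // <nil>
}
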
@ -65,6 +65,7 @@ ADVANCED COMMANDS
p2p Libp2p stream mounting (experimental)
filestore Manage the filestore (experimental)
mount Mount an IPFS read-only mount point (experimental)
provide Control providing operations

NETWORK COMMANDS
id Show info about IPFS peers
@ -133,6 +134,7 @@ var rootSubcommands = map[string]*cmds.Command{
"files": FilesCmd,
"filestore": FileStoreCmd,
"get": GetCmd,
"provide": ProvideCmd,
"pubsub": PubsubCmd,
"repo": RepoCmd,
"stats": StatsCmd,

@ -1,65 +1,22 @@
package commands

import (
"fmt"
"io"
"text/tabwriter"

cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
)

var statProvideCmd = &cmds.Command{
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "Deprecated command, use 'ipfs stats reprovide' instead.",
Tagline: "Deprecated command, use 'ipfs provide stat' instead.",
ShortDescription: `
'ipfs stats provide' is deprecated because provide and reprovide operations
are now distinct. This command may be replaced by provide only stats in the
future.
`,
},
Arguments: []cmds.Argument{},
Options: []cmds.Option{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
return err
}

if !nd.IsOnline {
return ErrNotOnline
}

stats, err := nd.Provider.Stat()
if err != nil {
return err
}
_, fullRT := nd.DHTClient.(*fullrt.FullRT)

if err := res.Emit(reprovideStats{stats, fullRT}); err != nil {
return err
}

return nil
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s reprovideStats) error {
wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0)
defer wtr.Flush()

fmt.Fprintf(wtr, "TotalProvides:\t%s\n", humanNumber(s.TotalReprovides))
fmt.Fprintf(wtr, "AvgProvideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration))
fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration))
if !s.LastRun.IsZero() {
fmt.Fprintf(wtr, "LastRun:\t%s\n", humanTime(s.LastRun))
if s.fullRT {
fmt.Fprintf(wtr, "NextRun:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval)))
}
}
return nil
}),
},
Type: reprovideStats{},
Arguments: provideStatCmd.Arguments,
Options: provideStatCmd.Options,
Run: provideStatCmd.Run,
Encoders: provideStatCmd.Encoders,
Type: provideStatCmd.Type,
}

@ -1,104 +1,21 @@
package commands

import (
"fmt"
"io"
"text/tabwriter"
"time"

humanize "github.com/dustin/go-humanize"
"github.com/ipfs/boxo/provider"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
"golang.org/x/exp/constraints"
)

type reprovideStats struct {
provider.ReproviderStats
fullRT bool
}

var statReprovideCmd = &cmds.Command{
Status: cmds.Experimental,
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "Returns statistics about the node's reprovider system.",
Tagline: "Deprecated command, use 'ipfs provide stat' instead.",
ShortDescription: `
Returns statistics about the content the node is reproviding every
Reprovider.Interval according to Reprovider.Strategy:
https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider

This interface is not stable and may change from release to release.

'ipfs stats reprovide' is deprecated because provider stats are now
available from 'ipfs provide stat'.
`,
},
Arguments: []cmds.Argument{},
Options: []cmds.Option{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
return err
}

if !nd.IsOnline {
return ErrNotOnline
}

stats, err := nd.Provider.Stat()
if err != nil {
return err
}
_, fullRT := nd.DHTClient.(*fullrt.FullRT)

if err := res.Emit(reprovideStats{stats, fullRT}); err != nil {
return err
}

return nil
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s reprovideStats) error {
wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0)
defer wtr.Flush()

fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.TotalReprovides))
fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration))
fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration))
if !s.LastRun.IsZero() {
fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.LastRun))
if s.fullRT {
fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval)))
}
}
return nil
}),
},
Type: reprovideStats{},
}

func humanDuration(val time.Duration) string {
return val.Truncate(time.Microsecond).String()
}

func humanTime(val time.Time) string {
return val.Format("2006-01-02 15:04:05")
}

func humanNumber[T constraints.Float | constraints.Integer](n T) string {
nf := float64(n)
str := humanSI(nf, 0)
fullStr := humanFull(nf, 0)
if str != fullStr {
return fmt.Sprintf("%s\t(%s)", str, fullStr)
}
return str
}

func humanSI(val float64, decimals int) string {
v, unit := humanize.ComputeSI(val)
return fmt.Sprintf("%s%s", humanFull(v, decimals), unit)
}

func humanFull(val float64, decimals int) string {
return humanize.CommafWithDigits(val, decimals)
Arguments: provideStatCmd.Arguments,
Options: provideStatCmd.Options,
Run: provideStatCmd.Run,
Encoders: provideStatCmd.Encoders,
Type: provideStatCmd.Type,
}

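Both deprecated commands above keep only their own Status and Helptext and borrow Arguments, Options, Run, Encoders, and Type from provideStatCmd, leaving a single implementation to maintain. A tiny standalone sketch of that delegation pattern (hypothetical Command struct standing in for go-ipfs-cmds):

package main

import "fmt"

type Command struct {
	Tagline string
	Run     func() string
}

func main() {
	newCmd := &Command{
		Tagline: "Returns provider statistics.",
		Run:     func() string { return "TotalReprovides: ..." },
	}
	legacyCmd := &Command{
		Tagline: "Deprecated command, use the new one instead.",
		Run:     newCmd.Run, // delegate, don't duplicate
	}
	fmt.Println(legacyCmd.Run())
}
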
10
core/core.go
@ -29,7 +29,6 @@ import (
provider "github.com/ipfs/boxo/provider"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
goprocess "github.com/jbenet/goprocess"
ddht "github.com/libp2p/go-libp2p-kad-dht/dual"
pubsub "github.com/libp2p/go-libp2p-pubsub"
psrouter "github.com/libp2p/go-libp2p-pubsub-router"
@ -98,6 +97,7 @@ type IpfsNode struct {
Filters *ma.Filters `optional:"true"`
Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper
Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht
ContentDiscovery routing.ContentDiscovery `optional:"true"` // the discovery part of the routing system
DNSResolver *madns.Resolver // the DNS resolver
IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver
UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver
@ -107,6 +107,8 @@ type IpfsNode struct {
Bitswap *bitswap.Bitswap `optional:"true"` // The Bitswap instance
Namesys namesys.NameSystem // the name system, resolves paths to hashes
Provider provider.System // the value provider system
ProvidingStrategy config.ReproviderStrategy `optional:"true"`
ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"`
IpnsRepub *ipnsrp.Republisher `optional:"true"`
ResourceManager network.ResourceManager `optional:"true"`

@ -118,8 +120,7 @@ type IpfsNode struct {

P2P *p2p.P2P `optional:"true"`

Process goprocess.Process
ctx context.Context
ctx context.Context

stop func() error

@ -212,7 +213,8 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.AddrInfo, error) {
return nil, err
}

return cfg.BootstrapPeers()
// Use auto-config resolution for actual bootstrap connectivity
return cfg.BootstrapPeersWithAutoConf()
}

func (n *IpfsNode) saveTempBootstrapPeers(ctx context.Context, peerList []peer.AddrInfo) error {

@ -26,6 +26,7 @@ import (
provider "github.com/ipfs/boxo/provider"
offlineroute "github.com/ipfs/boxo/routing/offline"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/core/coreiface/options"
@ -44,6 +45,8 @@ import (
"github.com/ipfs/kubo/repo"
)

var log = logging.Logger("coreapi")

type CoreAPI struct {
nctx context.Context

@ -70,7 +73,8 @@ type CoreAPI struct {
ipldPathResolver pathresolver.Resolver
unixFSPathResolver pathresolver.Resolver

provider provider.System
provider provider.System
providingStrategy config.ReproviderStrategy

pubSub *pubsub.PubSub

@ -185,7 +189,8 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e
ipldPathResolver: n.IPLDPathResolver,
unixFSPathResolver: n.UnixFSPathResolver,

provider: n.Provider,
provider: n.Provider,
providingStrategy: n.ProvidingStrategy,

pubSub: n.PubSub,

@ -235,8 +240,6 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e
return nil, fmt.Errorf("error constructing namesys: %w", err)
}

subAPI.provider = provider.NewNoopProvider()

subAPI.peerstore = nil
subAPI.peerHost = nil
subAPI.recordValidator = nil

@ -45,9 +45,25 @@ func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.Nam
span.SetAttributes(attribute.Float64("ttl", options.TTL.Seconds()))
}

err = api.checkOnline(options.AllowOffline)
if err != nil {
return ipns.Name{}, err
// Handle different publishing modes
if options.AllowDelegated {
// AllowDelegated mode: check if delegated publishers are configured
cfg, err := api.repo.Config()
if err != nil {
return ipns.Name{}, fmt.Errorf("failed to read config: %w", err)
}
delegatedPublishers := cfg.DelegatedPublishersWithAutoConf()
if len(delegatedPublishers) == 0 {
return ipns.Name{}, errors.New("no delegated publishers configured: add Ipns.DelegatedPublishers or use --allow-offline for local-only publishing")
}
// For allow-delegated mode, we only require that we have delegated publishers configured
// The node doesn't need P2P connectivity since we're using HTTP publishing
} else {
// Normal mode: check online status with allow-offline flag
err = api.checkOnline(options.AllowOffline)
if err != nil {
return ipns.Name{}, err
}
}

k, err := keylookup(api.privateKey, api.repo.Keystore(), options.Key)
@ -66,6 +82,10 @@ func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.Nam
publishOptions = append(publishOptions, namesys.PublishWithTTL(*options.TTL))
}

if options.Sequence != nil {
publishOptions = append(publishOptions, namesys.PublishWithSequence(*options.Sequence))
}

err = api.namesys.Publish(ctx, k, p, publishOptions...)
if err != nil {
return ipns.Name{}, err

@ -44,10 +44,6 @@ func (api *PinAPI) Add(ctx context.Context, p path.Path, opts ...caopts.PinAddOp
return fmt.Errorf("pin: %s", err)
}

if err := api.provider.Provide(ctx, dagNode.Cid(), true); err != nil {
return err
}

return api.pinning.Flush(ctx)
}

@ -70,6 +70,9 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity
c.Identity = ident
c.Experimental.FilestoreEnabled = true
c.AutoTLS.Enabled = config.False // disable so no /ws listener is added
// For provider tests, avoid that content gets
// auto-provided without calling "provide" (unless pinned).
c.Reprovider.Strategy = config.NewOptionalString("roots")

ds := syncds.MutexWrap(datastore.NewMapDatastore())
r := &repo.Mock{

@ -39,7 +39,7 @@ func TestPathUnixFSHAMTPartial(t *testing.T) {
dir[strconv.Itoa(i)] = files.NewBytesFile([]byte(strconv.Itoa(i)))
}

r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false))
r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false, ""))
if err != nil {
t.Fatal(err)
}

@ -16,6 +16,7 @@ import (
uio "github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
provider "github.com/ipfs/boxo/provider"
cid "github.com/ipfs/go-cid"
cidutil "github.com/ipfs/go-cidutil"
ds "github.com/ipfs/go-datastore"
@ -58,6 +59,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
attribute.Bool("maxhamtfanoutset", settings.MaxHAMTFanoutSet),
attribute.Int("layout", int(settings.Layout)),
attribute.Bool("pin", settings.Pin),
attribute.String("pin-name", settings.PinName),
attribute.Bool("onlyhash", settings.OnlyHash),
attribute.Bool("fscache", settings.FsCache),
attribute.Bool("nocopy", settings.NoCopy),
@ -101,7 +103,22 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
bserv := blockservice.New(addblockstore, exch,
blockservice.WriteThrough(cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough)),
) // hash security 001
dserv := merkledag.NewDAGService(bserv)

var dserv ipld.DAGService = merkledag.NewDAGService(bserv)

// wrap the DAGService in a providingDAG service which provides every block written.
// note about strategies:
// - "all" gets handled directly at the blockstore so no need to provide
// - "roots" gets handled in the pinner
// - "mfs" gets handled in mfs
// We need to provide the "pinned" cases only. Added blocks are not
// going to be provided by the blockstore (wrong strategy for that),
// nor by the pinner (the pinner doesn't traverse the pinned DAG itself, it only
// handles roots). This wrapping ensures all blocks of pinned content get provided.
if settings.Pin && !settings.OnlyHash &&
(api.providingStrategy&config.ReproviderStrategyPinned) != 0 {
dserv = &providingDagService{dserv, api.provider}
}

// add a sync call to the DagService
// this ensures that data written to the DagService is persisted to the underlying datastore
@ -125,6 +142,11 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
}
}

// Note: the dag service gets wrapped multiple times:
// 1. providingDagService (if pinned strategy) - provides blocks as they're added
// 2. syncDagService - ensures data persistence
// 3. batchingDagService (in coreunix.Adder) - batches operations for efficiency

fileAdder, err := coreunix.NewAdder(ctx, pinning, addblockstore, syncDserv)
if err != nil {
return path.ImmutablePath{}, err
@ -136,6 +158,9 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
fileAdder.Progress = settings.Progress
}
fileAdder.Pin = settings.Pin && !settings.OnlyHash
if settings.Pin {
fileAdder.PinName = settings.PinName
}
fileAdder.Silent = settings.Silent
fileAdder.RawLeaves = settings.RawLeaves
if settings.MaxFileLinksSet {
@ -179,7 +204,8 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
if err != nil {
return path.ImmutablePath{}, err
}
mr, err := mfs.NewRoot(ctx, md, emptyDirNode, nil)
// MFS root for OnlyHash mode: provider is nil since we're not storing/providing anything
mr, err := mfs.NewRoot(ctx, md, emptyDirNode, nil, nil)
if err != nil {
return path.ImmutablePath{}, err
}
@ -192,12 +218,6 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
return path.ImmutablePath{}, err
}

if !settings.OnlyHash {
if err := api.provider.Provide(ctx, nd.Cid(), true); err != nil {
return path.ImmutablePath{}, err
}
}

return path.FromCid(nd.Cid()), nil
}

@ -363,3 +383,40 @@ type syncDagService struct {
func (s *syncDagService) Sync() error {
return s.syncFn()
}

type providingDagService struct {
ipld.DAGService
provider provider.System
}

func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error {
if err := pds.DAGService.Add(ctx, n); err != nil {
return err
}
// Provider errors are logged but not propagated.
// We don't want DAG operations to fail due to providing issues.
// The user's data is still stored successfully even if the
// announcement to the routing system fails temporarily.
if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil {
log.Error(err)
}
return nil
}

func (pds *providingDagService) AddMany(ctx context.Context, nds []ipld.Node) error {
if err := pds.DAGService.AddMany(ctx, nds); err != nil {
return err
}
// Same error handling philosophy as Add(): log but don't fail.
// Note: Provide calls are intentionally blocking here - the Provider
// implementation should handle concurrency/queuing internally.
for _, n := range nds {
if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil {
log.Error(err)
break
}
}
return nil
}

var _ ipld.DAGService = (*providingDagService)(nil)

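providingDagService is a decorator: it forwards writes to the wrapped DAGService and then announces each CID, logging provide failures instead of returning them. A standalone sketch of the pattern with hypothetical interfaces (not boxo's actual types):

package main

import "fmt"

type NodeAdder interface {
	Add(cid string) error
}

type baseService struct{}

func (baseService) Add(cid string) error { return nil }

type providingService struct {
	NodeAdder                     // embedded: inherits all other methods
	provide func(cid string) error
}

func (p *providingService) Add(cid string) error {
	if err := p.NodeAdder.Add(cid); err != nil {
		return err
	}
	// Providing failures must not fail the write; log and continue.
	if err := p.provide(cid); err != nil {
		fmt.Println("provide error:", err)
	}
	return nil
}

func main() {
	svc := &providingService{baseService{}, func(cid string) error {
		fmt.Println("providing", cid)
		return nil
	}}
	_ = svc.Add("bafy...")
}
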
@ -13,8 +13,6 @@ import (

logging "github.com/ipfs/go-log/v2"
core "github.com/ipfs/kubo/core"
"github.com/jbenet/goprocess"
periodicproc "github.com/jbenet/goprocess/periodic"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@ -97,7 +95,7 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error
}

select {
case <-node.Process.Closing():
case <-node.Context().Done():
return fmt.Errorf("failed to start server, process closing")
default:
}
@ -107,20 +105,31 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error
}

var serverError error
serverProc := node.Process.Go(func(p goprocess.Process) {
serverClosed := make(chan struct{})
go func() {
serverError = server.Serve(lis)
})
close(serverClosed)
}()

// wait for server to exit.
select {
case <-serverProc.Closed():
case <-serverClosed:
// if node being closed before server exits, close server
case <-node.Process.Closing():
case <-node.Context().Done():
log.Infof("server at %s terminating...", addr)

warnProc := periodicproc.Tick(5*time.Second, func(_ goprocess.Process) {
log.Infof("waiting for server at %s to terminate...", addr)
})
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Infof("waiting for server at %s to terminate...", addr)
case <-serverClosed:
return
}
}
}()

// This timeout shouldn't be necessary if all of our commands
// are obeying their contexts but we should have *some* timeout.
@ -130,10 +139,8 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error

// Should have already closed but we still need to wait for it
// to set the error.
<-serverProc.Closed()
<-serverClosed
serverError = err

warnProc.Close()
}

log.Infof("server at %s terminated", addr)

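The goprocess removal above boils down to a common channel idiom: a goroutine closes a channel when the server exits, and a select waits on either that channel or the node's context, with a plain time.Ticker replacing the periodicproc warning loop. A runnable sketch (hypothetical serve helper):

package main

import (
	"context"
	"fmt"
	"time"
)

func serve(ctx context.Context, run func() error) error {
	var serverError error
	serverClosed := make(chan struct{})
	go func() {
		serverError = run()
		close(serverClosed) // happens-after the write to serverError
	}()

	select {
	case <-serverClosed:
	case <-ctx.Done():
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		go func() {
			for {
				select {
				case <-ticker.C:
					fmt.Println("waiting for server to terminate...")
				case <-serverClosed:
					return
				}
			}
		}()
		<-serverClosed // wait for the goroutine to record its error
	}
	return serverError
}

func main() {
	err := serve(context.Background(), func() error {
		time.Sleep(10 * time.Millisecond)
		return nil
	})
	fmt.Println("server exited:", err)
}
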
@ -97,11 +97,21 @@ func Libp2pGatewayOption() ServeOption {
return nil, err
}

// Get gateway configuration from the node's config
cfg, err := n.Repo.Config()
if err != nil {
return nil, err
}

gwConfig := gateway.Config{
DeserializedResponses: false,
NoDNSLink: true,
// Keep these constraints for security
DeserializedResponses: false, // Trustless-only
NoDNSLink: true, // No DNS resolution
PublicGateways: nil,
Menu: nil,
// Apply timeout and concurrency limits from user config
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
}

handler := gateway.NewHandler(gwConfig, &offlineGatewayErrWrapper{gwimpl: backend})
@ -258,6 +268,8 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er
DisableHTMLErrors: cfg.Gateway.DisableHTMLErrors.WithDefault(config.DefaultDisableHTMLErrors),
NoDNSLink: cfg.Gateway.NoDNSLink,
PublicGateways: map[string]*gateway.PublicGateway{},
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
}

// Add default implicit known gateways, such as subdomain gateway on localhost.

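The RetrievalTimeout and MaxConcurrentRequests plumbing relies on Kubo's optional-with-default config pattern. A simplified stand-in (not Kubo's actual OptionalDuration implementation) showing the WithDefault semantics: a nil user value falls back to the compiled-in default.

package main

import (
	"fmt"
	"time"
)

type OptionalDuration struct {
	value *time.Duration // nil means "not set by the user"
}

func (d OptionalDuration) WithDefault(def time.Duration) time.Duration {
	if d.value == nil {
		return def
	}
	return *d.value
}

func main() {
	var unset OptionalDuration
	custom := 30 * time.Second
	set := OptionalDuration{value: &custom}
	fmt.Println(unset.WithDefault(2 * time.Minute)) // 2m0s
	fmt.Println(set.WithDefault(2 * time.Minute))   // 30s
}
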
@ -87,6 +87,7 @@ func MetricsCollectionOption(handlerName string) ServeOption {
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}

// Legacy metric - new metrics are provided by boxo/gateway as gw_http_responses_total
reqCnt := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: opts.Namespace,

@ -5,9 +5,8 @@ import (
"strings"
"testing"

"github.com/ipfs/kubo/thirdparty/assert"

protocol "github.com/libp2p/go-libp2p/core/protocol"
"github.com/stretchr/testify/require"
)

type TestCase struct {
@ -29,12 +28,10 @@ func TestParseRequest(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, url, strings.NewReader(""))

parsed, err := parseRequest(req)
if err != nil {
t.Fatal(err)
}
assert.True(parsed.httpPath == tc.path, t, "proxy request path")
assert.True(parsed.name == protocol.ID(tc.name), t, "proxy request name")
assert.True(parsed.target == tc.target, t, "proxy request peer-id")
require.NoError(t, err)
require.Equal(t, tc.path, parsed.httpPath, "proxy request path")
require.Equal(t, protocol.ID(tc.name), parsed.name, "proxy request name")
require.Equal(t, tc.target, parsed.target, "proxy request peer-id")
}
}

@ -49,8 +46,6 @@ func TestParseRequestInvalidPath(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, url, strings.NewReader(""))

_, err := parseRequest(req)
if err == nil {
t.Fail()
}
require.Error(t, err)
}
}

@ -1,11 +1,12 @@
package corehttp

// WebUI version confirmed to work with this Kubo version
const WebUIPath = "/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y" // v4.7.0
const WebUIPath = "/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu" // v4.8.0

// WebUIPaths is a list of all past webUI paths.
var WebUIPaths = []string{
WebUIPath,
"/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y", // v4.7.0
"/ipfs/bafybeibpaa5kqrj4gkemiswbwndjqiryl65cks64ypwtyerxixu56gnvvm", // v4.6.0
"/ipfs/bafybeiata4qg7xjtwgor6r5dw63jjxyouenyromrrb4lrewxrlvav7gzgi", // v4.5.0
"/ipfs/bafybeigp3zm7cqoiciqk5anlheenqjsgovp7j7zq6hah4nu6iugdgb4nby", // v4.4.2

@ -16,6 +16,8 @@ type NamePublishSettings struct {
|
||||
TTL *time.Duration
|
||||
CompatibleWithV1 bool
|
||||
AllowOffline bool
|
||||
AllowDelegated bool
|
||||
Sequence *uint64
|
||||
}
|
||||
|
||||
type NameResolveSettings struct {
|
||||
@ -34,7 +36,8 @@ func NamePublishOptions(opts ...NamePublishOption) (*NamePublishSettings, error)
|
||||
ValidTime: DefaultNameValidTime,
|
||||
Key: "self",
|
||||
|
||||
AllowOffline: false,
|
||||
AllowOffline: false,
|
||||
AllowDelegated: false,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
@ -96,6 +99,16 @@ func (nameOpts) AllowOffline(allow bool) NamePublishOption {
|
||||
}
|
||||
}
|
||||
|
||||
// AllowDelegated is an option for Name.Publish which allows publishing without
|
||||
// DHT connectivity, using local datastore and HTTP delegated publishers only.
|
||||
// Default value is false
|
||||
func (nameOpts) AllowDelegated(allowDelegated bool) NamePublishOption {
|
||||
return func(settings *NamePublishSettings) error {
|
||||
settings.AllowDelegated = allowDelegated
|
||||
return nil
|
||||
}
|
||||
}
|
||||
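
// A usage sketch (assumption, not part of this commit): publishing through
// the CoreAPI with the new option enabled, so the record goes to the local
// datastore and HTTP delegated publishers without requiring DHT connectivity:
//
//	name, err := api.Name().Publish(ctx, p, opt.Name.AllowDelegated(true))
//
// The api, ctx, and p identifiers are assumed from the usual coreiface setup.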

// TTL is an option for Name.Publish which specifies the time duration the
// published record should be cached for (caution: experimental).
func (nameOpts) TTL(ttl time.Duration) NamePublishOption {
@ -105,6 +118,15 @@ func (nameOpts) TTL(ttl time.Duration) NamePublishOption {
}
}

// Sequence is an option for Name.Publish which specifies the sequence number of
// a namesys record.
func (nameOpts) Sequence(seq uint64) NamePublishOption {
return func(settings *NamePublishSettings) error {
settings.Sequence = &seq
return nil
}
}

// CompatibleWithV1 is an option for [Name.Publish] which specifies if the
// created record should be backwards compatible with V1 IPNS Records.
func (nameOpts) CompatibleWithV1(compatible bool) NamePublishOption {

@ -39,6 +39,7 @@ type UnixfsAddSettings struct {
Layout Layout

Pin bool
PinName string
OnlyHash bool
FsCache bool
NoCopy bool
@ -83,6 +84,7 @@ func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix,
Layout: BalancedLayout,

Pin: false,
PinName: "",
OnlyHash: false,
FsCache: false,
NoCopy: false,
@ -280,9 +282,12 @@ func (unixfsOpts) Layout(layout Layout) UnixfsAddOption {
}

// Pin tells the adder to pin the file root recursively after adding
func (unixfsOpts) Pin(pin bool) UnixfsAddOption {
func (unixfsOpts) Pin(pin bool, pinName string) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.Pin = pin
if pin {
settings.PinName = pinName
}
return nil
}
}

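// A usage sketch (assumption, not part of this commit): adding a file and
// recursively pinning its root under a human-readable pin name:
//
//	_, err := api.Unixfs().Add(ctx, someFile, options.Unixfs.Pin(true, "my-backup"))
//
// Passing an empty name keeps the previous unnamed-pin behavior, as the
// updated TestAddPinned call further down shows.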
@ -142,8 +142,6 @@ func (tp *TestSuite) TestBasicPublishResolveKey(t *testing.T) {
}

func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) {
t.Skip("ValidTime doesn't appear to work at this time resolution")

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
apis, err := tp.MakeAPISwarm(t, ctx, 5)
@ -155,14 +153,25 @@ func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) {
self, err := api.Key().Self(ctx)
require.NoError(t, err)

name, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Millisecond*100))
name, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Second*1))
require.NoError(t, err)
require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String())

time.Sleep(time.Second)

_, err = api.Name().Resolve(ctx, name.String())
// First resolve should succeed (before expiration)
resPath, err := api.Name().Resolve(ctx, name.String())
require.NoError(t, err)
require.Equal(t, p.String(), resPath.String())

// Wait for record to expire (1 second ValidTime + buffer)
time.Sleep(time.Second * 2)

// Second resolve should now fail after ValidTime expiration (cached)
_, err = api.Name().Resolve(ctx, name.String())
require.Error(t, err, "IPNS resolution should fail after ValidTime expires (cached)")

// Third resolve should also fail after ValidTime expiration (non-cached)
_, err = api.Name().Resolve(ctx, name.String(), opt.Name.Cache(false))
require.Error(t, err, "IPNS resolution should fail after ValidTime expires (non-cached)")
}

// TODO: When swarm api is created, add multinode tests

@ -433,7 +433,7 @@ func getThreeChainedNodes(t *testing.T, ctx context.Context, api iface.CoreAPI,
return immutablePathCidContainer{leaf}, parent, grandparent
}

func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recusive, direct, indirect []cidContainer) {
func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recursive, direct, indirect []cidContainer) {
assertPinLsAllConsistency(t, ctx, api)

list, err := accPins(ctx, api, opt.Pin.Ls.Recursive())
@ -441,7 +441,7 @@ func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recusi
t.Fatal(err)
}

assertPinCids(t, list, recusive...)
assertPinCids(t, list, recursive...)

list, err = accPins(ctx, api, opt.Pin.Ls.Direct())
if err != nil {

@ -171,6 +171,13 @@ func (tp *TestSuite) TestRoutingFindProviders(t *testing.T) {
t.Fatal(err)
}

// Pin so that it is provided, given that providing strategy is
// "roots" and addTestObject does not pin.
err = apis[0].Pin().Add(ctx, p)
if err != nil {
t.Fatal(err)
}

time.Sleep(3 * time.Second)

out, err := apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1))

@ -539,7 +539,7 @@ func (tp *TestSuite) TestAddPinned(t *testing.T) {
t.Fatal(err)
}

_, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true))
_, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true, ""))
if err != nil {
t.Fatal(err)
}

@ -76,6 +76,7 @@ type Adder struct {
Out chan<- interface{}
Progress bool
Pin bool
PinName string
Trickle bool
RawLeaves bool
MaxLinks int
@ -102,7 +103,7 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) {
}

// Note, this adds it to DAGService already.
mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, mfs.MkdirOpts{
mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, nil, mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
@ -182,9 +183,10 @@ func (adder *Adder) curRootNode() (ipld.Node, error) {
return root, err
}

// Recursively pins the root node of Adder and
// writes the pin state to the backing datastore.
func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node) error {
// PinRoot recursively pins the root node of Adder with an optional name and
// writes the pin state to the backing datastore. If name is empty, the pin
// will be created without a name.
func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node, name string) error {
ctx, span := tracing.Span(ctx, "CoreUnix.Adder", "PinRoot")
defer span.End()

@ -207,7 +209,7 @@ func (adder *Adder) PinRoot(ctx context.Context, root ipld.Node) error {
adder.tempRoot = rnk
}

err = adder.pinning.PinWithMode(ctx, rnk, pin.Recursive, "")
err = adder.pinning.PinWithMode(ctx, rnk, pin.Recursive, name)
if err != nil {
return err
}
@ -369,7 +371,12 @@ func (adder *Adder) AddAllAndPin(ctx context.Context, file files.Node) (ipld.Nod
if !adder.Pin {
return nd, nil
}
return nd, adder.PinRoot(ctx, nd)

if err := adder.PinRoot(ctx, nd, adder.PinName); err != nil {
return nil, err
}

return nd, nil
}

func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Node, toplevel bool) error {
@ -409,7 +416,7 @@ func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Nod
case files.Directory:
return adder.addDir(ctx, path, f, toplevel)
case *files.Symlink:
return adder.addSymlink(path, f)
return adder.addSymlink(ctx, path, f)
case files.File:
return adder.addFile(path, f)
default:
@ -417,7 +424,7 @@ func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Nod
}
}

func (adder *Adder) addSymlink(path string, l *files.Symlink) error {
func (adder *Adder) addSymlink(ctx context.Context, path string, l *files.Symlink) error {
sdata, err := unixfs.SymlinkData(l.Target)
if err != nil {
return err
@ -475,7 +482,7 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory

// if we need to store mode or modification time then create a new root which includes that data
if toplevel && (adder.FileMode != 0 || !adder.FileMtime.IsZero()) {
mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil,
mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, nil,
mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
@ -530,7 +537,7 @@ func (adder *Adder) maybePauseForGC(ctx context.Context) error {
return err
}

err = adder.PinRoot(ctx, rn)
err = adder.PinRoot(ctx, rn, "")
if err != nil {
return err
}

@ -93,8 +93,15 @@ func TestAddMultipleGCLive(t *testing.T) {
// finish write and unblock gc
pipew1.Close()

// Should have gotten the lock at this point
<-gc1started
// Wait for GC to acquire the lock
// The adder needs to finish processing file 'a' and call maybePauseForGC
// when starting file 'b' before GC can proceed
select {
case <-gc1started:
// GC got the lock as expected
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for GC to start - possible deadlock")
}

removedHashes := make(map[string]struct{})
for r := range gc1out {
@ -123,7 +130,15 @@ func TestAddMultipleGCLive(t *testing.T) {

pipew2.Close()

<-gc2started
// Wait for second GC to acquire the lock
// The adder needs to finish processing file 'b' and call maybePauseForGC
// when starting file 'c' before GC can proceed
select {
case <-gc2started:
// GC got the lock as expected
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for second GC to start - possible deadlock")
}

for r := range gc2out {
if r.Error != nil {

@ -14,16 +14,14 @@ import (
"github.com/ipfs/boxo/bitswap/network/httpnet"
blockstore "github.com/ipfs/boxo/blockstore"
exchange "github.com/ipfs/boxo/exchange"
"github.com/ipfs/boxo/exchange/providing"
provider "github.com/ipfs/boxo/provider"
rpqm "github.com/ipfs/boxo/routing/providerquerymanager"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
version "github.com/ipfs/kubo"
"github.com/ipfs/kubo/config"
irouting "github.com/ipfs/kubo/routing"
"github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/routing"
"go.uber.org/fx"

blocks "github.com/ipfs/go-block-format"
@ -75,7 +73,7 @@ type bitswapIn struct {
Mctx helpers.MetricsCtx
Cfg *config.Config
Host host.Host
Rt irouting.ProvideManyRouter
Discovery routing.ContentDiscovery
Bs blockstore.GCBlockstore
BitswapOpts []bitswap.Option `group:"bitswap-options"`
}
@ -88,9 +86,14 @@ func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} {
var bitswapNetworks, bitswapLibp2p network.BitSwapNetwork
var bitswapBlockstore blockstore.Blockstore = in.Bs

connEvtMgr := network.NewConnectEventManager()

libp2pEnabled := in.Cfg.Bitswap.Libp2pEnabled.WithDefault(config.DefaultBitswapLibp2pEnabled)
if libp2pEnabled {
bitswapLibp2p = bsnet.NewFromIpfsHost(in.Host)
bitswapLibp2p = bsnet.NewFromIpfsHost(
in.Host,
bsnet.WithConnectEventManager(connEvtMgr),
)
}

if httpEnabled {
@ -112,6 +115,7 @@ func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} {
httpnet.WithMaxBlockSize(int64(maxBlockSize)),
httpnet.WithUserAgent(version.GetUserAgentVersion()),
httpnet.WithMetricsLabelsForEndpoints(httpCfg.Allowlist),
httpnet.WithConnectEventManager(connEvtMgr),
)
bitswapNetworks = network.New(in.Host.Peerstore(), bitswapLibp2p, bitswapHTTP)
} else if libp2pEnabled {
@ -178,7 +182,7 @@ func Bitswap(serverEnabled, libp2pEnabled, httpEnabled bool) interface{} {
ignoredPeerIDs = append(ignoredPeerIDs, pid)
}
providerQueryMgr, err := rpqm.New(bitswapNetworks,
in.Rt,
in.Discovery,
rpqm.WithMaxProviders(maxProviders),
rpqm.WithIgnoreProviders(ignoredPeerIDs...),
)
@ -216,32 +220,6 @@ func OnlineExchange(isBitswapActive bool) interface{} {
}
}

type providingExchangeIn struct {
fx.In

BaseExch exchange.Interface
Provider provider.System
}

// ProvidingExchange creates a providing.Exchange with the existing exchange
// and the provider.System.
// We cannot do this in OnlineExchange because it causes cycles so this is for
// a decorator.
func ProvidingExchange(provide bool) interface{} {
return func(in providingExchangeIn, lc fx.Lifecycle) exchange.Interface {
exch := in.BaseExch
if provide {
exch = providing.New(in.BaseExch, in.Provider)
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
return exch.Close()
},
})
}
return exch
}
}

type noopExchange struct {
closer io.Closer
}

@ -7,6 +7,7 @@ import (

"go.uber.org/fx"

"github.com/ipfs/boxo/autoconf"
"github.com/ipfs/kubo/core/node/helpers"
"github.com/ipfs/kubo/core/node/libp2p"
"github.com/ipfs/kubo/repo"
@ -125,7 +126,7 @@ func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
return nil, err
}

c.Bootstrap = cfg.DefaultBootstrapAddresses
c.Bootstrap = autoconf.FallbackBootstrapPeers
c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/udp/4001/quic-v1"}
c.Identity.PeerID = pid.String()
c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

@ -2,6 +2,7 @@ package node

import (
"context"
"errors"
"fmt"

"github.com/ipfs/boxo/blockservice"
@ -17,6 +18,7 @@ import (
pathresolver "github.com/ipfs/boxo/path/resolver"
pin "github.com/ipfs/boxo/pinning/pinner"
"github.com/ipfs/boxo/pinning/pinner/dspinner"
provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
format "github.com/ipfs/go-ipld-format"
@ -47,25 +49,50 @@ func BlockService(cfg *config.Config) func(lc fx.Lifecycle, bs blockstore.Blocks
}

// Pinning creates new pinner which tells GC which blocks should be kept
func Pinning(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo) (pin.Pinner, error) {
rootDS := repo.Datastore()
func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov provider.System) (pin.Pinner, error) {
// Parse strategy at function creation time (not inside the returned function)
// This happens before the provider is created, which is why we pass the strategy
// string and parse it here, rather than using fx-provided ProvidingStrategy.
strategyFlag := config.ParseReproviderStrategy(strategy)

syncFn := func(ctx context.Context) error {
if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil {
return err
return func(bstore blockstore.Blockstore,
ds format.DAGService,
repo repo.Repo,
prov provider.System) (pin.Pinner, error) {
rootDS := repo.Datastore()

syncFn := func(ctx context.Context) error {
if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil {
return err
}
return rootDS.Sync(ctx, filestore.FilestorePrefix)
}
return rootDS.Sync(ctx, filestore.FilestorePrefix)
syncDs := &syncDagService{ds, syncFn}

ctx := context.TODO()

var opts []dspinner.Option
roots := (strategyFlag & config.ReproviderStrategyRoots) != 0
pinned := (strategyFlag & config.ReproviderStrategyPinned) != 0

// Important: Only one of WithPinnedProvider or WithRootsProvider should be active.
// Having both would cause duplicate root advertisements since "pinned" includes all
// pinned content (roots + children), while "roots" is just the root CIDs.
// We prioritize "pinned" if both are somehow set (though this shouldn't happen
// with proper strategy parsing).
if pinned {
opts = append(opts, dspinner.WithPinnedProvider(prov))
} else if roots {
opts = append(opts, dspinner.WithRootsProvider(prov))
}

pinning, err := dspinner.New(ctx, rootDS, syncDs, opts...)
if err != nil {
return nil, err
}

return pinning, nil
}
syncDs := &syncDagService{ds, syncFn}

ctx := context.TODO()

pinning, err := dspinner.New(ctx, rootDS, syncDs)
if err != nil {
return nil, err
}

return pinning, nil
}
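
// A sketch of how the strategy flags above combine (based on the bitmask
// usage visible in this diff; the snippet itself is illustrative, not part
// of the commit):
//
//	flags := config.ParseReproviderStrategy("pinned+mfs")
//	if flags&config.ReproviderStrategyPinned != 0 {
//		// provide all pinned content (roots + children)
//	}
//	if flags&config.ReproviderStrategyMFS != 0 {
//		// additionally provide MFS content
//	}
//
// An unrecognized strategy parses to 0, which callers treat as an error.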

var (
@ -152,63 +179,76 @@ func Dag(bs blockservice.BlockService) format.DAGService {
}

// Files loads persisted MFS root
func Files(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore) (*mfs.Root, error) {
dsk := datastore.NewKey("/local/filesroot")
pf := func(ctx context.Context, c cid.Cid) error {
rootDS := repo.Datastore()
if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil {
return err
}
if err := rootDS.Sync(ctx, filestore.FilestorePrefix); err != nil {
return err
func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) {
dsk := datastore.NewKey("/local/filesroot")
pf := func(ctx context.Context, c cid.Cid) error {
rootDS := repo.Datastore()
if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil {
return err
}
if err := rootDS.Sync(ctx, filestore.FilestorePrefix); err != nil {
return err
}

if err := rootDS.Put(ctx, dsk, c.Bytes()); err != nil {
return err
}
return rootDS.Sync(ctx, dsk)
}

if err := rootDS.Put(ctx, dsk, c.Bytes()); err != nil {
return err
}
return rootDS.Sync(ctx, dsk)
}
var nd *merkledag.ProtoNode
ctx := helpers.LifecycleCtx(mctx, lc)
val, err := repo.Datastore().Get(ctx, dsk)

var nd *merkledag.ProtoNode
ctx := helpers.LifecycleCtx(mctx, lc)
val, err := repo.Datastore().Get(ctx, dsk)
switch {
case errors.Is(err, datastore.ErrNotFound):
nd = unixfs.EmptyDirNode()
err := dag.Add(ctx, nd)
if err != nil {
return nil, fmt.Errorf("failure writing filesroot to dagstore: %s", err)
}
case err == nil:
c, err := cid.Cast(val)
if err != nil {
return nil, err
}

switch {
case err == datastore.ErrNotFound || val == nil:
nd = unixfs.EmptyDirNode()
err := dag.Add(ctx, nd)
if err != nil {
return nil, fmt.Errorf("failure writing filesroot to dagstore: %s", err)
}
case err == nil:
c, err := cid.Cast(val)
if err != nil {
offlineDag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
rnd, err := offlineDag.Get(ctx, c)
if err != nil {
return nil, fmt.Errorf("error loading filesroot from dagservice: %s", err)
}

pbnd, ok := rnd.(*merkledag.ProtoNode)
if !ok {
return nil, merkledag.ErrNotProtobuf
}

nd = pbnd
default:
return nil, err
}

offineDag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
rnd, err := offineDag.Get(ctx, c)
if err != nil {
return nil, fmt.Errorf("error loading filesroot from dagservice: %s", err)
// MFS (Mutable File System) provider integration:
// Only pass the provider to MFS when the strategy includes "mfs".
// MFS will call Provide() on every DAGService.Add() operation,
// which is sufficient for the "mfs" strategy - it ensures all
// MFS content gets announced as it's added or modified.
// For non-mfs strategies, we set provider to nil to avoid unnecessary providing.
strategyFlag := config.ParseReproviderStrategy(strategy)
if strategyFlag&config.ReproviderStrategyMFS == 0 {
prov = nil
}

pbnd, ok := rnd.(*merkledag.ProtoNode)
if !ok {
return nil, merkledag.ErrNotProtobuf
}
root, err := mfs.NewRoot(ctx, dag, nd, pf, prov)

nd = pbnd
default:
return nil, err
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
return root.Close()
},
})

return root, err
}

root, err := mfs.NewRoot(ctx, dag, nd, pf)

lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
return root.Close()
},
})

return root, err
}

@ -16,5 +16,8 @@ func DNSResolver(cfg *config.Config) (*madns.Resolver, error) {
dohOpts = append(dohOpts, doh.WithMaxCacheTTL(cfg.DNS.MaxCacheTTL.WithDefault(time.Duration(math.MaxUint32)*time.Second)))
}

return gateway.NewDNSResolver(cfg.DNS.Resolvers, dohOpts...)
// Replace "auto" DNS resolver placeholders with autoconf values
resolvers := cfg.DNSResolversWithAutoConf()

return gateway.NewDNSResolver(resolvers, dohOpts...)
}

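// An illustrative config shape (assumption, not taken from this commit):
// DNSResolversWithAutoConf() expands "auto" placeholders from autoconf data
// while leaving explicitly configured DoH resolvers untouched, e.g.
//
//	"DNS": {
//	  "Resolvers": {
//	    "eth.": "https://dns.eth.limo/dns-query",
//	    ".": "auto"
//	  }
//	}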
@ -216,6 +216,7 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part

fx.Provide(libp2p.Routing),
fx.Provide(libp2p.ContentRouting),
fx.Provide(libp2p.ContentDiscovery),

fx.Provide(libp2p.BaseRouting(cfg)),
maybeProvide(libp2p.PubsubRouter, bcfg.getOpt("ipnsps")),
@ -249,7 +250,12 @@ func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option {
return fx.Options(
fx.Provide(RepoConfig),
fx.Provide(Datastore),
fx.Provide(BaseBlockstoreCtor(cacheOpts, cfg.Datastore.HashOnRead, cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough))),
fx.Provide(BaseBlockstoreCtor(
cacheOpts,
cfg.Datastore.HashOnRead,
cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough),
cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy),
)),
finalBstore,
)
}
@ -349,8 +355,6 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part
fx.Provide(BitswapOptions(cfg)),
fx.Provide(Bitswap(isBitswapServerEnabled, isBitswapLibp2pEnabled, isHTTPRetrievalEnabled)),
fx.Provide(OnlineExchange(isBitswapLibp2pEnabled)),
// Replace our Exchange with a Providing exchange!
fx.Decorate(ProvidingExchange(isProviderEnabled && isBitswapServerEnabled)),
fx.Provide(DNSResolver),
fx.Provide(Namesys(ipnsCacheSize, cfg.Ipns.MaxCacheTTL.WithDefault(config.DefaultIpnsMaxCacheTTL))),
fx.Provide(Peering),
@ -380,6 +384,7 @@ func Offline(cfg *config.Config) fx.Option {
fx.Provide(libp2p.Routing),
fx.Provide(libp2p.ContentRouting),
fx.Provide(libp2p.OfflineRouting),
fx.Provide(libp2p.ContentDiscovery),
OfflineProviders(),
)
}
@ -389,8 +394,6 @@ var Core = fx.Options(
fx.Provide(Dag),
fx.Provide(FetcherConfig),
fx.Provide(PathResolverConfig),
fx.Provide(Pinning),
fx.Provide(Files),
)

func Networked(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.PartialLimitConfig) fx.Option {
@ -440,16 +443,18 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
uio.HAMTShardingSize = int(shardSingThresholdInt)
uio.DefaultShardWidth = int(shardMaxFanout)

providerStrategy := cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy)

return fx.Options(
bcfgOpts,

fx.Provide(baseProcess),

Storage(bcfg, cfg),
Identity(cfg),
IPNS,
Networked(bcfg, cfg, userResourceOverrides),
fx.Provide(BlockService(cfg)),
fx.Provide(Pinning(providerStrategy)),
fx.Provide(Files(providerStrategy)),
Core,
)
}

@ -4,7 +4,6 @@ import (
"context"
"errors"

"github.com/jbenet/goprocess"
"go.uber.org/fx"
)

@ -55,14 +54,3 @@ func maybeInvoke(opt interface{}, enable bool) fx.Option {
}
return fx.Options()
}

// baseProcess creates a goprocess which is closed when the lifecycle signals it to stop
func baseProcess(lc fx.Lifecycle) goprocess.Process {
p := goprocess.WithParent(goprocess.Background())
lc.Append(fx.Hook{
OnStop: func(_ context.Context) error {
return p.Close()
},
})
return p
}

@ -36,7 +36,7 @@ func AddrFilters(filters []string) func() (*ma.Filters, Libp2pOpts, error) {
}
}

func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string) (p2pbhost.AddrsFactory, error) {
func makeAddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) (p2pbhost.AddrsFactory, error) {
var err error // To assign to the slice in the for loop
existing := make(map[string]bool) // To avoid duplicates

@ -50,7 +50,7 @@ func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []st
}

var appendAnnAddrs []ma.Multiaddr
for _, addr := range appendAnnouce {
for _, addr := range appendAnnounce {
if existing[addr] {
// skip AppendAnnounce that is on the Announce list already
continue
@ -99,14 +99,14 @@ func makeAddrsFactory(announce []string, appendAnnouce []string, noAnnounce []st
}, nil
}

func AddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string) interface{} {
func AddrsFactory(announce []string, appendAnnounce []string, noAnnounce []string) interface{} {
return func(params struct {
fx.In
ForgeMgr *p2pforge.P2PForgeCertMgr `optional:"true"`
},
) (opts Libp2pOpts, err error) {
var addrsFactory p2pbhost.AddrsFactory
announceAddrsFactory, err := makeAddrsFactory(announce, appendAnnouce, noAnnounce)
announceAddrsFactory, err := makeAddrsFactory(announce, appendAnnounce, noAnnounce)
if err != nil {
return opts, err
}
@ -115,8 +115,8 @@ func AddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string
} else {
addrsFactory = func(multiaddrs []ma.Multiaddr) []ma.Multiaddr {
forgeProcessing := params.ForgeMgr.AddressFactory()(multiaddrs)
annouceProcessing := announceAddrsFactory(forgeProcessing)
return annouceProcessing
announceProcessing := announceAddrsFactory(forgeProcessing)
return announceProcessing
}
}
opts.Opts = append(opts.Opts, libp2p.AddrsFactory(addrsFactory))

@ -49,7 +49,8 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHo
if err != nil {
return out, err
}
bootstrappers, err := cfg.BootstrapPeers()
// Use auto-config resolution for actual connectivity
bootstrappers, err := cfg.BootstrapPeersWithAutoConf()
if err != nil {
return out, err
}

@ -95,7 +95,8 @@ func BaseRouting(cfg *config.Config) interface{} {
if err != nil {
return out, err
}
bspeers, err := cfg.BootstrapPeers()
// Use auto-config resolution for actual connectivity
bspeers, err := cfg.BootstrapPeersWithAutoConf()
if err != nil {
return out, err
}
@ -177,6 +178,12 @@ func ContentRouting(in p2pOnlineContentRoutingIn) routing.ContentRouting {
}
}

// ContentDiscovery narrows down the given content routing facility so that it
// only does discovery.
func ContentDiscovery(in irouting.ProvideManyRouter) routing.ContentDiscovery {
return in
}

type p2pOnlineRoutingIn struct {
fx.In

@ -185,7 +192,7 @@ type p2pOnlineRoutingIn struct {
}

// Routing will get all routers obtained from different methods (delegated
// routers, pub-sub, and so on) and add them all together using a TieredRouter.
// routers, pub-sub, and so on) and add them all together using a ParallelRouter.
func Routing(in p2pOnlineRoutingIn) irouting.ProvideManyRouter {
routers := in.Routers

@ -291,24 +298,36 @@ func autoRelayFeeder(cfgPeering config.Peering, peerChan chan<- peer.AddrInfo) f
}

// Additionally, feed closest peers discovered via DHT
if dht == nil {
/* noop due to missing dht.WAN. happens in some unit tests,
not worth fixing as we will refactor this after go-libp2p 0.20 */
continue
if dht != nil {
closestPeers, err := dht.WAN.GetClosestPeers(ctx, h.ID().String())
if err == nil {
for _, p := range closestPeers {
addrs := h.Peerstore().Addrs(p)
if len(addrs) == 0 {
continue
}
dhtPeer := peer.AddrInfo{ID: p, Addrs: addrs}
select {
case peerChan <- dhtPeer:
case <-ctx.Done():
return
}
}
}
}
closestPeers, err := dht.WAN.GetClosestPeers(ctx, h.ID().String())
if err != nil {
// no-op: usually 'failed to find any peer in table' during startup
continue
}
for _, p := range closestPeers {

// Additionally, feed all connected swarm peers as potential relay candidates.
// This includes peers from HTTP routing, manual swarm connect, mDNS discovery, etc.
// (fixes https://github.com/ipfs/kubo/issues/10899)
connectedPeers := h.Network().Peers()
for _, p := range connectedPeers {
addrs := h.Peerstore().Addrs(p)
if len(addrs) == 0 {
continue
}
dhtPeer := peer.AddrInfo{ID: p, Addrs: addrs}
swarmPeer := peer.AddrInfo{ID: p, Addrs: addrs}
select {
case peerChan <- dhtPeer:
case peerChan <- swarmPeer:
case <-ctx.Done():
return
}

@ -2,9 +2,12 @@ package libp2p

import (
"context"
"fmt"
"os"
"strings"
"time"

"github.com/ipfs/boxo/autoconf"
"github.com/ipfs/go-datastore"
"github.com/ipfs/kubo/config"
irouting "github.com/ipfs/kubo/routing"
@ -32,46 +35,144 @@ type RoutingOption func(args RoutingOptionArgs) (routing.Routing, error)

var noopRouter = routinghelpers.Null{}

// EndpointSource tracks where a URL came from to determine appropriate capabilities
type EndpointSource struct {
URL string
SupportsRead bool // came from DelegatedRoutersWithAutoConf (Read operations)
SupportsWrite bool // came from DelegatedPublishersWithAutoConf (Write operations)
}

// determineCapabilities determines endpoint capabilities based on URL path and source
func determineCapabilities(endpoint EndpointSource) (string, autoconf.EndpointCapabilities, error) {
parsed, err := autoconf.DetermineKnownCapabilities(endpoint.URL, endpoint.SupportsRead, endpoint.SupportsWrite)
if err != nil {
log.Debugf("Skipping endpoint %q: %v", endpoint.URL, err)
return "", autoconf.EndpointCapabilities{}, nil // Return empty caps, not error
}

return parsed.BaseURL, parsed.Capabilities, nil
}

// collectAllEndpoints gathers URLs from both router and publisher sources
func collectAllEndpoints(cfg *config.Config) []EndpointSource {
var endpoints []EndpointSource

// Get router URLs (Read operations)
var routerURLs []string
if envRouters := os.Getenv(config.EnvHTTPRouters); envRouters != "" {
// Use environment variable override if set (space or comma separated)
splitFunc := func(r rune) bool { return r == ',' || r == ' ' }
routerURLs = strings.FieldsFunc(envRouters, splitFunc)
log.Warnf("Using HTTP routers from %s environment variable instead of config/autoconf: %v", config.EnvHTTPRouters, routerURLs)
} else {
// Use delegated routers from autoconf
routerURLs = cfg.DelegatedRoutersWithAutoConf()
// No fallback - if autoconf doesn't provide endpoints, use empty list
// This exposes any autoconf issues rather than masking them with hardcoded defaults
}

// Add router URLs to collection
for _, url := range routerURLs {
endpoints = append(endpoints, EndpointSource{
URL: url,
SupportsRead: true,
SupportsWrite: false,
})
}

// Get publisher URLs (Write operations)
publisherURLs := cfg.DelegatedPublishersWithAutoConf()

// Add publisher URLs, merging with existing router URLs if they match
for _, url := range publisherURLs {
found := false
for i, existing := range endpoints {
if existing.URL == url {
endpoints[i].SupportsWrite = true
found = true
break
}
}
if !found {
endpoints = append(endpoints, EndpointSource{
URL: url,
SupportsRead: false,
SupportsWrite: true,
})
}
}

return endpoints
}
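
// A small composition sketch (assumption, for illustration only): how the two
// helpers above feed the per-origin capability map that
// constructDefaultHTTPRouters builds below:
//
//	originCaps := make(map[string]autoconf.EndpointCapabilities)
//	for _, ep := range collectAllEndpoints(cfg) {
//		baseURL, caps, err := determineCapabilities(ep)
//		if err != nil || baseURL == "" {
//			continue // unparseable endpoints yield empty results and are skipped
//		}
//		merged := originCaps[baseURL]
//		merged.Merge(caps)
//		originCaps[baseURL] = merged
//	}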

func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.ParallelRouter, error) {
var routers []*routinghelpers.ParallelRouter
httpRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled)

// Use config.DefaultHTTPRouters if custom override was sent via config.EnvHTTPRouters
// or if user did not set any preference in cfg.Routing.DelegatedRouters
var httpRouterEndpoints []string
if os.Getenv(config.EnvHTTPRouters) != "" || len(cfg.Routing.DelegatedRouters) == 0 {
httpRouterEndpoints = config.DefaultHTTPRouters
} else {
httpRouterEndpoints = cfg.Routing.DelegatedRouters
// Collect URLs from both router and publisher sources
endpoints := collectAllEndpoints(cfg)

// Group endpoints by origin (base URL) and aggregate capabilities
originCapabilities := make(map[string]autoconf.EndpointCapabilities)
for _, endpoint := range endpoints {
// Parse endpoint and determine capabilities based on source
baseURL, capabilities, err := determineCapabilities(endpoint)
if err != nil {
return nil, fmt.Errorf("failed to parse endpoint %q: %w", endpoint.URL, err)
}

// Aggregate capabilities for this origin
existing := originCapabilities[baseURL]
existing.Merge(capabilities)
originCapabilities[baseURL] = existing
}

// Append HTTP routers for additional speed
for _, endpoint := range httpRouterEndpoints {
httpRouter, err := irouting.ConstructHTTPRouter(endpoint, cfg.Identity.PeerID, httpAddrsFromConfig(cfg.Addresses), cfg.Identity.PrivKey, httpRetrievalEnabled)
// Create single HTTP router and composer per origin
for baseURL, capabilities := range originCapabilities {
// Construct HTTP router using base URL (without path)
httpRouter, err := irouting.ConstructHTTPRouter(baseURL, cfg.Identity.PeerID, httpAddrsFromConfig(cfg.Addresses), cfg.Identity.PrivKey, httpRetrievalEnabled)
if err != nil {
return nil, err
}
// Mapping router to /routing/v1/* endpoints

// Configure router operations based on aggregated capabilities
// https://specs.ipfs.tech/routing/http-routing-v1/
r := &irouting.Composer{
GetValueRouter: httpRouter, // GET /routing/v1/ipns
PutValueRouter: httpRouter, // PUT /routing/v1/ipns
composer := &irouting.Composer{
GetValueRouter: noopRouter, // Default disabled, enabled below based on capabilities
PutValueRouter: noopRouter, // Default disabled, enabled below based on capabilities
ProvideRouter: noopRouter, // we don't have spec for sending provides to /routing/v1 (revisit once https://github.com/ipfs/specs/pull/378 or similar is ratified)
FindPeersRouter: httpRouter, // /routing/v1/peers
FindProvidersRouter: httpRouter, // /routing/v1/providers
FindPeersRouter: noopRouter, // Default disabled, enabled below based on capabilities
FindProvidersRouter: noopRouter, // Default disabled, enabled below based on capabilities
}

if endpoint == config.CidContactRoutingURL {
// Special-case: cid.contact only supports /routing/v1/providers/cid
// we disable other endpoints to avoid sending requests that always fail
r.GetValueRouter = noopRouter
r.PutValueRouter = noopRouter
r.ProvideRouter = noopRouter
r.FindPeersRouter = noopRouter
// Enable specific capabilities
if capabilities.IPNSGet {
composer.GetValueRouter = httpRouter // GET /routing/v1/ipns for IPNS resolution
}
if capabilities.IPNSPut {
composer.PutValueRouter = httpRouter // PUT /routing/v1/ipns for IPNS publishing
}
if capabilities.Peers {
composer.FindPeersRouter = httpRouter // GET /routing/v1/peers
}
if capabilities.Providers {
composer.FindProvidersRouter = httpRouter // GET /routing/v1/providers
}

// Handle special cases and backward compatibility
if baseURL == config.CidContactRoutingURL {
// Special-case: cid.contact only supports /routing/v1/providers/cid endpoint
// Override any capabilities detected from URL path to ensure only providers is enabled
// TODO: Consider moving this to configuration or removing once cid.contact adds more capabilities
composer.GetValueRouter = noopRouter
composer.PutValueRouter = noopRouter
composer.ProvideRouter = noopRouter
composer.FindPeersRouter = noopRouter
composer.FindProvidersRouter = httpRouter // Only providers supported
}

routers = append(routers, &routinghelpers.ParallelRouter{
Router: r,
Router: composer,
IgnoreError: true, // https://github.com/ipfs/kubo/pull/9475#discussion_r1042507387
Timeout: 15 * time.Second, // 5x server value from https://github.com/ipfs/kubo/pull/9475#discussion_r1042428529
DoNotWaitForSearchValue: true,
@ -81,6 +182,31 @@ func constructDefaultHTTPRouters(cfg *config.Config) ([]*routinghelpers.Parallel
return routers, nil
}

// ConstructDelegatedOnlyRouting returns routers used when Routing.Type is set to "delegated"
// This provides HTTP-only routing without DHT, using only delegated routers and IPNS publishers.
// Useful for environments where DHT connectivity is not available or desired
func ConstructDelegatedOnlyRouting(cfg *config.Config) RoutingOption {
return func(args RoutingOptionArgs) (routing.Routing, error) {
// Use only HTTP routers (includes both read and write capabilities) - no DHT
var routers []*routinghelpers.ParallelRouter

// Add HTTP delegated routers (includes both router and publisher capabilities)
httpRouters, err := constructDefaultHTTPRouters(cfg)
if err != nil {
return nil, err
}
routers = append(routers, httpRouters...)

// Validate that we have at least one router configured
if len(routers) == 0 {
return nil, fmt.Errorf("no delegated routers or publishers configured for 'delegated' routing mode")
}

routing := routinghelpers.NewComposableParallel(routers)
return routing, nil
}
}
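
// An illustrative config fragment (assumption, not from this commit) that
// selects the delegated-only mode wired above, following the existing
// Routing.Type convention:
//
//	"Routing": {
//	  "Type": "delegated"
//	}
//
// If no delegated routers or publishers are available, node construction
// fails with the "no delegated routers or publishers configured" error above.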

// ConstructDefaultRouting returns routers used when Routing.Type is unset or set to "auto"
func ConstructDefaultRouting(cfg *config.Config, routingOpt RoutingOption) RoutingOption {
return func(args RoutingOptionArgs) (routing.Routing, error) {

@ -3,7 +3,9 @@ package libp2p
import (
"testing"

"github.com/ipfs/boxo/autoconf"
config "github.com/ipfs/kubo/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

@ -32,3 +34,191 @@ func TestHttpAddrsFromConfig(t *testing.T) {
AppendAnnounce: []string{"/ip4/192.168.0.2/tcp/4001"},
}), "AppendAnnounce addrs should be included if specified")
}

func TestDetermineCapabilities(t *testing.T) {
tests := []struct {
name string
endpoint EndpointSource
expectedBaseURL string
expectedCapabilities autoconf.EndpointCapabilities
expectError bool
}{
{
name: "URL with no path should have all Read capabilities",
endpoint: EndpointSource{
URL: "https://example.com",
SupportsRead: true,
SupportsWrite: false,
},
expectedBaseURL: "https://example.com",
expectedCapabilities: autoconf.EndpointCapabilities{
Providers: true,
Peers: true,
IPNSGet: true,
IPNSPut: false,
},
expectError: false,
},
{
name: "URL with trailing slash should have all Read capabilities",
endpoint: EndpointSource{
URL: "https://example.com/",
SupportsRead: true,
SupportsWrite: false,
},
expectedBaseURL: "https://example.com",
expectedCapabilities: autoconf.EndpointCapabilities{
Providers: true,
Peers: true,
IPNSGet: true,
IPNSPut: false,
},
expectError: false,
},
{
name: "URL with IPNS path should have only IPNS capabilities",
endpoint: EndpointSource{
URL: "https://example.com/routing/v1/ipns",
SupportsRead: true,
SupportsWrite: true,
},
expectedBaseURL: "https://example.com",
expectedCapabilities: autoconf.EndpointCapabilities{
Providers: false,
Peers: false,
IPNSGet: true,
IPNSPut: true,
},
expectError: false,
},
{
name: "URL with providers path should have only Providers capability",
endpoint: EndpointSource{
URL: "https://example.com/routing/v1/providers",
SupportsRead: true,
SupportsWrite: false,
},
expectedBaseURL: "https://example.com",
expectedCapabilities: autoconf.EndpointCapabilities{
Providers: true,
Peers: false,
IPNSGet: false,
IPNSPut: false,
},
expectError: false,
},
{
name: "URL with peers path should have only Peers capability",
endpoint: EndpointSource{
URL: "https://example.com/routing/v1/peers",
SupportsRead: true,
SupportsWrite: false,
},
expectedBaseURL: "https://example.com",
expectedCapabilities: autoconf.EndpointCapabilities{
Providers: false,
Peers: true,
IPNSGet: false,
IPNSPut: false,
},
expectError: false,
},
{
name: "URL with Write support only should enable IPNSPut for no-path endpoint",
endpoint: EndpointSource{
URL: "https://example.com",
SupportsRead: false,
SupportsWrite: true,
},
expectedBaseURL: "https://example.com",
expectedCapabilities: autoconf.EndpointCapabilities{
Providers: false,
Peers: false,
IPNSGet: false,
IPNSPut: true,
},
expectError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
baseURL, capabilities, err := determineCapabilities(tt.endpoint)

if tt.expectError {
assert.Error(t, err)
return
}

require.NoError(t, err)
assert.Equal(t, tt.expectedBaseURL, baseURL)
assert.Equal(t, tt.expectedCapabilities, capabilities)
})
}
}

func TestEndpointCapabilitiesReadWriteLogic(t *testing.T) {
t.Run("Read endpoint with no path should enable read capabilities", func(t *testing.T) {
endpoint := EndpointSource{
URL: "https://example.com",
SupportsRead: true,
SupportsWrite: false,
}
_, capabilities, err := determineCapabilities(endpoint)
require.NoError(t, err)

// Read endpoint with no path should enable all read capabilities
assert.True(t, capabilities.Providers)
assert.True(t, capabilities.Peers)
assert.True(t, capabilities.IPNSGet)
assert.False(t, capabilities.IPNSPut) // Write capability should be false
})

t.Run("Write endpoint with no path should enable write capabilities", func(t *testing.T) {
endpoint := EndpointSource{
URL: "https://example.com",
SupportsRead: false,
SupportsWrite: true,
}
_, capabilities, err := determineCapabilities(endpoint)
require.NoError(t, err)

// Write endpoint with no path should only enable IPNS write capability
assert.False(t, capabilities.Providers)
assert.False(t, capabilities.Peers)
assert.False(t, capabilities.IPNSGet)
assert.True(t, capabilities.IPNSPut) // Only write capability should be true
})

t.Run("Specific path should only enable matching capabilities", func(t *testing.T) {
endpoint := EndpointSource{
URL: "https://example.com/routing/v1/ipns",
SupportsRead: true,
SupportsWrite: true,
}
_, capabilities, err := determineCapabilities(endpoint)
require.NoError(t, err)

// Specific IPNS path should only enable IPNS capabilities based on source
assert.False(t, capabilities.Providers)
assert.False(t, capabilities.Peers)
assert.True(t, capabilities.IPNSGet) // Read capability enabled
assert.True(t, capabilities.IPNSPut) // Write capability enabled
})

t.Run("Unsupported paths should result in empty capabilities", func(t *testing.T) {
endpoint := EndpointSource{
URL: "https://example.com/routing/v1/unsupported",
SupportsRead: true,
SupportsWrite: false,
}
_, capabilities, err := determineCapabilities(endpoint)
require.NoError(t, err)

// Unsupported paths should result in no capabilities
assert.False(t, capabilities.Providers)
assert.False(t, capabilities.Peers)
assert.False(t, capabilities.IPNSGet)
assert.False(t, capabilities.IPNSPut)
})
}

@ -2,6 +2,7 @@ package node

import (
"context"
"errors"
"fmt"
"time"

@ -9,8 +10,12 @@ import (
"github.com/ipfs/boxo/fetcher"
"github.com/ipfs/boxo/mfs"
pin "github.com/ipfs/boxo/pinning/pinner"
"github.com/ipfs/boxo/pinning/pinner/dspinner"
provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/repo"
irouting "github.com/ipfs/kubo/routing"
"go.uber.org/fx"
@ -21,12 +26,17 @@ import (
// and in 'ipfs stats provide' report.
const sampledBatchSize = 1000

// Datastore key used to store previous reprovide strategy.
const reprovideStrategyKey = "/reprovideStrategy"

func ProviderSys(reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
return fx.Provide(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, keyProvider provider.KeyChanFunc, repo repo.Repo, bs blockstore.Blockstore) (provider.System, error) {
return fx.Provide(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, repo repo.Repo) (provider.System, error) {
// Initialize provider.System first, before pinner/blockstore/etc.
// The KeyChanFunc will be set later via SetKeyProvider() once we have
// created the pinner, blockstore and other dependencies.
opts := []provider.Option{
provider.Online(cr),
provider.ReproviderInterval(reprovideInterval),
provider.KeyProvider(keyProvider),
provider.ProvideWorkerCount(provideWorkerCount),
}
if !acceleratedDHTClient && reprovideInterval > 0 {
@ -45,16 +55,20 @@ func ProviderSys(reprovideInterval time.Duration, acceleratedDHTClient bool, pro
defer cancel()

// FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup.
ch, err := bs.AllKeysChan(ctx)
// Note: talk to datastore directly, as to not depend on Blockstore here.
qr, err := repo.Datastore().Query(ctx, query.Query{
Prefix: blockstore.BlockPrefix.String(),
KeysOnly: true})
if err != nil {
logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err)
return false
}
defer qr.Close()
count = 0
countLoop:
for {
select {
case _, ok := <-ch:
case _, ok := <-qr.Next():
if !ok {
break countLoop
}
@ -114,6 +128,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtcli
return false
}, sampledBatchSize))
}

sys, err := provider.New(repo.Datastore(), opts...)
if err != nil {
return nil, err
@ -132,21 +147,18 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtcli
// ONLINE/OFFLINE

// OnlineProviders groups units managing provider routing records online
func OnlineProviders(provide bool, reprovideStrategy string, reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
func OnlineProviders(provide bool, providerStrategy string, reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
if !provide {
return OfflineProviders()
}

var keyProvider fx.Option
switch reprovideStrategy {
case "all", "", "roots", "pinned", "mfs", "pinned+mfs", "flat":
keyProvider = fx.Provide(newProvidingStrategy(reprovideStrategy))
default:
return fx.Error(fmt.Errorf("unknown reprovider strategy %q", reprovideStrategy))
strategyFlag := config.ParseReproviderStrategy(providerStrategy)
if strategyFlag == 0 {
return fx.Error(fmt.Errorf("unknown reprovider strategy %q", providerStrategy))
}

return fx.Options(
keyProvider,
fx.Provide(setReproviderKeyProvider(providerStrategy)),
ProviderSys(reprovideInterval, acceleratedDHTClient, provideWorkerCount),
)
}
@ -172,51 +184,120 @@ func mfsProvider(mfsRoot *mfs.Root, fetcher fetcher.Factory) provider.KeyChanFun
}
}

func mfsRootProvider(mfsRoot *mfs.Root) provider.KeyChanFunc {
return func(ctx context.Context) (<-chan cid.Cid, error) {
rootNode, err := mfsRoot.GetDirectory().GetNode()
if err != nil {
return nil, fmt.Errorf("error loading mfs root, cannot provide MFS: %w", err)
}
ch := make(chan cid.Cid, 1)
ch <- rootNode.Cid()
close(ch)
return ch, nil
type provStrategyIn struct {
fx.In
Pinner pin.Pinner
Blockstore blockstore.Blockstore
OfflineIPLDFetcher fetcher.Factory `name:"offlineIpldFetcher"`
OfflineUnixFSFetcher fetcher.Factory `name:"offlineUnixfsFetcher"`
MFSRoot *mfs.Root
Provider provider.System
Repo repo.Repo
}

type provStrategyOut struct {
fx.Out
ProvidingStrategy config.ReproviderStrategy
ProvidingKeyChanFunc provider.KeyChanFunc
}

// createKeyProvider creates the appropriate KeyChanFunc based on strategy.
// Each strategy has different behavior:
// - "roots": Only root CIDs of pinned content
// - "pinned": All pinned content (roots + children)
// - "mfs": Only MFS content
// - "all": all blocks
func createKeyProvider(strategyFlag config.ReproviderStrategy, in provStrategyIn) provider.KeyChanFunc {
switch strategyFlag {
case config.ReproviderStrategyRoots:
return provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher))
case config.ReproviderStrategyPinned:
return provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher))
case config.ReproviderStrategyPinned | config.ReproviderStrategyMFS:
return provider.NewPrioritizedProvider(
provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)),
mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher),
)
case config.ReproviderStrategyMFS:
return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher)
default: // "all", "", "flat" (compat)
return in.Blockstore.AllKeysChan
}
}
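
// A usage sketch (assumption, not part of this commit): consuming the
// provider.KeyChanFunc returned by createKeyProvider above, which yields the
// CIDs the reprovider should announce:
//
//	kcf := createKeyProvider(config.ParseReproviderStrategy("pinned"), in)
//	ch, err := kcf(ctx)
//	if err != nil {
//		return err
//	}
//	for c := range ch {
//		fmt.Println("would provide:", c)
//	}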

func newProvidingStrategy(strategy string) interface{} {
type input struct {
fx.In
Pinner pin.Pinner
Blockstore blockstore.Blockstore
OfflineIPLDFetcher fetcher.Factory `name:"offlineIpldFetcher"`
OfflineUnixFSFetcher fetcher.Factory `name:"offlineUnixfsFetcher"`
MFSRoot *mfs.Root
// detectStrategyChange checks if the reproviding strategy has changed from what's persisted.
// Returns: (previousStrategy, hasChanged, error)
func detectStrategyChange(ctx context.Context, strategy string, ds datastore.Datastore) (string, bool, error) {
strategyKey := datastore.NewKey(reprovideStrategyKey)

prev, err := ds.Get(ctx, strategyKey)
if err != nil {
if errors.Is(err, datastore.ErrNotFound) {
return "", strategy != "", nil
}
return "", false, err
}
return func(in input) provider.KeyChanFunc {
switch strategy {
case "roots":
return provider.NewBufferedProvider(provider.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher))
case "pinned":
return provider.NewBufferedProvider(provider.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher))
case "pinned+mfs":
return provider.NewPrioritizedProvider(
provider.NewBufferedProvider(provider.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)),
mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher),
)
case "mfs":
return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher)
case "flat":
return provider.NewBlockstoreProvider(in.Blockstore)
default: // "all", ""
return provider.NewPrioritizedProvider(
provider.NewPrioritizedProvider(
provider.NewBufferedProvider(provider.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher)),
mfsRootProvider(in.MFSRoot),
),
provider.NewBlockstoreProvider(in.Blockstore),
)

previousStrategy := string(prev)
return previousStrategy, previousStrategy != strategy, nil
}

// persistStrategy saves the current reproviding strategy to the datastore.
// Empty string strategies are deleted rather than stored.
func persistStrategy(ctx context.Context, strategy string, ds datastore.Datastore) error {
strategyKey := datastore.NewKey(reprovideStrategyKey)

if strategy == "" {
return ds.Delete(ctx, strategyKey)
}
return ds.Put(ctx, strategyKey, []byte(strategy))
}

// handleStrategyChange manages strategy change detection and queue clearing.
// Strategy change detection: when the reproviding strategy changes,
// we clear the provide queue to avoid unexpected behavior from mixing
// strategies. This ensures a clean transition between different providing modes.
func handleStrategyChange(strategy string, provider provider.System, ds datastore.Datastore) {
ctx := context.Background()

previous, changed, err := detectStrategyChange(ctx, strategy, ds)
if err != nil {
logger.Error("cannot read previous reprovide strategy", "err", err)
return
}

if !changed {
return
}

logger.Infow("Reprovider.Strategy changed, clearing provide queue", "previous", previous, "current", strategy)
provider.Clear()

if err := persistStrategy(ctx, strategy, ds); err != nil {
logger.Error("cannot update reprovide strategy", "err", err)
}
}

func setReproviderKeyProvider(strategy string) func(in provStrategyIn) provStrategyOut {
strategyFlag := config.ParseReproviderStrategy(strategy)

return func(in provStrategyIn) provStrategyOut {
// Create the appropriate key provider based on strategy
kcf := createKeyProvider(strategyFlag, in)

// SetKeyProvider breaks the circular dependency between provider, blockstore, and pinner.
|
||||
// We cannot create the blockstore without the provider (it needs to provide blocks),
|
||||
// and we cannot determine the reproviding strategy without the pinner/blockstore.
|
||||
// This deferred initialization allows us to create provider.System first,
|
||||
// then set the actual key provider function after all dependencies are ready.
|
||||
in.Provider.SetKeyProvider(kcf)
|
||||
|
||||
// Handle strategy changes (detection, queue clearing, persistence)
|
||||
handleStrategyChange(strategy, in.Provider, in.Repo.Datastore())
|
||||
|
||||
return provStrategyOut{
|
||||
ProvidingStrategy: strategyFlag,
|
||||
ProvidingKeyChanFunc: kcf,
|
||||
}
|
||||
}
|
||||
}
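
For context on the refactor above: a `provider.KeyChanFunc` is simply a function that returns a channel of CIDs to announce, as the removed `mfsRootProvider` illustrates. A minimal standalone sketch of the same shape (illustrative only; it assumes nothing beyond the boxo `provider` package and `go-cid`):

```go
package example

import (
	"context"

	provider "github.com/ipfs/boxo/provider"
	"github.com/ipfs/go-cid"
)

// staticProvider returns a KeyChanFunc that announces a fixed set of CIDs:
// open a channel, push the keys, close it, and let provider.System drain it.
func staticProvider(keys []cid.Cid) provider.KeyChanFunc {
	return func(ctx context.Context) (<-chan cid.Cid, error) {
		ch := make(chan cid.Cid, len(keys))
		for _, k := range keys {
			ch <- k
		}
		close(ch)
		return ch, nil
	}
}
```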

@ -2,6 +2,7 @@ package node

import (
	blockstore "github.com/ipfs/boxo/blockstore"
	provider "github.com/ipfs/boxo/provider"
	"github.com/ipfs/go-datastore"
	config "github.com/ipfs/kubo/config"
	"go.uber.org/fx"
@ -27,11 +28,30 @@ func Datastore(repo repo.Repo) datastore.Datastore {
type BaseBlocks blockstore.Blockstore

// BaseBlockstoreCtor creates cached blockstore backed by the provided datastore
func BaseBlockstoreCtor(cacheOpts blockstore.CacheOpts, hashOnRead bool, writeThrough bool) func(mctx helpers.MetricsCtx, repo repo.Repo, lc fx.Lifecycle) (bs BaseBlocks, err error) {
	return func(mctx helpers.MetricsCtx, repo repo.Repo, lc fx.Lifecycle) (bs BaseBlocks, err error) {
func BaseBlockstoreCtor(
	cacheOpts blockstore.CacheOpts,
	hashOnRead bool,
	writeThrough bool,
	providingStrategy string,

) func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) {
	return func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) {
		opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)}

		// Blockstore providing integration:
		// When strategy includes "all" the blockstore directly provides blocks as they're Put.
		// Important: Provide calls from blockstore are intentionally BLOCKING.
		// The Provider implementation (not the blockstore) should handle concurrency/queuing.
		// This avoids spawning unbounded goroutines for concurrent block additions.
		strategyFlag := config.ParseReproviderStrategy(providingStrategy)
		if strategyFlag&config.ReproviderStrategyAll != 0 {
			opts = append(opts, blockstore.Provider(prov))
		}

		// hash security
		bs = blockstore.NewBlockstore(repo.Datastore(),
			blockstore.WriteThrough(writeThrough),
		bs = blockstore.NewBlockstore(
			repo.Datastore(),
			opts...,
		)
		bs = &verifbs.VerifBS{Blockstore: bs}
		bs, err = blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, cacheOpts)
@ -41,8 +61,8 @@ func BaseBlockstoreCtor(cacheOpts blockstore.CacheOpts, hashOnRead bool, writeTh

		bs = blockstore.NewIdStore(bs)

		if hashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly?
			bs.HashOnRead(true)
		if hashOnRead {
			bs = &blockstore.ValidatingBlockstore{Blockstore: bs}
		}

		return
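
As the comment in the hunk above notes, the parsed strategy is a bitmask, which is what makes the `strategyFlag&config.ReproviderStrategyAll != 0` test work. A rough illustration of how such flags compose (the constants below are made up for the sketch; the real definitions live in `github.com/ipfs/kubo/config`):

```go
package main

import "fmt"

// Illustrative bitmask-style strategy flags, mirroring the idea behind
// config.ReproviderStrategy; not the actual Kubo definitions.
type strategy uint

const (
	strategyRoots strategy = 1 << iota
	strategyPinned
	strategyMFS
	strategyAll
)

func main() {
	s := strategyPinned | strategyMFS // e.g. parsed from "pinned+mfs"
	fmt.Println(s&strategyMFS != 0)   // true: MFS keys are included
	fmt.Println(s&strategyAll != 0)   // false: blockstore-wide providing stays off
}
```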

@ -1,4 +1,4 @@
<!-- Last updated during [v0.33.0 release](https://github.com/ipfs/kubo/pull/10674) -->
<!-- Last updated during [v0.36.0 release](https://github.com/ipfs/kubo/issues/10816) -->

# ✅ Release Checklist (vX.Y.Z[-rcN])

@ -30,7 +30,7 @@ This section covers tasks to be done during each release.

### 1. Prepare release branch

- [ ] Prepare the release branch and update version numbers accordingly
- [ ] Prepare the release branch and update version numbers accordingly
  - [ ] create a new branch `release-vX.Y.Z`
    - use `master` as base if `Z == 0`
    - use `release` as base if `Z > 0`
@ -39,16 +39,16 @@ This section covers tasks to be done during each release.
  - [ ] create a draft PR from `release-vX.Y.Z` to `release` ([example](https://github.com/ipfs/kubo/pull/9306))
  - [ ] Cherry-pick commits from `master` to the `release-vX.Y.Z` using `git cherry-pick -x <commit>` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf))
    - **NOTE:** cherry-picking with `-x` is important
  - [ ]  Replace the `Changelog` and `Contributors` sections of the [changelog](docs/changelogs/vX.Y.md) with the stdout (do **NOT** copy the stderr) of `./bin/mkreleaselog`.
    - **NOTE:** `mkreleaselog` expects your `$GOPATH/src/github.com/ipfs/kubo` to include latest commits from `release-vX.Y.Z`
  - [ ] verify all CI checks on the PR from `release-vX.Y.Z` to `release` are passing
  - [ ]  Replace the `Changelog` and `Contributors` sections of the [changelog](docs/changelogs/vX.Y.md) with the stdout (do **NOT** copy the stderr) of `./bin/mkreleaselog`.
    - **NOTE:** `mkreleaselog` expects your `$GOPATH/src/github.com/ipfs/kubo` to include latest commits from `release-vX.Y.Z`
  - [ ]  Merge the PR from `release-vX.Y.Z` to `release` using the `Create a merge commit`
    - do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
    - do **NOT** delete the `release-vX.Y.Z` branch

### 2. Tag release

- [ ] Create the release tag
- [ ] Create the release tag
  - ⚠️ **NOTE:** This is a dangerous operation! Go and Docker publishing are difficult to reverse! Have the release reviewer verify all the commands marked with !
  - [ ]  tag the HEAD commit using `git tag -s vX.Y.Z(-rcN) -m 'Prerelease X.Y.Z(-rcN)'`
  - [ ]  tag the HEAD commit of the `release` branch using `git tag -s vX.Y.Z -m 'Release X.Y.Z'`
@ -61,31 +61,28 @@ This section covers tasks to be done during each release.
- [ ] Publish Docker image to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags)
  - [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow run initiated by the tag push to finish
  - [ ] verify the image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags)
- [ ] Publish the release to [dist.ipfs.tech](https://dist.ipfs.tech)
- [ ] Publish the release to [dist.ipfs.tech](https://dist.ipfs.tech)
  - [ ] check out [ipfs/distributions](https://github.com/ipfs/distributions)
  - [ ] create new branch: run `git checkout -b release-kubo-X.Y.Z(-rcN)`
  - [ ] run `./dist.sh add-version kubo vX.Y.Z(-rcN)` to add the new version to the `versions` file ([usage](https://github.com/ipfs/distributions#usage))
  - [ ] create new branch: run `git checkout -b release-kubo-X.Y.Z(-rcN)`
  - [ ] Verify [ipfs/distributions](https://github.com/ipfs/distributions)'s `.tool-versions`'s `golang` entry is set to the [latest go release](https://go.dev/doc/devel/release) on the major go branch [Kubo is being tested on](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) (see `go-version:`). If not, update `.tool-versions` to match the latest golang.
  - [ ] create and merge the PR which updates `dists/kubo/versions` and `dists/go-ipfs/versions` (**NOTE:**  will also have `dists/kubo/current` and `dists/go-ipfs/current` – [example](https://github.com/ipfs/distributions/pull/1125))
  - [ ] run `./dist.sh add-version kubo vX.Y.Z(-rcN)` to add the new version to the `versions` file ([usage](https://github.com/ipfs/distributions#usage))
  - [ ] create and merge the PR which updates `dists/kubo/versions` (**NOTE:**  will also have `dists/kubo/current` – [example](https://github.com/ipfs/distributions/pull/1125))
  - [ ] wait for the [CI](https://github.com/ipfs/distributions/actions/workflows/main.yml) workflow run initiated by the merge to master to finish
  - [ ] verify the release is available on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo)
- [ ] Publish the release to [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
  - [ ] manually dispatch the [Release to npm](https://github.com/ipfs/npm-go-ipfs/actions/workflows/main.yml) workflow
  - [ ] check [Release to npm](https://github.com/ipfs/npm-go-ipfs/actions/workflows/main.yml) workflow run logs to verify it discovered the new release
  - [ ] manually dispatch the [Release to npm](https://github.com/ipfs/npm-kubo/actions/workflows/main.yml) workflow if it was not executed already and verify it discovered the new release
  - [ ] verify the release is available on [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
- [ ] Publish the release to [GitHub kubo/releases](https://github.com/ipfs/kubo/releases)
  - [ ] create a new release on [github.com/ipfs/kubo/releases](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release)
    - [RC example](https://github.com/ipfs/kubo/releases/tag/v0.17.0-rc1)
    - [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.17.0)
  - [ ] [create](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) a new release
    - [RC example](https://github.com/ipfs/kubo/releases/tag/v0.36.0-rc1)
    - [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.35.0)
  - [ ] use the `vX.Y.Z(-rcN)` tag
  - [ ] link to the release issue
  - [ ]  link to the changelog in the description
  - [ ]  check the `This is a pre-release` checkbox
  - [ ]  copy the changelog (without the header) in the description
  - [ ]  do **NOT** check the `This is a pre-release` checkbox
  - [ ] run the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow
  - [ ] wait for the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow run to finish
  - [ ] verify the release assets are present in the [GitHub release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN))
  - [ ] run the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow and verify the release assets are attached to the GitHub release

### 4. After Publishing

@ -95,12 +92,15 @@ This section covers tasks to be done during each release.
- [ ] Create and merge a PR from `merge-release-vX.Y.Z` to `master`
  - ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
  - ⚠️ **NOTE:** make sure to ignore the changes to [version.go](version.go) (keep the `-dev` in `master`)
- [ ] Update Kubo staging environment, see the [Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8) for details.
  - [ ]  Test last release against the current RC
  - [ ]  Test last release against the current one
- [ ] Promote the release
- [ ] Update [ipshipyard/waterworks-infra](https://github.com/ipshipyard/waterworks-infra)
  - [ ] Update Kubo staging environment, see the [Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8) for details.
    - [ ]  Test last release against the current RC
    - [ ]  Test last release against the current one
  - [ ] Update collab cluster boxes to the tagged release (final or RC)
  - [ ] Update libp2p bootstrappers to the tagged release (final or RC)
- [ ] Promote the release
  - [ ] create an [IPFS Discourse](https://discuss.ipfs.tech) topic ([prerelease example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248), [release example](https://discuss.ipfs.tech/t/kubo-v0-16-0-release-is-out/15249))
    - [ ] use `Kubo vX.Y.Z(-rcN) is out!` as the title and `kubo` and `go-ipfs` as tags
    - [ ] use `Kubo vX.Y.Z(-rcN) is out!` as the title and `kubo` as tags
    - [ ] repeat the title as a heading (`##`) in the description
    - [ ] link to the GitHub Release, binaries on IPNS, docker pull command and release notes in the description
  - [ ] pin the [IPFS Discourse](https://discuss.ipfs.tech) topic globally, you can make the topic a banner if there is no banner already
@ -112,25 +112,24 @@ This section covers tasks to be done during each release.
  - [ ]  create an issue comment mentioning early testers on the release issue ([example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478))
  - [ ]  create an issue comment linking to the release on the release issue ([example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975))
  - [ ]   promote on bsky.app ([example](https://bsky.app/profile/ipshipyard.com/post/3lh2brzrwbs2c))
  - [ ]   promote on x.com ([example](https://x.com/ipshipyard/status/1885346348808929609))
  - [ ]   promote on x.com ([example](https://x.com/ipshipyard/status/1885346348808929609))
  - [ ]   post the link to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN)) to [Reddit](https://reddit.com/r/ipfs) ([example](https://www.reddit.com/r/ipfs/comments/9x0q0k/kubo_v0160_release_is_out/))
- [ ] Manually smoke-test the new version with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/)
- [ ]  Update Kubo in [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
  - [ ] check out [ipfs/ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
  - [ ] run `npm install `
  - [ ] create a PR which updates `package.json` and `package-lock.json`
- [ ] Update Kubo in [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
  - [ ] create a PR which updates `kubo` version to the tagged version in `package.json` and `package-lock.json`
  - [ ]  switch to final release and merge
- [ ]  Update Kubo docs at docs.ipfs.tech:
  - [ ]  run the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow
  - [ ]  merge the PR created by the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow run
</details>
- [ ]  Create a blog entry on [blog.ipfs.tech](https://blog.ipfs.tech)
- [ ]  Create a blog entry on [blog.ipfs.tech](https://blog.ipfs.tech)
  - [ ]  create a PR which adds a release note for the new Kubo version ([example](https://github.com/ipfs/ipfs-blog/pull/529))
  - [ ]  merge the PR
  - [ ]  verify the blog entry was published
- [ ]   Create a dependency update PR
  - [ ]   check out [ipfs/kubo](https://github.com/ipfs/kubo)
  - [ ]   go over direct dependencies from `go.mod` in the root directory (NOTE: do not run `go get -u` as it will upgrade indirect dependencies which may cause problems)
  - [ ]   run `make mod_tidy`
  - [ ]   run `make mod_tidy`
  - [ ]   create a PR which updates `go.mod` and `go.sum`
  - [ ]   add the PR to the next release milestone
- [ ]   Create the next release issue
@ -80,7 +80,7 @@ Performance profiles can now be collected using `ipfs diag profile`. If you need

#### 🍎 Mac OS notarized binaries

The go-ipfs and related migration binaries (for both Intel and Apple Sillicon) are now signed and notarized to make Mac OS installation easier.
The go-ipfs and related migration binaries (for both Intel and Apple Silicon) are now signed and notarized to make Mac OS installation easier.

#### 👨‍👩‍👦 Improved MDNS

@ -629,7 +629,7 @@ and various improvements have been made to improve the UX including:
- feat: WithLocalPublication option to enable local only publishing on a topic (#481) ([libp2p/go-libp2p-pubsub#481](https://github.com/libp2p/go-libp2p-pubsub/pull/481))
- update pubsub deps (#491) ([libp2p/go-libp2p-pubsub#491](https://github.com/libp2p/go-libp2p-pubsub/pull/491))
- Gossipsub: Unsubscribe backoff (#488) ([libp2p/go-libp2p-pubsub#488](https://github.com/libp2p/go-libp2p-pubsub/pull/488))
- Adds exponential backoff to re-spawing new streams for supposedly dead peers (#483) ([libp2p/go-libp2p-pubsub#483](https://github.com/libp2p/go-libp2p-pubsub/pull/483))
- Adds exponential backoff to re-spawning new streams for supposedly dead peers (#483) ([libp2p/go-libp2p-pubsub#483](https://github.com/libp2p/go-libp2p-pubsub/pull/483))
- Publishing option for signing a message with a custom private key (#486) ([libp2p/go-libp2p-pubsub#486](https://github.com/libp2p/go-libp2p-pubsub/pull/486))
- fix unused GossipSubHistoryGossip, make seenMessages ttl configurable, make score params SeenMsgTTL configurable
- Update README.md

@ -10,7 +10,7 @@
config file Bootstrap field changed accordingly. users
can upgrade cleanly with:

ipfs bootstrap >boostrap_peers
ipfs bootstrap >bootstrap_peers
ipfs bootstrap rm --all
<install new ipfs>
<manually add .../ipfs/... to addrs in bootstrap_peers>

@ -471,7 +471,7 @@ You can read more about the rationale behind this decision on the [tracking issu
- identify: fix stale comment (#2179) ([libp2p/go-libp2p#2179](https://github.com/libp2p/go-libp2p/pull/2179))
- relay service: add metrics (#2154) ([libp2p/go-libp2p#2154](https://github.com/libp2p/go-libp2p/pull/2154))
- identify: Fix IdentifyWait when Connected events happen out of order (#2173) ([libp2p/go-libp2p#2173](https://github.com/libp2p/go-libp2p/pull/2173))
- chore: fix ressource manager's README (#2168) ([libp2p/go-libp2p#2168](https://github.com/libp2p/go-libp2p/pull/2168))
- chore: fix resource manager's README (#2168) ([libp2p/go-libp2p#2168](https://github.com/libp2p/go-libp2p/pull/2168))
- relay: fix deadlock when closing (#2171) ([libp2p/go-libp2p#2171](https://github.com/libp2p/go-libp2p/pull/2171))
- core: remove LocalPrivateKey method from network.Conn interface (#2144) ([libp2p/go-libp2p#2144](https://github.com/libp2p/go-libp2p/pull/2144))
- routed host: return connection error instead of routing error (#2169) ([libp2p/go-libp2p#2169](https://github.com/libp2p/go-libp2p/pull/2169))

@ -263,7 +263,7 @@ should be using AcceleratedDHTClient because they are falling behind.
- chore: release v0.24.0
- fix: don't add unresponsive DHT servers to the Routing Table (#820) ([libp2p/go-libp2p-kad-dht#820](https://github.com/libp2p/go-libp2p-kad-dht/pull/820))
- github.com/libp2p/go-libp2p-kbucket (v0.5.0 -> v0.6.3):
- fix: fix abba bug in UsefullNewPeer ([libp2p/go-libp2p-kbucket#122](https://github.com/libp2p/go-libp2p-kbucket/pull/122))
- fix: fix abba bug in UsefulNewPeer ([libp2p/go-libp2p-kbucket#122](https://github.com/libp2p/go-libp2p-kbucket/pull/122))
- chore: release v0.6.2 ([libp2p/go-libp2p-kbucket#121](https://github.com/libp2p/go-libp2p-kbucket/pull/121))
- Replacing UsefulPeer() with UsefulNewPeer() ([libp2p/go-libp2p-kbucket#120](https://github.com/libp2p/go-libp2p-kbucket/pull/120))
- chore: release 0.6.1 ([libp2p/go-libp2p-kbucket#119](https://github.com/libp2p/go-libp2p-kbucket/pull/119))

@ -236,7 +236,7 @@ This includes a breaking change to `ipfs id` and some of the `ipfs swarm` comman
- chore: cleanup error handling in compparallel
- fix: correctly handle errors in compparallel
- fix: make the ProvideMany docs clearer
- perf: remove goroutine that just waits before closing with a synchrous waitgroup
- perf: remove goroutine that just waits before closing with a synchronous waitgroup
- github.com/libp2p/go-nat (v0.1.0 -> v0.2.0):
- release v0.2.0 (#30) ([libp2p/go-nat#30](https://github.com/libp2p/go-nat/pull/30))
- update deps, use contexts on UPnP functions (#29) ([libp2p/go-nat#29](https://github.com/libp2p/go-nat/pull/29))

@ -36,7 +36,7 @@ For a description of the available tuning parameters, see [kubo/docs/datastores.

We've noticed users were applying the `lowpower` profile, and then reporting content routing issues. This was because `lowpower` disabled the reprovider system and locally hosted data was no longer announced on Amino DHT.

This release changes [`lowpower` profile](https://github.com/ipfs/kubo/blob/master/docs/config.md#lowpower-profile) to not change reprovider settings, ensuring the new users are not sabotaging themselves. It also adds [`annouce-on`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-on-profile) and [`announce-off`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-off-profile) profiles for controlling announcement settings separately.
This release changes [`lowpower` profile](https://github.com/ipfs/kubo/blob/master/docs/config.md#lowpower-profile) to not change reprovider settings, ensuring the new users are not sabotaging themselves. It also adds [`announce-on`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-on-profile) and [`announce-off`](https://github.com/ipfs/kubo/blob/master/docs/config.md#announce-off-profile) profiles for controlling announcement settings separately.

> [!IMPORTANT]
> If you've ever applied the `lowpower` profile before, there is a high chance your node is not announcing to DHT anymore.

@ -40,7 +40,7 @@ See [`HTTPRetrieval`](https://github.com/ipfs/kubo/blob/master/docs/config.md#ht

The Bitswap client now supports broadcast reduction logic, which is enabled by default. This feature significantly reduces the number of broadcast messages sent to peers, resulting in lower bandwidth usage during load spikes.

The overall logic works by sending to non-local peers only if those peers have previously replied that they want data blocks. To minimize impact on existing workloads, by default, broadcasts are still always sent to peers on the local network, or the ones defined in `Peering.Peers`.
The overall logic works by sending to non-local peers only if those peers have previously replied that they have data blocks. To minimize impact on existing workloads, by default, broadcasts are still always sent to peers on the local network, or the ones defined in `Peering.Peers`.

At Shipyard, we conducted A/B testing on our internal Kubo staging gateway with organic CID requests to `ipfs.io`. While these results may not exactly match your specific workload, the benefits proved significant enough to make this feature default. Here are the key findings:

@ -215,7 +215,7 @@ The `ipfs config edit` command did not correctly handle the `EDITOR` environment
- chore: update to boxo merkledag package
- feat: car debug handles the zero length block ([ipld/go-car#569](https://github.com/ipld/go-car/pull/569))
- chore(deps): bump github.com/rogpeppe/go-internal from 1.13.1 to 1.14.1 in /cmd ([ipld/go-car#566](https://github.com/ipld/go-car/pull/566))
- Add a concatination cli utility ([ipld/go-car#565](https://github.com/ipld/go-car/pull/565))
- Add a concatenation cli utility ([ipld/go-car#565](https://github.com/ipld/go-car/pull/565))
- github.com/ipld/go-codec-dagpb (v1.6.0 -> v1.7.0):
- chore: v1.7.0 bump
- github.com/libp2p/go-flow-metrics (v0.2.0 -> v0.3.0):
438
docs/changelogs/v0.37.md
Normal file
@ -0,0 +1,438 @@
# Kubo changelog v0.37

<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>

This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.

- [v0.37.0](#v0370)

## v0.37.0

- [Overview](#overview)
- [🔦 Highlights](#-highlights)
  - [🚀 Repository migration from v16 to v17 with embedded tooling](#-repository-migration-from-v16-to-v17-with-embedded-tooling)
  - [🚦 Gateway concurrent request limits and retrieval timeouts](#-gateway-concurrent-request-limits-and-retrieval-timeouts)
  - [🔧 AutoConf: Complete control over network defaults](#-autoconf-complete-control-over-network-defaults)
  - [🗑️ Clear provide queue when reprovide strategy changes](#-clear-provide-queue-when-reprovide-strategy-changes)
  - [🪵 Revamped `ipfs log level` command](#-revamped-ipfs-log-level-command)
  - [📌 Named pins in `ipfs add` command](#-named-pins-in-ipfs-add-command)
  - [📝 New IPNS publishing options](#-new-ipns-publishing-options)
  - [🔢 Custom sequence numbers in `ipfs name publish`](#-custom-sequence-numbers-in-ipfs-name-publish)
  - [⚙️ `Reprovider.Strategy` is now consistently respected](#-reprovider-strategy-is-now-consistently-respected)
  - [⚙️ `Reprovider.Strategy=all`: improved memory efficiency](#-reproviderstrategyall-improved-memory-efficiency)
  - [🧹 Removed unnecessary dependencies](#-removed-unnecessary-dependencies)
  - [🔍 Improved `ipfs cid`](#-improved-ipfs-cid)
  - [⚠️ Deprecated `ipfs stats reprovide`](#-deprecated-ipfs-stats-reprovide)
  - [🔄 AutoRelay now uses all connected peers for relay discovery](#-autorelay-now-uses-all-connected-peers-for-relay-discovery)
  - [📊 Anonymous telemetry for better feature prioritization](#-anonymous-telemetry-for-better-feature-prioritization)
- [📦️ Important dependency updates](#-important-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)

### Overview

Kubo 0.37.0 introduces embedded repository migrations, gateway resource protection, complete AutoConf control, improved reprovider strategies, and anonymous telemetry for better feature prioritization. This release significantly improves memory efficiency, network configuration flexibility, and operational reliability while maintaining full backward compatibility.

### 🔦 Highlights

#### 🚀 Repository migration from v16 to v17 with embedded tooling

This release migrates the Kubo repository from version 16 to version 17. Migrations are now built directly into the binary - completing in milliseconds without internet access or external downloads.

`ipfs daemon --migrate` performs migrations automatically. Manual migration: `ipfs repo migrate --to=17` (or `--to=16 --allow-downgrade` for compatibility). Embedded migrations apply to v17+; older versions still require external tools.
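
For example (commands from the paragraph above; use `--allow-downgrade` only when moving back to an older repo version):

```bash
# automatic migration during daemon startup
ipfs daemon --migrate

# manual migration to repo version 17
ipfs repo migrate --to=17

# manual downgrade for compatibility with older Kubo
ipfs repo migrate --to=16 --allow-downgrade
```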

**Legacy migration deprecation**: Support for legacy migrations that download binaries from the internet will be removed in a future version. Only embedded migrations for the last 3 releases will be supported. Users with very old repositories should update in stages rather than skipping multiple versions.

#### 🚦 Gateway concurrent request limits and retrieval timeouts

New configurable limits protect gateway resources during high load:

- **[`Gateway.RetrievalTimeout`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayretrievaltimeout)** (default: 30s): Maximum duration for content retrieval. Returns 504 Gateway Timeout when exceeded - applies to both initial retrieval (time to first byte) and between subsequent writes.
- **[`Gateway.MaxConcurrentRequests`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxconcurrentrequests)** (default: 4096): Limits concurrent HTTP requests. Returns 429 Too Many Requests when exceeded. Protects nodes from traffic spikes and resource exhaustion, especially useful behind reverse proxies without rate-limiting.

New Prometheus metrics for monitoring:

- `ipfs_http_gw_concurrent_requests`: Current requests being processed
- `ipfs_http_gw_responses_total`: HTTP responses by status code
- `ipfs_http_gw_retrieval_timeouts_total`: Timeouts by status code and truncation status

Tuning tips:

- Monitor metrics to understand gateway behavior and adjust based on observations
- Watch `ipfs_http_gw_concurrent_requests` for saturation
- Track `ipfs_http_gw_retrieval_timeouts_total` vs success rates to identify timeout patterns indicating routing or storage provider issues
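
Both limits can be changed like any other config field; a minimal sketch (values below are illustrative, and the timeout is assumed to take a duration string like other Kubo timeouts):

```bash
ipfs config --json Gateway.MaxConcurrentRequests 8192
ipfs config --json Gateway.RetrievalTimeout '"1m"'
```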

#### 🔧 AutoConf: Complete control over network defaults

Configuration fields now support `["auto"]` placeholders that resolve to network defaults from [`AutoConf.URL`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autoconfurl). These defaults can be inspected, replaced with custom values, or disabled entirely. Previously, empty configuration fields like `Routing.DelegatedRouters: []` would use hardcoded defaults - this system makes those defaults explicit through `"auto"` values. When upgrading to Kubo 0.37, custom configurations remain unchanged.

New `--expand-auto` flag shows resolved values for any config field:

```bash
ipfs config show --expand-auto # View all resolved endpoints
ipfs config Bootstrap --expand-auto # Check specific values
ipfs config Routing.DelegatedRouters --expand-auto
ipfs config DNS.Resolvers --expand-auto
```

Configuration can be managed via:
- Replace `"auto"` with custom endpoints or set `[]` to disable features
- Switch modes with `--profile=autoconf-on|autoconf-off`
- Configure via `AutoConf.Enabled` and custom manifests via `AutoConf.URL`

```bash
# Enable automatic configuration
ipfs config profiles apply autoconf-on

# Or manually set specific fields
ipfs config Bootstrap '["auto"]'
ipfs config --json DNS.Resolvers '{".": ["https://dns.example.com/dns-query"], "eth.": ["auto"]}'
```

Organizations can host custom AutoConf manifests for private networks. See [AutoConf documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#autoconf) and format spec at https://conf.ipfs-mainnet.org/

#### 🗑️ Clear provide queue when reprovide strategy changes

Changing [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy) and restarting Kubo now automatically clears the provide queue. Only content matching the new strategy will be announced.

Manual queue clearing is also available (see the example below):

- `ipfs provide clear` - clear all queued content announcements
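
A typical flow when switching strategies might look like this (a sketch; the queue is also cleared automatically on the restart that follows the config change):

```bash
ipfs config Reprovider.Strategy pinned+mfs   # pick the new strategy
ipfs provide clear                           # optionally clear the queue by hand
# restart the daemon; only content matching the new strategy is announced
```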

> [!NOTE]
> Upgrading to Kubo 0.37 will automatically clear any preexisting provide queue. The next time `Reprovider.Interval` hits, `Reprovider.Strategy` will be executed on a clean slate, ensuring consistent behavior with your current configuration.

#### 🪵 Revamped `ipfs log level` command

The `ipfs log level` command has been completely revamped to support both getting and setting log levels with a unified interface.

**New: Getting log levels**

- `ipfs log level` - Shows default level only
- `ipfs log level all` - Shows log level for every subsystem, including default level
- `ipfs log level foo` - Shows log level for a specific subsystem only
- Kubo RPC API: `POST /api/v0/log/level?arg=<subsystem>`

**Enhanced: Setting log levels**

- `ipfs log level foo debug` - Sets "foo" subsystem to "debug" level
- `ipfs log level all info` - Sets all subsystems to "info" level (convenient, no escaping)
- `ipfs log level '*' info` - Equivalent to above but requires shell escaping
- `ipfs log level foo default` - Sets "foo" subsystem to current default level

The command now provides full visibility into your current logging configuration while maintaining full backward compatibility. Both `all` and `*` work for specifying all subsystems, with `all` being more convenient since it doesn't require shell escaping.
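
A quick illustration (assuming a subsystem named `bitswap`; `ipfs log ls` lists the available names):

```bash
ipfs log level                  # show the default level
ipfs log level bitswap debug    # raise one subsystem to debug
ipfs log level bitswap          # confirm the change
ipfs log level bitswap default  # restore the default level
```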

#### 🧷 Named pins in `ipfs add` command

Added `--pin-name` flag to `ipfs add` for assigning names to pins.

```console
$ ipfs add --pin-name=testname cat.jpg
added bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi cat.jpg

$ ipfs pin ls --names
bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi recursive testname
```

#### 📝 New IPNS publishing options

Added support for controlling IPNS record publishing strategies with new command flags and configuration.

**New command flags:**
```bash
# Publish without network connectivity (local datastore only)
ipfs name publish --allow-offline /ipfs/QmHash

# Publish without DHT connectivity (uses local datastore and HTTP delegated publishers)
ipfs name publish --allow-delegated /ipfs/QmHash
```

**Delegated publishers configuration:**

[`Ipns.DelegatedPublishers`](https://github.com/ipfs/kubo/blob/master/docs/config.md#ipnsdelegatedpublishers) configures HTTP endpoints for IPNS publishing. Supports `"auto"` for network defaults or custom HTTP endpoints. The `--allow-delegated` flag enables publishing through these endpoints without requiring DHT connectivity, useful for nodes behind restrictive networks or during testing.

#### 🔢 Custom sequence numbers in `ipfs name publish`

Added `--sequence` flag to `ipfs name publish` for setting custom sequence numbers in IPNS records. This enables advanced use cases like manually coordinating updates across multiple nodes. See `ipfs name publish --help` for details.
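
A hedged example (the CID is the placeholder from the named-pins section above; resolvers prefer the record with the highest sequence number, so pick a value above the currently published one):

```bash
ipfs name publish --sequence=42 /ipfs/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi
```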

#### ⚙️ `Reprovider.Strategy` is now consistently respected

Prior to this version, files added, blocks received etc. were "provided" to the network (announced on the DHT) regardless of the ["reproviding strategy" setting](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy). For example:

- Strategy set to "pinned" + `ipfs add --pin=false` → file was provided regardless
- Strategy set to "roots" + `ipfs pin add` → all blocks (not only the root) were provided

Only the periodic "reproviding" action (runs every 22h by default) respected the strategy.

This was inefficient as content that should not be provided was getting provided once. Now all operations respect `Reprovider.Strategy`. If set to "roots", no blocks other than pin roots will be provided regardless of what is fetched, added etc.
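
For instance, taking the first scenario above (the file name is illustrative):

```bash
ipfs config Reprovider.Strategy pinned
ipfs add --pin=false photo.jpg   # stored locally, but no longer announced
ipfs add photo.jpg               # pinned, so its blocks are announced
```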

> [!NOTE]
> **Behavior change:** The `--offline` flag no longer affects providing behavior. Both `ipfs add` and `ipfs --offline add` now provide blocks according to the reproviding strategy when run against an online daemon (previously `--offline add` did not provide). Since `ipfs add` has been nearly as fast as offline mode [since v0.35](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.35.md#fast-ipfs-add-in-online-mode), `--offline` is rarely needed. To run truly offline operations, use `ipfs --offline daemon`.

#### ⚙️ `Reprovider.Strategy=all`: improved memory efficiency

The memory cost of `Reprovider.Strategy=all` no longer grows with the number of pins. The strategy now processes blocks directly from the datastore in undefined order, eliminating the memory pressure tied to the number of pins.

As part of this improvement, the `flat` reprovider strategy has been renamed to `all` (the default). This cleanup removes the workaround introduced in v0.28 for pin root prioritization. With the introduction of more granular strategies like [`pinned+mfs`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy), we can now optimize the default `all` strategy for lower memory usage without compromising users who need pin root prioritization ([rationale](https://github.com/ipfs/kubo/pull/10928#issuecomment-3211040182)).

> [!NOTE]
> **Migration guidance:** If you experience undesired announcement delays of root CIDs with the new `all` strategy, switch to `pinned+mfs` for root prioritization.

#### 🧹 Removed unnecessary dependencies

Kubo has been cleaned up by removing unnecessary dependencies and packages:

- Removed `thirdparty/assert` (replaced by `github.com/stretchr/testify/require`)
- Removed `thirdparty/dir` (replaced by `misc/fsutil`)
- Removed `thirdparty/notifier` (unused)
- Removed `goprocess` dependency (replaced with native Go `context` patterns)

These changes reduce the dependency footprint while improving code maintainability and following Go best practices.

#### 🔍 Improved `ipfs cid`

Certain `ipfs cid` commands can now be run without a daemon or repository, and return correct exit code 1 on error, making it easier to perform CID conversion in scripts and CI/CD pipelines.

While at it, we also fixed unicode support in `ipfs cid bases --prefix` to correctly show `base256emoji` 🚀 :-)

#### ⚠️ Deprecated `ipfs stats reprovide`

The `ipfs stats reprovide` command has moved to `ipfs provide stat`. This was done to organize provider commands in one location.

> [!NOTE]
> `ipfs stats reprovide` still works, but is marked as deprecated and will be removed in a future release.

#### 🔄 AutoRelay now uses all connected peers for relay discovery

AutoRelay's relay discovery now includes all connected peers as potential relay candidates, not just peers discovered through the DHT. This allows peers connected via HTTP routing and manual `ipfs swarm connect` commands to serve as relays, improving connectivity for nodes using non-DHT routing configurations.

#### 📊 Anonymous telemetry for better feature prioritization

Per a suggestion from the IPFS Foundation, Kubo now sends optional anonymized telemetry information to Shipyard [maintainers](https://github.com/ipshipyard/roadmaps/issues/20).

**Privacy first**: The telemetry system collects only anonymous data - no personally identifiable information, file paths, or content data. A random UUID is generated on first run for anonymous identification. Users are notified before any data is sent and have time to opt out.

**Why**: We want to better understand Kubo usage across the ecosystem so we can better direct funding and work efforts. For example, we have little insight into how many nodes are NAT'ed and rely on AutoNAT for reachability. Some of the information can be inferred by crawling the network or logging `/identify` details in the bootstrappers, but users have no way of opting out from that, so we believe it is more transparent to concentrate this functionality in one place.

**What**: Currently, we send the following anonymous metrics:

<details><summary>Click to see telemetry metrics example</summary>

```
"uuid": "<unique_uuid>",
"agent_version": "kubo/0.37.0-dev",
"private_network": false,
"bootstrappers_custom": false,
"repo_size_bucket": 1073741824,
"uptime_bucket": 86400000000000,
"reprovider_strategy": "pinned",
"routing_type": "auto",
"routing_accelerated_dht_client": false,
"routing_delegated_count": 0,
"autonat_service_mode": "enabled",
"autonat_reachability": "",
"autoconf": true,
"autoconf_custom": false,
"swarm_enable_hole_punching": true,
"swarm_circuit_addresses": false,
"swarm_ipv4_public_addresses": true,
"swarm_ipv6_public_addresses": true,
"auto_tls_auto_wss": true,
"auto_tls_domain_suffix_custom": false,
"discovery_mdns_enabled": true,
"platform_os": "linux",
"platform_arch": "amd64",
"platform_containerized": false,
"platform_vm": false
```

</details>

The exact data sent for your node can be inspected by setting `GOLOG_LOG_LEVEL="telemetry=debug"`. Users will see an informative message the first time they launch a telemetry-enabled daemon, with time to opt out before any data is collected. Telemetry data is sent every 24h, with the first collection starting 15 minutes after daemon launch.
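
For example, to inspect the exact payload before deciding whether to opt out:

```bash
GOLOG_LOG_LEVEL="telemetry=debug" ipfs daemon
```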

**User control**: You can opt out at any time:

- Set environment variable `IPFS_TELEMETRY=off` before starting the daemon
- Or run `ipfs config Plugins.Plugins.telemetry.Config.Mode off` and restart the daemon

The telemetry plugin code lives in `plugin/plugins/telemetry`.

Learn more: [`/kubo/docs/telemetry.md`](https://github.com/ipfs/kubo/blob/master/docs/telemetry.md)

### 📦️ Important dependency updates

- update `boxo` to [v0.34.0](https://github.com/ipfs/boxo/releases/tag/v0.34.0) (incl. [v0.33.1](https://github.com/ipfs/boxo/releases/tag/v0.33.1))
- update `go-libp2p` to [v0.43.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.43.0)
- update `go-libp2p-kad-dht` to [v0.34.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.34.0)
- update `go-libp2p-pubsub` to [v0.14.2](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.2) (incl. [v0.14.1](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.1), [v0.14.0](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.14.0))
- update `ipfs-webui` to [v4.8.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.8.0)
- update to [Go 1.25](https://go.dev/doc/go1.25)

### 📝 Changelog

<details><summary>Full Changelog</summary>

- github.com/ipfs/kubo:
  - chore: set version to v0.37.0
  - feat(ci): docker linting (#10927) ([ipfs/kubo#10927](https://github.com/ipfs/kubo/pull/10927))
  - fix: disable telemetry in test profile (#10931) ([ipfs/kubo#10931](https://github.com/ipfs/kubo/pull/10931))
  - fix: harness tests random panic (#10933) ([ipfs/kubo#10933](https://github.com/ipfs/kubo/pull/10933))
  - chore: v0.37.0-rc1
  - feat: Reprovider.Strategy: rename "flat" to "all" (#10928) ([ipfs/kubo#10928](https://github.com/ipfs/kubo/pull/10928))
  - docs: improve `ipfs add --help` (#10926) ([ipfs/kubo#10926](https://github.com/ipfs/kubo/pull/10926))
  - feat: optimize docker builds (#10925) ([ipfs/kubo#10925](https://github.com/ipfs/kubo/pull/10925))
  - feat(config): AutoConf with "auto" placeholders (#10883) ([ipfs/kubo#10883](https://github.com/ipfs/kubo/pull/10883))
  - fix(ci): make NewRandPort thread-safe (#10921) ([ipfs/kubo#10921](https://github.com/ipfs/kubo/pull/10921))
  - fix: resolve TestAddMultipleGCLive race condition (#10916) ([ipfs/kubo#10916](https://github.com/ipfs/kubo/pull/10916))
  - feat: telemetry plugin (#10866) ([ipfs/kubo#10866](https://github.com/ipfs/kubo/pull/10866))
  - fix typos in docs and comments (#10920) ([ipfs/kubo#10920](https://github.com/ipfs/kubo/pull/10920))
  - Upgrade to Boxo v0.34.0 (#10917) ([ipfs/kubo#10917](https://github.com/ipfs/kubo/pull/10917))
  - test: fix flaky repo verify (#10743) ([ipfs/kubo#10743](https://github.com/ipfs/kubo/pull/10743))
  - feat(config): `Gateway.RetrievalTimeout|MaxConcurrentRequests` (#10905) ([ipfs/kubo#10905](https://github.com/ipfs/kubo/pull/10905))
  - chore: replace random test utils with equivalents in go-test/random (#10915) ([ipfs/kubo#10915](https://github.com/ipfs/kubo/pull/10915))
  - feat: require go1.25 for building kubo (#10913) ([ipfs/kubo#10913](https://github.com/ipfs/kubo/pull/10913))
  - feat(ci): reusable spellcheck from unified CI (#10873) ([ipfs/kubo#10873](https://github.com/ipfs/kubo/pull/10873))
  - fix(ci): docker build (#10914) ([ipfs/kubo#10914](https://github.com/ipfs/kubo/pull/10914))
  - Replace `uber-go/multierr` with `errors.Join` (#10912) ([ipfs/kubo#10912](https://github.com/ipfs/kubo/pull/10912))
  - feat(ipns): support passing custom sequence number during publishing (#10851) ([ipfs/kubo#10851](https://github.com/ipfs/kubo/pull/10851))
  - fix(relay): feed connected peers to AutoRelay discovery (#10901) ([ipfs/kubo#10901](https://github.com/ipfs/kubo/pull/10901))
  - fix(sharness): no blocking on unclean FUSE unmount (#10906) ([ipfs/kubo#10906](https://github.com/ipfs/kubo/pull/10906))
  - feat: add query functionality to log level command (#10885) ([ipfs/kubo#10885](https://github.com/ipfs/kubo/pull/10885))
  - fix(ci): switch to debian:bookworm-slim
  - Fix failing FUSE test (#10904) ([ipfs/kubo#10904](https://github.com/ipfs/kubo/pull/10904))
  - fix(cmd): exit 1 on error (#10903) ([ipfs/kubo#10903](https://github.com/ipfs/kubo/pull/10903))
  - feat: go-libp2p v0.43.0 (#10892) ([ipfs/kubo#10892](https://github.com/ipfs/kubo/pull/10892))
  - fix: `ipfs cid` without repo (#10897) ([ipfs/kubo#10897](https://github.com/ipfs/kubo/pull/10897))
  - client/rpc: re-enable tests on windows. (#10895) ([ipfs/kubo#10895](https://github.com/ipfs/kubo/pull/10895))
  - fix: Provide according to Reprovider.Strategy (#10886) ([ipfs/kubo#10886](https://github.com/ipfs/kubo/pull/10886))
  - feat: ipfs-webui v4.8.0 (#10902) ([ipfs/kubo#10902](https://github.com/ipfs/kubo/pull/10902))
  - refactor: move `ipfs stat provide/reprovide` to `ipfs provide stat` (#10896) ([ipfs/kubo#10896](https://github.com/ipfs/kubo/pull/10896))
  - Bitswap: use a single ConnectEventManager. ([ipfs/kubo#10889](https://github.com/ipfs/kubo/pull/10889))
  - feat(add): add support for naming pinned CIDs (#10877) ([ipfs/kubo#10877](https://github.com/ipfs/kubo/pull/10877))
  - refactor: remove goprocess (#10872) ([ipfs/kubo#10872](https://github.com/ipfs/kubo/pull/10872))
  - feat(daemon): accelerated client startup note (#10859) ([ipfs/kubo#10859](https://github.com/ipfs/kubo/pull/10859))
  - docs:added GOLOG_LOG_LEVEL to debug-guide for logging more info (#10894) ([ipfs/kubo#10894](https://github.com/ipfs/kubo/pull/10894))
  - core: Add a ContentDiscovery field ([ipfs/kubo#10890](https://github.com/ipfs/kubo/pull/10890))
  - chore: update go-libp2p and p2p-forge (#10887) ([ipfs/kubo#10887](https://github.com/ipfs/kubo/pull/10887))
  - Upgrade to Boxo v0.33.1 (#10888) ([ipfs/kubo#10888](https://github.com/ipfs/kubo/pull/10888))
  - remove unneeded thirdparty packages (#10871) ([ipfs/kubo#10871](https://github.com/ipfs/kubo/pull/10871))
  - provider: clear provide queue when reprovide strategy changes (#10863) ([ipfs/kubo#10863](https://github.com/ipfs/kubo/pull/10863))
  - chore: merge release v0.36.0 ([ipfs/kubo#10868](https://github.com/ipfs/kubo/pull/10868))
  - docs: release checklist fixes from 0.36 (#10861) ([ipfs/kubo#10861](https://github.com/ipfs/kubo/pull/10861))
  - docs(config): add network exposure considerations (#10856) ([ipfs/kubo#10856](https://github.com/ipfs/kubo/pull/10856))
  - fix: handling of EDITOR env var (#10855) ([ipfs/kubo#10855](https://github.com/ipfs/kubo/pull/10855))
  - refactor: use slices.Sort where appropriate (#10858) ([ipfs/kubo#10858](https://github.com/ipfs/kubo/pull/10858))
  - Upgrade to Boxo v0.33.0 (#10857) ([ipfs/kubo#10857](https://github.com/ipfs/kubo/pull/10857))
  - chore: Upgrade github.com/cockroachdb/pebble/v2 to v2.0.6 for Go 1.25 support (#10850) ([ipfs/kubo#10850](https://github.com/ipfs/kubo/pull/10850))
  - core:constructor: add a log line about http retrieval ([ipfs/kubo#10852](https://github.com/ipfs/kubo/pull/10852))
  - chore: p2p-forge v0.6.0 + go-libp2p 0.42.0 (#10840) ([ipfs/kubo#10840](https://github.com/ipfs/kubo/pull/10840))
  - docs: fix minor typos (#10849) ([ipfs/kubo#10849](https://github.com/ipfs/kubo/pull/10849))
  - Replace use of go-car v1 with go-car/v2 (#10845) ([ipfs/kubo#10845](https://github.com/ipfs/kubo/pull/10845))
  - chore: 0.37.0-dev
- github.com/ipfs/boxo (v0.33.0 -> v0.34.0):
  - Release v0.34.0 ([ipfs/boxo#1003](https://github.com/ipfs/boxo/pull/1003))
  - blockstore: remove HashOnRead ([ipfs/boxo#1001](https://github.com/ipfs/boxo/pull/1001))
  - Update go-log to v2.8.1 ([ipfs/boxo#998](https://github.com/ipfs/boxo/pull/998))
  - feat: autoconf client library (#997) ([ipfs/boxo#997](https://github.com/ipfs/boxo/pull/997))
  - feat(gateway): concurrency and retrieval timeout limits (#994) ([ipfs/boxo#994](https://github.com/ipfs/boxo/pull/994))
  - update dependencies ([ipfs/boxo#999](https://github.com/ipfs/boxo/pull/999))
  - fix: cidqueue gc must iterate all elements in queue ([ipfs/boxo#1000](https://github.com/ipfs/boxo/pull/1000))
  - Replace `uber-go/multierr` with `errors.Join` ([ipfs/boxo#996](https://github.com/ipfs/boxo/pull/996))
  - feat(namesys/IPNSPublisher): expose ability to set Sequence (#962) ([ipfs/boxo#962](https://github.com/ipfs/boxo/pull/962))
  - upgrade to go-libp2p v0.43.0 ([ipfs/boxo#993](https://github.com/ipfs/boxo/pull/993))
  - Remove providing Exchange. Call Provide() from relevant places. ([ipfs/boxo#976](https://github.com/ipfs/boxo/pull/976))
  - reprovider: s/initial/initial ([ipfs/boxo#992](https://github.com/ipfs/boxo/pull/992))
  - Release v0.33.1 ([ipfs/boxo#991](https://github.com/ipfs/boxo/pull/991))
  - fix(bootstrap): filter-out peers behind relays (#987) ([ipfs/boxo#987](https://github.com/ipfs/boxo/pull/987))
  - Bitswap: fix double-worker in connectEventManager. Logging improvements. ([ipfs/boxo#986](https://github.com/ipfs/boxo/pull/986))
  - upgrade to go-libp2p v0.42.1 (#988) ([ipfs/boxo#988](https://github.com/ipfs/boxo/pull/988))
  - bitswap/httpnet: fix sudden stop of http retrieval requests (#984) ([ipfs/boxo#984](https://github.com/ipfs/boxo/pull/984))
  - bitswap/client: disable use of traceability block by default (#956) ([ipfs/boxo#956](https://github.com/ipfs/boxo/pull/956))
  - test(gateway): fix race in TestCarBackendTar (#985) ([ipfs/boxo#985](https://github.com/ipfs/boxo/pull/985))
  - Shutdown the sessionWantSender changes queue when session is shutdown (#983) ([ipfs/boxo#983](https://github.com/ipfs/boxo/pull/983))
  - bitswap/httpnet: start pinging before signaling Connected ([ipfs/boxo#982](https://github.com/ipfs/boxo/pull/982))
  - Queue all changes in order using non-blocking async queue ([ipfs/boxo#981](https://github.com/ipfs/boxo/pull/981))
  - bitswap/httpnet: fix peers silently stopping from doing http requests ([ipfs/boxo#980](https://github.com/ipfs/boxo/pull/980))
  - provider: clear provide queue (#978) ([ipfs/boxo#978](https://github.com/ipfs/boxo/pull/978))
  - update dependencies ([ipfs/boxo#977](https://github.com/ipfs/boxo/pull/977))
- github.com/ipfs/go-datastore (v0.8.2 -> v0.8.3):
  - new version (#245) ([ipfs/go-datastore#245](https://github.com/ipfs/go-datastore/pull/245))
  - sort using slices.Sort (#243) ([ipfs/go-datastore#243](https://github.com/ipfs/go-datastore/pull/243))
  - Replace `uber-go/multierr` with `errors.Join` (#242) ([ipfs/go-datastore#242](https://github.com/ipfs/go-datastore/pull/242))
  - replace gopkg.in/check.v1 with github.com/stretchr/testify (#241) ([ipfs/go-datastore#241](https://github.com/ipfs/go-datastore/pull/241))
- github.com/ipfs/go-ipld-cbor (v0.2.0 -> v0.2.1):
  - new version ([ipfs/go-ipld-cbor#111](https://github.com/ipfs/go-ipld-cbor/pull/111))
  - update dependencies ([ipfs/go-ipld-cbor#110](https://github.com/ipfs/go-ipld-cbor/pull/110))
- github.com/ipfs/go-log/v2 (v2.6.0 -> v2.8.1):
  - new version (#171) ([ipfs/go-log#171](https://github.com/ipfs/go-log/pull/171))
  - feat: add LevelEnabled function to check if log level enabled (#170) ([ipfs/go-log#170](https://github.com/ipfs/go-log/pull/170))
  - Replace `uber-go/multierr` with `errors.Join` (#168) ([ipfs/go-log#168](https://github.com/ipfs/go-log/pull/168))
  - new version (#167) ([ipfs/go-log#167](https://github.com/ipfs/go-log/pull/167))
  - Test using testify package (#166) ([ipfs/go-log#166](https://github.com/ipfs/go-log/pull/166))
  - Revise the loglevel API to be more golang idiomatic (#165) ([ipfs/go-log#165](https://github.com/ipfs/go-log/pull/165))
  - new version (#164) ([ipfs/go-log#164](https://github.com/ipfs/go-log/pull/164))
  - feat: add GetLogLevel and GetAllLogLevels (#160) ([ipfs/go-log#160](https://github.com/ipfs/go-log/pull/160))
- github.com/ipfs/go-test (v0.2.2 -> v0.2.3):
  - new version (#30) ([ipfs/go-test#30](https://github.com/ipfs/go-test/pull/30))
  - fix: multihash random generation (#28) ([ipfs/go-test#28](https://github.com/ipfs/go-test/pull/28))
  - Add RandomName function to generate random filename (#26) ([ipfs/go-test#26](https://github.com/ipfs/go-test/pull/26))
- github.com/libp2p/go-libp2p (v0.42.0 -> v0.43.0):
  - Release v0.43 (#3353) ([libp2p/go-libp2p#3353](https://github.com/libp2p/go-libp2p/pull/3353))
  - basichost: fix deadlock with addrs_manager (#3348) ([libp2p/go-libp2p#3348](https://github.com/libp2p/go-libp2p/pull/3348))
  - basichost: fix Addrs docstring (#3341) ([libp2p/go-libp2p#3341](https://github.com/libp2p/go-libp2p/pull/3341))
  - quic: upgrade quic-go to v0.53 (#3323) ([libp2p/go-libp2p#3323](https://github.com/libp2p/go-libp2p/pull/3323))
- github.com/libp2p/go-libp2p-kad-dht (v0.33.1 -> v0.34.0):
  - chore: release v0.34.0 (#1130) ([libp2p/go-libp2p-kad-dht#1130](https://github.com/libp2p/go-libp2p-kad-dht/pull/1130))
  - make crawler protocol messenger configurable (#1128) ([libp2p/go-libp2p-kad-dht#1128](https://github.com/libp2p/go-libp2p-kad-dht/pull/1128))
  - fix: move non-error log to warning level (#1119) ([libp2p/go-libp2p-kad-dht#1119](https://github.com/libp2p/go-libp2p-kad-dht/pull/1119))
  - migrate providers package (#1094) ([libp2p/go-libp2p-kad-dht#1094](https://github.com/libp2p/go-libp2p-kad-dht/pull/1094))
- github.com/libp2p/go-libp2p-pubsub (v0.13.1 -> v0.14.2):
  - Release v0.14.2 (#629) ([libp2p/go-libp2p-pubsub#629](https://github.com/libp2p/go-libp2p-pubsub/pull/629))
  - Fix test races and enable race tests in CI (#626) ([libp2p/go-libp2p-pubsub#626](https://github.com/libp2p/go-libp2p-pubsub/pull/626))
  - Fix race when calling Preprocess and msg ID generator(#627) ([libp2p/go-libp2p-pubsub#627](https://github.com/libp2p/go-libp2p-pubsub/pull/627))
  - Release v0.14.1 (#623) ([libp2p/go-libp2p-pubsub#623](https://github.com/libp2p/go-libp2p-pubsub/pull/623))
  - fix(BatchPublishing): Make topic.AddToBatch threadsafe (#622) ([libp2p/go-libp2p-pubsub#622](https://github.com/libp2p/go-libp2p-pubsub/pull/622))
  - Release v0.14.0 (#614) ([libp2p/go-libp2p-pubsub#614](https://github.com/libp2p/go-libp2p-pubsub/pull/614))
  - refactor: 10x faster RPC splitting (#615) ([libp2p/go-libp2p-pubsub#615](https://github.com/libp2p/go-libp2p-pubsub/pull/615))
  - test: Fix flaky TestMessageBatchPublish (#616) ([libp2p/go-libp2p-pubsub#616](https://github.com/libp2p/go-libp2p-pubsub/pull/616))
  - Send IDONTWANT before first publish (#612) ([libp2p/go-libp2p-pubsub#612](https://github.com/libp2p/go-libp2p-pubsub/pull/612))
  - feat(gossipsub): Add MessageBatch (#607) ([libp2p/go-libp2p-pubsub#607](https://github.com/libp2p/go-libp2p-pubsub/pull/607))
  - fix(IDONTWANT)!: Do not IDONTWANT your sender (#609) ([libp2p/go-libp2p-pubsub#609](https://github.com/libp2p/go-libp2p-pubsub/pull/609))
- github.com/multiformats/go-multiaddr (v0.16.0 -> v0.16.1):
  - Release v0.16.1 (#281) ([multiformats/go-multiaddr#281](https://github.com/multiformats/go-multiaddr/pull/281))
  - reduce allocations in Bytes() and manet methods (#280) ([multiformats/go-multiaddr#280](https://github.com/multiformats/go-multiaddr/pull/280))
|
||||
- github.com/whyrusleeping/cbor-gen (v0.1.2 -> v0.3.1):
|
||||
- fix: capture field count early for "optional" length check (#112) ([whyrusleeping/cbor-gen#112](https://github.com/whyrusleeping/cbor-gen/pull/112))
|
||||
- doc: basic cbor-gen documentation (#110) ([whyrusleeping/cbor-gen#110](https://github.com/whyrusleeping/cbor-gen/pull/110))
|
||||
- feat: add support for optional fields at the end of tuple structs (#109) ([whyrusleeping/cbor-gen#109](https://github.com/whyrusleeping/cbor-gen/pull/109))
|
||||
- Regenerate test files ([whyrusleeping/cbor-gen#107](https://github.com/whyrusleeping/cbor-gen/pull/107))
|
||||
- improve allocations in map serialization ([whyrusleeping/cbor-gen#105](https://github.com/whyrusleeping/cbor-gen/pull/105))
|
||||
- fixed array in struct instead of heap slice ([whyrusleeping/cbor-gen#104](https://github.com/whyrusleeping/cbor-gen/pull/104))
|
||||
- optionally sort type names in generated code file ([whyrusleeping/cbor-gen#102](https://github.com/whyrusleeping/cbor-gen/pull/102))
|
||||
- fix handling of an []*string field ([whyrusleeping/cbor-gen#101](https://github.com/whyrusleeping/cbor-gen/pull/101))
|
||||
- fix: reject negative big integers ([whyrusleeping/cbor-gen#100](https://github.com/whyrusleeping/cbor-gen/pull/100))
|
||||
|
||||
</details>
|
||||
|
||||
### 👨👩👧👦 Contributors

| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Marcin Rataj | 26 | +16033/-755 | 176 |
| Andrew Gillis | 35 | +2656/-1911 | 142 |
| Hector Sanjuan | 30 | +2638/-760 | 114 |
| Marco Munizaga | 11 | +1244/-362 | 41 |
| Russell Dempsey | 2 | +1031/-33 | 7 |
| Guillaume Michel | 4 | +899/-65 | 15 |
| whyrusleeping | 4 | +448/-177 | 15 |
| sukun | 9 | +312/-191 | 31 |
| gammazero | 23 | +239/-216 | 45 |
| Brian Olson | 5 | +343/-16 | 11 |
| Steven Allen | 3 | +294/-7 | 9 |
| Sergey Gorbunov | 2 | +247/-11 | 9 |
| Kapil Sareen | 1 | +86/-13 | 10 |
| Masih H. Derkani | 1 | +72/-24 | 1 |
| Piotr Galar | 1 | +40/-55 | 23 |
| Rod Vagg | 1 | +13/-11 | 3 |
| Ankita Sahu | 1 | +2/-0 | 1 |
| Štefan Baebler | 1 | +1/-0 | 1 |
29
docs/changelogs/v0.38.md
Normal file
@ -0,0 +1,29 @@

# Kubo changelog v0.38

<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>

This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.

- [v0.38.0](#v0380)

## v0.38.0

- [Overview](#overview)
- [🔦 Highlights](#-highlights)
- [📦️ Important dependency updates](#-important-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨👩👧👦 Contributors](#-contributors)

### Overview

### 🔦 Highlights

### 📦️ Important dependency updates

### 📝 Changelog

<details><summary>Full Changelog</summary>

</details>

### 👨👩👧👦 Contributors