commit 9e7bf8fee1

    Merge remote-tracking branch 'origin/master' into schomatis/feat/files-cmd/mfs-new-root

    # Conflicts:
    #	core/node/core.go
.cspell.yml | 6 (new file)
@@ -0,0 +1,6 @@
+ignoreWords:
+  - childs # This spelling is used in the files command
+  - NodeCreater # This spelling is used in the fuse dependency
+  - Boddy # One of the contributors to the project - Chris Boddy
+  - Botto # One of the contributors to the project - Santiago Botto
+  - cose # dag-cose
.gitattributes | 20
@@ -15,3 +15,23 @@ LICENSE text eol=auto
 # Binary assets
 assets/init-doc/* binary
 core/coreunix/test_data/** binary
+test/cli/migrations/testdata/** binary
+
+# Generated test data
+test/cli/migrations/testdata/** linguist-generated=true
+test/cli/autoconf/testdata/** linguist-generated=true
+test/cli/fixtures/** linguist-generated=true
+test/sharness/t0054-dag-car-import-export-data/** linguist-generated=true
+test/sharness/t0109-gateway-web-_redirects-data/** linguist-generated=true
+test/sharness/t0114-gateway-subdomains/** linguist-generated=true
+test/sharness/t0115-gateway-dir-listing/** linguist-generated=true
+test/sharness/t0116-gateway-cache/** linguist-generated=true
+test/sharness/t0119-prometheus-data/** linguist-generated=true
+test/sharness/t0165-keystore-data/** linguist-generated=true
+test/sharness/t0275-cid-security-data/** linguist-generated=true
+test/sharness/t0280-plugin-dag-jose-data/** linguist-generated=true
+test/sharness/t0280-plugin-data/** linguist-generated=true
+test/sharness/t0280-plugin-git-data/** linguist-generated=true
+test/sharness/t0400-api-no-gateway/** linguist-generated=true
+test/sharness/t0701-delegated-routing-reframe/** linguist-generated=true
+test/sharness/t0702-delegated-routing-http/** linguist-generated=true
.github/ISSUE_TEMPLATE/bug-report.yml | 3
@@ -32,8 +32,9 @@ body:
       label: Installation method
       description: Please select your installation method
       options:
+        - dist.ipfs.tech or ipfs-update
+        - docker image
         - ipfs-desktop
-        - ipfs-update or dist.ipfs.tech
         - third-party binary
         - built from source
   - type: textarea
.github/ISSUE_TEMPLATE/enhancement.yml | 1
@@ -2,6 +2,7 @@ name: Enhancement
 description: Suggest an improvement to an existing kubo feature.
 labels:
   - kind/enhancement
+  - need/triage
 body:
   - type: markdown
     attributes:
.github/ISSUE_TEMPLATE/feature.yml | 1
@@ -2,6 +2,7 @@ name: Feature
 description: Suggest a new feature in Kubo.
 labels:
   - kind/feature
+  - need/triage
 body:
   - type: markdown
     attributes:
.github/build-platforms.yml | 17 (new file)
@@ -0,0 +1,17 @@
+# Build platforms configuration for Kubo
+# Matches https://github.com/ipfs/distributions/blob/master/dists/kubo/build_matrix
+# plus linux-riscv64 for emerging architecture support
+#
+# The Go compiler handles FUSE support automatically via build tags.
+# Platforms are simply listed - no need to specify FUSE capability.
+
+platforms:
+  - darwin-amd64
+  - darwin-arm64
+  - freebsd-amd64
+  - linux-amd64
+  - linux-arm64
+  - linux-riscv64
+  - openbsd-amd64
+  - windows-amd64
+  - windows-arm64
.github/workflows/codeql-analysis.yml | 12
@@ -29,21 +29,21 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

       - name: Setup Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
        with:
-          go-version: 1.22.x
+          go-version-file: 'go.mod'

       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
+        uses: github/codeql-action/init@v4
        with:
          languages: go

      - name: Autobuild
-        uses: github/codeql-action/autobuild@v3
+        uses: github/codeql-action/autobuild@v4

      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v3
+        uses: github/codeql-action/analyze@v4
.github/workflows/docker-build.yml | 34 (deleted)
@@ -1,34 +0,0 @@
-# If we decide to run build-image.yml on every PR, we could deprecate this workflow.
-name: Docker Build
-
-on:
-  workflow_dispatch:
-  pull_request:
-    paths-ignore:
-      - '**/*.md'
-  push:
-    branches:
-      - 'master'
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  docker-build:
-    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
-    runs-on: ubuntu-latest
-    timeout-minutes: 10
-    env:
-      IMAGE_NAME: ipfs/kubo
-      WIP_IMAGE_TAG: wip
-    defaults:
-      run:
-        shell: bash
-    steps:
-      - uses: actions/setup-go@v5
-        with:
-          go-version: 1.22.x
-      - uses: actions/checkout@v4
-      - run: docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG .
-      - run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version
.github/workflows/docker-check.yml | 62 (new file)
@@ -0,0 +1,62 @@
+# This workflow performs a quick Docker build check on PRs and pushes to master.
+# It builds the Docker image and runs a basic smoke test to ensure the image works.
+# This is a lightweight check - for full multi-platform builds and publishing, see docker-image.yml
+name: Docker Check
+
+on:
+  workflow_dispatch:
+  pull_request:
+    paths-ignore:
+      - '**/*.md'
+  push:
+    branches:
+      - 'master'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  lint:
+    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - uses: actions/checkout@v6
+      - uses: hadolint/hadolint-action@v3.3.0
+        with:
+          dockerfile: Dockerfile
+          failure-threshold: warning
+          verbose: true
+          format: tty
+
+  build:
+    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    env:
+      IMAGE_NAME: ipfs/kubo
+      WIP_IMAGE_TAG: wip
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Build Docker image with BuildKit
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          push: false
+          load: true
+          tags: ${{ env.IMAGE_NAME }}:${{ env.WIP_IMAGE_TAG }}
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max
+
+      - name: Test Docker image
+        run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version
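The same checks can be approximated locally before opening a PR. A minimal sketch, assuming Docker and the hadolint binary are installed (the `ipfs/kubo:wip` tag simply mirrors the workflow's `WIP_IMAGE_TAG`):

```sh
# Lint the Dockerfile with the same failure threshold the workflow uses
hadolint --failure-threshold warning Dockerfile

# Build the image and run the same smoke test as the "build" job
docker build -t ipfs/kubo:wip .
docker run --rm ipfs/kubo:wip --version
```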
.github/workflows/docker-image.yml | 75
@@ -1,3 +1,7 @@
+# This workflow builds and publishes official Docker images to Docker Hub.
+# It handles multi-platform builds (amd64, arm/v7, arm64/v8) and pushes tagged releases.
+# This workflow is triggered on tags, specific branches, and can be manually dispatched.
+# For quick build checks during development, see docker-check.yml
 name: Docker Push

 on:
@@ -19,6 +23,7 @@ on:
   push:
     branches:
       - 'master'
+      - 'staging'
       - 'bifrost-*'
     tags:
      - 'v*'
@@ -31,13 +36,14 @@ jobs:
     if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
     name: Push Docker image to Docker Hub
     runs-on: ubuntu-latest
-    timeout-minutes: 90
+    timeout-minutes: 15
     env:
       IMAGE_NAME: ipfs/kubo
       LEGACY_IMAGE_NAME: ipfs/go-ipfs
     outputs:
       tags: ${{ steps.tags.outputs.value }}
     steps:
       - name: Check out the repo
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
@@ -45,13 +51,11 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

-      - name: Cache Docker layers
-        uses: actions/cache@v4
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v3
        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
+          username: ${{ vars.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}

       - name: Get tags
         id: tags
@@ -62,12 +66,6 @@ jobs:
           echo "EOF" >> $GITHUB_OUTPUT
         shell: bash

-      - name: Log in to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ vars.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-
       # We have to build each platform separately because when using multi-arch
       # builds, only one platform is being loaded into the cache. This would
       # prevent us from testing the other platforms.
@@ -80,8 +78,10 @@ jobs:
           load: true
           file: ./Dockerfile
           tags: ${{ env.IMAGE_NAME }}:linux-amd64
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max

       - name: Build Docker image (linux/arm/v7)
         uses: docker/build-push-action@v6
@@ -92,8 +92,10 @@ jobs:
           load: true
           file: ./Dockerfile
           tags: ${{ env.IMAGE_NAME }}:linux-arm-v7
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max

       - name: Build Docker image (linux/arm64/v8)
         uses: docker/build-push-action@v6
@@ -104,14 +106,24 @@ jobs:
           load: true
           file: ./Dockerfile
           tags: ${{ env.IMAGE_NAME }}:linux-arm64-v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: type=gha,mode=max

       # We test all the images on amd64 host here. This uses QEMU to emulate
       # the other platforms.
-      - run: docker run --rm $IMAGE_NAME:linux-amd64 --version
-      - run: docker run --rm $IMAGE_NAME:linux-arm-v7 --version
-      - run: docker run --rm $IMAGE_NAME:linux-arm64-v8 --version
+      # NOTE: --version should finish instantly, but sometimes
+      # it hangs on github CI (could be qemu issue), so we retry to remove false negatives
+      - name: Smoke-test linux-amd64
+        run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-amd64 version --all && break || [ $i = 3 ] && exit 1; done
+        timeout-minutes: 1
+      - name: Smoke-test linux-arm-v7
+        run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-arm-v7 version --all && break || [ $i = 3 ] && exit 1; done
+        timeout-minutes: 1
+      - name: Smoke-test linux-arm64-v8
+        run: for i in {1..3}; do timeout 15s docker run --rm $IMAGE_NAME:linux-arm64-v8 version --all && break || [ $i = 3 ] && exit 1; done
+        timeout-minutes: 1

       # This will only push the previously built images.
       - if: github.event_name != 'workflow_dispatch' || github.event.inputs.push == 'true'
@@ -123,12 +135,9 @@ jobs:
           push: true
           file: ./Dockerfile
           tags: "${{ github.event.inputs.tags || steps.tags.outputs.value }}"
-          cache-from: type=local,src=/tmp/.buildx-cache-new
-          cache-to: type=local,dest=/tmp/.buildx-cache-new
-
-      # https://github.com/docker/build-push-action/issues/252
-      # https://github.com/moby/buildkit/issues/1896
-      - name: Move cache to limit growth
-        run: |
-          rm -rf /tmp/.buildx-cache
-          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+          cache-from: |
+            type=gha
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache
+          cache-to: |
+            type=gha,mode=max
+            type=registry,ref=${{ env.IMAGE_NAME }}:buildcache,mode=max
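The caching above moved from a local `/tmp/.buildx-cache` directory to the GitHub Actions cache (`type=gha`) combined with a registry-backed `buildcache` tag. The registry cache can also be exercised outside CI with buildx directly; a rough sketch, assuming push access to such a tag (substitute your own registry and tag otherwise):

```sh
docker buildx build \
  --cache-from type=registry,ref=ipfs/kubo:buildcache \
  --cache-to type=registry,ref=ipfs/kubo:buildcache,mode=max \
  --load -t ipfs/kubo:dev .
```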
.github/workflows/gateway-conformance.yml | 46
@@ -41,22 +41,21 @@ jobs:
     steps:
       # 1. Download the gateway-conformance fixtures
       - name: Download gateway-conformance fixtures
-        uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.6
+        uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8
        with:
          output: fixtures

      # 2. Build the kubo-gateway
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: 1.22.x
-      - uses: protocol/cache-go-action@v1
-        with:
-          name: ${{ github.job }}
      - name: Checkout kubo-gateway
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          path: kubo-gateway
+      - name: Setup Go
+        uses: actions/setup-go@v6
+        with:
+          go-version-file: 'kubo-gateway/go.mod'
+          cache: true
+          cache-dependency-path: kubo-gateway/go.sum
      - name: Build kubo-gateway
        run: make build
        working-directory: kubo-gateway
@@ -94,7 +93,7 @@ jobs:

      # 6. Run the gateway-conformance tests
      - name: Run gateway-conformance tests
-        uses: ipfs/gateway-conformance/.github/actions/test@v0.6
+        uses: ipfs/gateway-conformance/.github/actions/test@v0.8
        with:
          gateway-url: http://127.0.0.1:8080
          subdomain-url: http://localhost:8080
@@ -110,13 +109,13 @@ jobs:
        run: cat output.md >> $GITHUB_STEP_SUMMARY
      - name: Upload HTML report
        if: failure() || success()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance.html
          path: output.html
      - name: Upload JSON report
        if: failure() || success()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance.json
          path: output.json
@@ -128,22 +127,21 @@ jobs:
     steps:
       # 1. Download the gateway-conformance fixtures
       - name: Download gateway-conformance fixtures
-        uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.6
+        uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8
        with:
          output: fixtures

      # 2. Build the kubo-gateway
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: 1.22.x
-      - uses: protocol/cache-go-action@v1
-        with:
-          name: ${{ github.job }}
      - name: Checkout kubo-gateway
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          path: kubo-gateway
+      - name: Setup Go
+        uses: actions/setup-go@v6
+        with:
+          go-version-file: 'kubo-gateway/go.mod'
+          cache: true
+          cache-dependency-path: kubo-gateway/go.sum
      - name: Build kubo-gateway
        run: make build
        working-directory: kubo-gateway
@@ -201,7 +199,7 @@ jobs:

      # 9. Run the gateway-conformance tests over libp2p
      - name: Run gateway-conformance tests over libp2p
-        uses: ipfs/gateway-conformance/.github/actions/test@v0.6
+        uses: ipfs/gateway-conformance/.github/actions/test@v0.8
        with:
          gateway-url: http://127.0.0.1:8092
          args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length'
@@ -216,13 +214,13 @@ jobs:
        run: cat output.md >> $GITHUB_STEP_SUMMARY
      - name: Upload HTML report
        if: failure() || success()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance-libp2p.html
          path: output.html
      - name: Upload JSON report
        if: failure() || success()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance-libp2p.json
          path: output.json
.github/workflows/generated-pr.yml | 14 (new file)
@@ -0,0 +1,14 @@
+name: Close Generated PRs
+
+on:
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+
+permissions:
+  issues: write
+  pull-requests: write
+
+jobs:
+  stale:
+    uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1
.github/workflows/gobuild.yml | 38
@@ -21,20 +21,38 @@ jobs:
     env:
       TEST_DOCKER: 0
       TEST_VERBOSE: 1
-      TRAVIS: 1
       GIT_PAGER: cat
       IPFS_CHECK_RCMGR_DEFAULTS: 1
     defaults:
       run:
         shell: bash
     steps:
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v6
+      - uses: actions/setup-go@v6
        with:
-          go-version: 1.22.x
-      - uses: actions/checkout@v4
-      - run: make cmd/ipfs-try-build
-        env:
-          TEST_FUSE: 1
-      - run: make cmd/ipfs-try-build
-        env:
-          TEST_FUSE: 0
+          go-version-file: 'go.mod'
+          cache: true
+          cache-dependency-path: go.sum
+
+      - name: Build all platforms
+        run: |
+          # Read platforms from build-platforms.yml and build each one
+          echo "Building kubo for all platforms..."
+
+          # Read and build each platform
+          grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' | while read -r platform; do
+            if [ -z "$platform" ]; then
+              continue
+            fi
+
+            echo "::group::Building $platform"
+            GOOS=$(echo "$platform" | cut -d- -f1)
+            GOARCH=$(echo "$platform" | cut -d- -f2)
+
+            echo "Building $platform"
+            echo " GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs"
+            GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs
+            echo "::endgroup::"
+          done
+
+          echo "All platforms built successfully"
.github/workflows/golang-analysis.yml | 6
@@ -22,12 +22,12 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 10
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
        with:
          submodules: recursive
-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
        with:
-          go-version: "1.22.x"
+          go-version-file: 'go.mod'
      - name: Check that go.mod is tidy
        uses: protocol/multiple-go-modules@v1.4
        with:
.github/workflows/golint.yml | 7
@@ -22,15 +22,14 @@ jobs:
       TEST_DOCKER: 0
       TEST_FUSE: 0
       TEST_VERBOSE: 1
-      TRAVIS: 1
       GIT_PAGER: cat
       IPFS_CHECK_RCMGR_DEFAULTS: 1
     defaults:
       run:
         shell: bash
     steps:
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v6
+      - uses: actions/setup-go@v6
        with:
-          go-version: 1.22.x
-      - uses: actions/checkout@v4
+          go-version-file: 'go.mod'
      - run: make -O test_go_lint
.github/workflows/gotest.yml | 141
@@ -14,64 +14,42 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  go-test:
+  # Unit tests with coverage collection (uploaded to Codecov)
+  unit-tests:
     if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
     runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
-    timeout-minutes: 20
+    timeout-minutes: 15
     env:
       GOTRACEBACK: single # reduce noise on test timeout panics
-      TEST_DOCKER: 0
-      TEST_FUSE: 0
       TEST_VERBOSE: 1
-      TRAVIS: 1
       GIT_PAGER: cat
       IPFS_CHECK_RCMGR_DEFAULTS: 1
     defaults:
       run:
         shell: bash
     steps:
-      - name: Set up Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: 1.22.x
       - name: Check out Kubo
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version-file: 'go.mod'
      - name: Install missing tools
        run: sudo apt update && sudo apt install -y zsh
-      - name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
-        env:
-          # increasing parallelism beyond 2 doesn't speed up the tests much
-          PARALLEL: 2
+      - name: Run unit tests
        run: |
-          make -j "$PARALLEL" test/unit/gotest.junit.xml &&
+          make test_unit &&
          [[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@6d798873df2b1b8e5846dba6fb86631229fbcb17 # v4.4.0
+        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
        if: failure() || success()
        with:
          name: unittests
          files: coverage/unit_tests.coverprofile
-      - name: Test kubo-as-a-library example
-        run: |
-          # we want to first test with the kubo version in the go.mod file
-          go test -v ./...
-
-          # we also want to test the examples against the current version of kubo
-          # however, that version might be in a fork so we need to replace the dependency
-
-          # backup the go.mod and go.sum files to restore them after we run the tests
-          cp go.mod go.mod.bak
-          cp go.sum go.sum.bak
-
-          # make sure the examples run against the current version of kubo
-          go mod edit -replace github.com/ipfs/kubo=./../../..
-          go mod tidy
-
-          go test -v ./...
-
-          # restore the go.mod and go.sum files to their original state
-          mv go.mod.bak go.mod
-          mv go.sum.bak go.sum
-        working-directory: docs/examples/kubo-as-a-library
+          token: ${{ secrets.CODECOV_TOKEN }}
+          fail_ci_if_error: false
      - name: Create a proper JUnit XML report
        uses: ipdxco/gotest-json-to-junit-xml@v1
        with:
@@ -79,9 +57,9 @@ jobs:
          output: test/unit/gotest.junit.xml
        if: failure() || success()
      - name: Archive the JUnit XML report
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
-          name: unit
+          name: unit-tests-junit
          path: test/unit/gotest.junit.xml
        if: failure() || success()
      - name: Create a HTML report
@@ -92,9 +70,9 @@ jobs:
          output: test/unit/gotest.html
        if: failure() || success()
      - name: Archive the HTML report
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
-          name: html
+          name: unit-tests-html
          path: test/unit/gotest.html
        if: failure() || success()
      - name: Create a Markdown report
@@ -107,3 +85,86 @@ jobs:
      - name: Set the summary
        run: cat test/unit/gotest.md >> $GITHUB_STEP_SUMMARY
        if: failure() || success()
+
+  # End-to-end integration/regression tests from test/cli
+  # (Go-based replacement for legacy test/sharness shell scripts)
+  cli-tests:
+    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+    runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
+    timeout-minutes: 15
+    env:
+      GOTRACEBACK: single # reduce noise on test timeout panics
+      TEST_VERBOSE: 1
+      GIT_PAGER: cat
+      IPFS_CHECK_RCMGR_DEFAULTS: 1
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Check out Kubo
+        uses: actions/checkout@v6
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version-file: 'go.mod'
+      - name: Install missing tools
+        run: sudo apt update && sudo apt install -y zsh
+      - name: Run CLI tests
+        env:
+          IPFS_PATH: ${{ runner.temp }}/ipfs-test
+        run: make test_cli
+      - name: Create JUnit XML report
+        uses: ipdxco/gotest-json-to-junit-xml@v1
+        with:
+          input: test/cli/cli-tests.json
+          output: test/cli/cli-tests.junit.xml
+        if: failure() || success()
+      - name: Archive JUnit XML report
+        uses: actions/upload-artifact@v6
+        with:
+          name: cli-tests-junit
+          path: test/cli/cli-tests.junit.xml
+        if: failure() || success()
+      - name: Create HTML report
+        uses: ipdxco/junit-xml-to-html@v1
+        with:
+          mode: no-frames
+          input: test/cli/cli-tests.junit.xml
+          output: test/cli/cli-tests.html
+        if: failure() || success()
+      - name: Archive HTML report
+        uses: actions/upload-artifact@v6
+        with:
+          name: cli-tests-html
+          path: test/cli/cli-tests.html
+        if: failure() || success()
+      - name: Create Markdown report
+        uses: ipdxco/junit-xml-to-html@v1
+        with:
+          mode: summary
+          input: test/cli/cli-tests.junit.xml
+          output: test/cli/cli-tests.md
+        if: failure() || success()
+      - name: Set summary
+        run: cat test/cli/cli-tests.md >> $GITHUB_STEP_SUMMARY
+        if: failure() || success()
+
+  # Example tests (kubo-as-a-library)
+  example-tests:
+    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+    runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
+    timeout-minutes: 5
+    env:
+      GOTRACEBACK: single
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Check out Kubo
+        uses: actions/checkout@v6
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version-file: 'go.mod'
+      - name: Run example tests
+        run: make test_examples
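For reference, the three jobs above wrap Makefile targets that can also be run locally; a rough sketch, assuming a standard Go toolchain and the repository root as the working directory:

```sh
# Unit tests with coverage (unit-tests job)
make test_unit

# End-to-end CLI tests from test/cli (cli-tests job)
make test_cli

# kubo-as-a-library example tests (example-tests job)
make test_examples
```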
.github/workflows/interop.yml | 29
@@ -9,9 +9,6 @@ on:
     branches:
       - 'master'

-env:
-  GO_VERSION: 1.22.x
-
 concurrency:
   group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
   cancel-in-progress: true
@@ -29,19 +26,18 @@ jobs:
       TEST_DOCKER: 0
       TEST_FUSE: 0
       TEST_VERBOSE: 1
-      TRAVIS: 1
       GIT_PAGER: cat
       IPFS_CHECK_RCMGR_DEFAULTS: 1
     defaults:
       run:
         shell: bash
     steps:
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v6
+      - uses: actions/setup-go@v6
        with:
-          go-version: ${{ env.GO_VERSION }}
-      - uses: actions/checkout@v4
+          go-version-file: 'go.mod'
      - run: make build
-      - uses: actions/upload-artifact@v4
+      - uses: actions/upload-artifact@v6
        with:
          name: kubo
          path: cmd/ipfs/ipfs
@@ -53,17 +49,17 @@ jobs:
       run:
         shell: bash
     steps:
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v6
        with:
          node-version: lts/*
-      - uses: actions/download-artifact@v4
+      - uses: actions/download-artifact@v7
        with:
          name: kubo
          path: cmd/ipfs
      - run: chmod +x cmd/ipfs/ipfs
      - run: echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
        id: npm-cache-dir
-      - uses: actions/cache@v4
+      - uses: actions/cache@v5
        with:
          path: ${{ steps.npm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-${{ github.job }}-helia-${{ hashFiles('**/package-lock.json') }}
@@ -82,29 +78,28 @@ jobs:
       LIBP2P_TCP_REUSEPORT: false
       LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
       E2E_IPFSD_TYPE: go
-      TRAVIS: 1
       GIT_PAGER: cat
       IPFS_CHECK_RCMGR_DEFAULTS: 1
     defaults:
       run:
         shell: bash
     steps:
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v6
        with:
-          node-version: 18.14.0
-      - uses: actions/download-artifact@v4
+          node-version: 20.x
+      - uses: actions/download-artifact@v7
        with:
          name: kubo
          path: cmd/ipfs
      - run: chmod +x cmd/ipfs/ipfs
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
        with:
          repository: ipfs/ipfs-webui
          path: ipfs-webui
      - run: |
          echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
        id: npm-cache-dir
-      - uses: actions/cache@v4
+      - uses: actions/cache@v5
        with:
          path: ${{ steps.npm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }}
.github/workflows/sharness.yml | 26
@@ -4,10 +4,10 @@ on:
   workflow_dispatch:
   pull_request:
     paths-ignore:
-      - '**/*.md'
+      - "**/*.md"
   push:
     branches:
-      - 'master'
+      - "master"

 concurrency:
   group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
@@ -17,22 +17,22 @@ jobs:
   sharness-test:
     if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
     runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }}
-    timeout-minutes: 20
+    timeout-minutes: ${{ github.repository == 'ipfs/kubo' && 15 || 60 }}
     defaults:
       run:
         shell: bash
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: 1.22.x
       - name: Checkout Kubo
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          path: kubo
+      - name: Setup Go
+        uses: actions/setup-go@v6
+        with:
+          go-version-file: 'kubo/go.mod'
      - name: Install missing tools
        run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils
-      - uses: actions/cache@v4
+      - uses: actions/cache@v5
        with:
          path: test/sharness/lib/dependencies
          key: ${{ runner.os }}-test-generate-junit-html-${{ hashFiles('test/sharness/lib/test-generate-junit-html.sh') }}
@@ -55,11 +55,13 @@ jobs:
          # increasing parallelism beyond 10 doesn't speed up the tests much
          PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
      - name: Upload coverage report
-        uses: codecov/codecov-action@6d798873df2b1b8e5846dba6fb86631229fbcb17 # v4.4.0
+        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
        if: failure() || success()
        with:
          name: sharness
          files: kubo/coverage/sharness_tests.coverprofile
+          token: ${{ secrets.CODECOV_TOKEN }}
+          fail_ci_if_error: false
      - name: Aggregate results
        run: find kubo/test/sharness/test-results -name 't*-*.sh.*.counts' | kubo/test/sharness/lib/sharness/aggregate-results.sh > kubo/test/sharness/test-results/summary.txt
      - name: 👉️ If this step failed, go to «Summary» (top left) → «HTML Report» → inspect the «Failures» column
@@ -88,7 +90,7 @@ jobs:
          destination: sharness.html
      - name: Upload one-page HTML report
        if: github.repository != 'ipfs/kubo' && (failure() || success())
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: sharness.html
          path: kubo/test/sharness/test-results/sharness.html
@@ -108,7 +110,7 @@ jobs:
          destination: sharness-html/
      - name: Upload full HTML report
        if: github.repository != 'ipfs/kubo' && (failure() || success())
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: sharness-html
          path: kubo/test/sharness/test-results/sharness-html
.github/workflows/spellcheck.yml | 18 (new file)
@@ -0,0 +1,18 @@
+name: Spell Check
+
+on:
+  pull_request:
+  push:
+    branches: ["master"]
+  workflow_dispatch:
+
+permissions:
+  contents: read
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  spellcheck:
+    uses: ipdxco/unified-github-workflows/.github/workflows/reusable-spellcheck.yml@v1
.github/workflows/stale.yml | 5
@@ -1,8 +1,9 @@
-name: Close and mark stale issue
+name: Close Stale Issues

 on:
   schedule:
     - cron: '0 0 * * *'
+  workflow_dispatch:

 permissions:
   issues: write
@@ -10,4 +11,4 @@ permissions:

 jobs:
   stale:
-    uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3
+    uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1
.github/workflows/sync-release-assets.yml | 4
@@ -22,11 +22,11 @@ jobs:
       - uses: ipfs/start-ipfs-daemon-action@v1
        with:
          args: --init --init-profile=flatfs,server --enable-gc=false
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v6
        with:
          node-version: 14
      - name: Sync the latest 5 github releases
-        uses: actions/github-script@v7
+        uses: actions/github-script@v8
        with:
          script: |
            const fs = require('fs').promises
.github/workflows/test-migrations.yml | 85 (new file)
@@ -0,0 +1,85 @@
+name: Migrations
+
+on:
+  workflow_dispatch:
+  pull_request:
+    paths:
+      # Migration implementation files
+      - 'repo/fsrepo/migrations/**'
+      - 'test/cli/migrations/**'
+      # Config and repo handling
+      - 'repo/fsrepo/**'
+      # This workflow file itself
+      - '.github/workflows/test-migrations.yml'
+  push:
+    branches:
+      - 'master'
+      - 'release-*'
+    paths:
+      - 'repo/fsrepo/migrations/**'
+      - 'test/cli/migrations/**'
+      - 'repo/fsrepo/**'
+      - '.github/workflows/test-migrations.yml'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  test:
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
+    timeout-minutes: 20
+    env:
+      TEST_VERBOSE: 1
+      IPFS_CHECK_RCMGR_DEFAULTS: 1
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Check out Kubo
+        uses: actions/checkout@v6
+
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version-file: 'go.mod'
+
+      - name: Build kubo binary
+        run: |
+          make build
+          echo "Built ipfs binary at $(pwd)/cmd/ipfs/"
+
+      - name: Add kubo to PATH
+        run: |
+          echo "$(pwd)/cmd/ipfs" >> $GITHUB_PATH
+
+      - name: Verify ipfs in PATH
+        run: |
+          which ipfs || echo "ipfs not in PATH"
+          ipfs version || echo "Failed to run ipfs version"
+
+      - name: Run migration unit tests
+        run: |
+          go test ./repo/fsrepo/migrations/...
+
+      - name: Run CLI migration tests
+        env:
+          IPFS_PATH: ${{ runner.temp }}/ipfs-test
+        run: |
+          export PATH="${{ github.workspace }}/cmd/ipfs:$PATH"
+          which ipfs || echo "ipfs not found in PATH"
+          ipfs version || echo "Failed to run ipfs version"
+          go test ./test/cli/migrations/...
+
+      - name: Upload test results
+        if: always()
+        uses: actions/upload-artifact@v6
+        with:
+          name: ${{ matrix.os }}-test-results
+          path: |
+            test/**/*.log
+            ${{ runner.temp }}/ipfs-test/
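To reproduce the migration checks locally, the workflow boils down to building the ipfs binary, putting it on PATH, and running the two Go test packages. A minimal sketch, assuming bash and a throwaway repo directory in place of the workflow's `runner.temp`:

```sh
make build
export PATH="$PWD/cmd/ipfs:$PATH"
export IPFS_PATH="$(mktemp -d)"   # scratch repo for the tests

# Migration unit tests, then the CLI migration tests
go test ./repo/fsrepo/migrations/...
go test ./test/cli/migrations/...
```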
.gitignore | 5
@@ -28,6 +28,11 @@ go-ipfs-source.tar.gz
 docs/examples/go-ipfs-as-a-library/example-folder/Qm*
 /test/sharness/t0054-dag-car-import-export-data/*.car

+# test artifacts from make test_unit / test_cli
+/test/unit/gotest.json
+/test/unit/gotest.junit.xml
+/test/cli/cli-tests.json
+
 # ignore build output from snapcraft
 /ipfs_*.snap
 /parts
.hadolint.yaml | 13 (new file)
@@ -0,0 +1,13 @@
+# Hadolint configuration for Kubo Docker image
+# https://github.com/hadolint/hadolint
+
+# Ignore specific rules
+ignored:
+  # DL3008: Pin versions in apt-get install
+  # We use stable base images and prefer smaller layers over version pinning
+  - DL3008
+
+# Trust base images from these registries
+trustedRegistries:
+  - docker.io
+  - gcr.io
CHANGELOG.md
@@ -1,5 +1,14 @@
 # Kubo Changelogs

+- [v0.40](docs/changelogs/v0.40.md)
+- [v0.39](docs/changelogs/v0.39.md)
+- [v0.38](docs/changelogs/v0.38.md)
+- [v0.37](docs/changelogs/v0.37.md)
+- [v0.36](docs/changelogs/v0.36.md)
+- [v0.35](docs/changelogs/v0.35.md)
+- [v0.34](docs/changelogs/v0.34.md)
+- [v0.33](docs/changelogs/v0.33.md)
+- [v0.32](docs/changelogs/v0.32.md)
 - [v0.31](docs/changelogs/v0.31.md)
 - [v0.30](docs/changelogs/v0.30.md)
 - [v0.29](docs/changelogs/v0.29.md)
CONTRIBUTING.md
@@ -1,6 +1,10 @@
-IPFS as a project, including go-ipfs and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
+# Contributing to Kubo

-We also adhere to the [GO IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information of how to collaborate and contribute in the Go implementation of IPFS.
+**For development setup, building, and testing, see the [Developer Guide](docs/developer-guide.md).**
+
+IPFS as a project, including Kubo and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
+
+We also adhere to the [Go IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information on how to collaborate and contribute to the Go implementation of IPFS.

 We appreciate your time and attention for going over these. Please open an issue on ipfs/community if you have any questions.
Dockerfile | 95
@@ -1,13 +1,16 @@
-FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22 AS builder
+# syntax=docker/dockerfile:1
+# Enables BuildKit with cache mounts for faster builds
+FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.25 AS builder

 ARG TARGETOS TARGETARCH

-ENV SRC_DIR /kubo
+ENV SRC_DIR=/kubo

-# Download packages first so they can be cached.
+# Cache go module downloads between builds for faster rebuilds
 COPY go.mod go.sum $SRC_DIR/
-RUN cd $SRC_DIR \
-  && go mod download
+WORKDIR $SRC_DIR
+RUN --mount=type=cache,target=/go/pkg/mod \
+    go mod download

 COPY . $SRC_DIR

@@ -18,92 +21,78 @@ ARG IPFS_PLUGINS
 # Allow for other targets to be built, e.g.: docker build --build-arg MAKE_TARGET="nofuse"
 ARG MAKE_TARGET=build

-# Build the thing.
-# Also: fix getting HEAD commit hash via git rev-parse.
-RUN cd $SRC_DIR \
-  && mkdir -p .git/objects \
+# Build ipfs binary with cached go modules and build cache.
+# mkdir .git/objects allows git rev-parse to read commit hash for version info
+RUN --mount=type=cache,target=/go/pkg/mod \
+    --mount=type=cache,target=/root/.cache/go-build \
+    mkdir -p .git/objects \
   && GOOS=$TARGETOS GOARCH=$TARGETARCH GOFLAGS=-buildvcs=false make ${MAKE_TARGET} IPFS_PLUGINS=$IPFS_PLUGINS

-# Using Debian Buster because the version of busybox we're using is based on it
-# and we want to make sure the libraries we're using are compatible. That's also
-# why we're running this for the target platform.
-FROM debian:stable-slim AS utilities
+# Extract required runtime tools from Debian.
+# We use Debian instead of Alpine because we need glibc compatibility
+# for the busybox base image we're using.
+FROM debian:bookworm-slim AS utilities
 RUN set -eux; \
   apt-get update; \
-  apt-get install -y \
+  apt-get install -y --no-install-recommends \
     tini \
     # Using gosu (~2MB) instead of su-exec (~20KB) because it's easier to
     # install on Debian. Useful links:
     # - https://github.com/ncopa/su-exec#why-reinvent-gosu
     # - https://github.com/tianon/gosu/issues/52#issuecomment-441946745
     gosu \
-    # This installs fusermount which we later copy over to the target image.
+    # fusermount enables IPFS mount commands
     fuse \
    ca-certificates \
  ; \
-  rm -rf /var/lib/apt/lists/*
+  apt-get clean; \
+  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

-# Now comes the actual target image, which aims to be as small as possible.
+# Final minimal image with shell for debugging (busybox provides sh)
 FROM busybox:stable-glibc

-# Get the ipfs binary, entrypoint script, and TLS CAs from the build container.
-ENV SRC_DIR /kubo
+# Copy ipfs binary, startup scripts, and runtime dependencies
+ENV SRC_DIR=/kubo
 COPY --from=utilities /usr/sbin/gosu /sbin/gosu
 COPY --from=utilities /usr/bin/tini /sbin/tini
 COPY --from=utilities /bin/fusermount /usr/local/bin/fusermount
 COPY --from=utilities /etc/ssl/certs /etc/ssl/certs
 COPY --from=builder $SRC_DIR/cmd/ipfs/ipfs /usr/local/bin/ipfs
-COPY --from=builder $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs
+COPY --from=builder --chmod=755 $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs
 COPY --from=builder $SRC_DIR/bin/container_init_run /usr/local/bin/container_init_run

-# Add suid bit on fusermount so it will run properly
+# Set SUID for fusermount to enable FUSE mounting by non-root user
 RUN chmod 4755 /usr/local/bin/fusermount

-# Fix permissions on start_ipfs (ignore the build machine's permissions)
-RUN chmod 0755 /usr/local/bin/start_ipfs
-
-# Swarm TCP; should be exposed to the public
-EXPOSE 4001
-# Swarm UDP; should be exposed to the public
-EXPOSE 4001/udp
-# Daemon API; must not be exposed publicly but to client services under you control
+# Swarm P2P port (TCP/UDP) - expose publicly for peer connections
+EXPOSE 4001 4001/udp
+# API port - keep private, only for trusted clients
 EXPOSE 5001
-# Web Gateway; can be exposed publicly with a proxy, e.g. as https://ipfs.example.org
+# Gateway port - can be exposed publicly via reverse proxy
 EXPOSE 8080
-# Swarm Websockets; must be exposed publicly when the node is listening using the websocket transport (/ipX/.../tcp/8081/ws).
+# Swarm WebSockets - expose publicly for browser-based peers
 EXPOSE 8081

-# Create the fs-repo directory and switch to a non-privileged user.
-ENV IPFS_PATH /data/ipfs
-RUN mkdir -p $IPFS_PATH \
+# Create ipfs user (uid 1000) and required directories with proper ownership
+ENV IPFS_PATH=/data/ipfs
+RUN mkdir -p $IPFS_PATH /ipfs /ipns /mfs /container-init.d \
   && adduser -D -h $IPFS_PATH -u 1000 -G users ipfs \
-  && chown ipfs:users $IPFS_PATH
+  && chown ipfs:users $IPFS_PATH /ipfs /ipns /mfs /container-init.d

-# Create mount points for `ipfs mount` command
-RUN mkdir /ipfs /ipns \
-  && chown ipfs:users /ipfs /ipns
-
-# Create the init scripts directory
-RUN mkdir /container-init.d \
-  && chown ipfs:users /container-init.d
-
-# Expose the fs-repo as a volume.
-# start_ipfs initializes an fs-repo if none is mounted.
-# Important this happens after the USER directive so permissions are correct.
+# Volume for IPFS repository data persistence
 VOLUME $IPFS_PATH

 # The default logging level
-ENV IPFS_LOGGING ""
+ENV GOLOG_LOG_LEVEL=""

-# This just makes sure that:
-# 1. There's an fs-repo, and initializes one if there isn't.
-# 2. The API and Gateway are accessible from outside the container.
+# Entrypoint initializes IPFS repo if needed and configures networking.
+# tini ensures proper signal handling and zombie process cleanup
 ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start_ipfs"]

-# Healthcheck for the container
-# QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn is the CID of empty folder
+# Health check verifies IPFS daemon is responsive.
+# Uses empty directory CID (QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn) as test
 HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
   CMD ipfs --api=/ip4/127.0.0.1/tcp/5001 dag stat /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn || exit 1

-# Execute the daemon subcommand by default
+# Default: run IPFS daemon with auto-migration enabled
 CMD ["daemon", "--migrate=true", "--agent-version-suffix=docker"]
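Putting the EXPOSE and VOLUME declarations above into practice, a typical invocation of the resulting image looks like the following. This is a sketch only, assuming the published `ipfs/kubo` image; the host repo path is a placeholder:

```sh
# Persist the fs-repo outside the container (VOLUME /data/ipfs),
# publish the swarm port, and keep API/gateway bound to localhost.
docker run -d --name ipfs_node \
  -v /path/to/ipfs-repo:/data/ipfs \
  -p 4001:4001 -p 4001:4001/udp \
  -p 127.0.0.1:5001:5001 \
  -p 127.0.0.1:8080:8080 \
  ipfs/kubo:latest

docker logs -f ipfs_node   # wait for "Daemon is ready"
```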
FUNDING.json | 5 (new file)
@@ -0,0 +1,5 @@
+{
+  "opRetro": {
+    "projectId": "0x7f330267969cf845a983a9d4e7b7dbcca5c700a5191269af377836d109e0bb69"
+  }
+}
546
README.md
546
README.md
@ -1,8 +1,8 @@
|
||||
<h1 align="center">
|
||||
<br>
|
||||
<a href="https://docs.ipfs.tech/how-to/command-line-quick-start/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
|
||||
<a href="https://github.com/ipfs/kubo/blob/master/docs/logo/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
|
||||
<br>
|
||||
Kubo: IPFS Implementation in GO
|
||||
Kubo: IPFS Implementation in Go
|
||||
<br>
|
||||
</h1>
|
||||
|
||||
@ -11,488 +11,214 @@
|
||||
<p align="center">
|
||||
<a href="https://ipfs.tech"><img src="https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square" alt="Official Part of IPFS Project"></a>
|
||||
<a href="https://discuss.ipfs.tech"><img alt="Discourse Forum" src="https://img.shields.io/discourse/posts?server=https%3A%2F%2Fdiscuss.ipfs.tech"></a>
|
||||
<a href="https://matrix.to/#/#ipfs-space:ipfs.io"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
|
||||
<a href="https://github.com/ipfs/kubo/actions"><img src="https://img.shields.io/github/actions/workflow/status/ipfs/kubo/build.yml?branch=master" alt="ci"></a>
|
||||
<a href="https://docs.ipfs.tech/community/"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
|
||||
<a href="https://github.com/ipfs/kubo/actions"><img src="https://img.shields.io/github/actions/workflow/status/ipfs/kubo/gobuild.yml?branch=master"></a>
|
||||
<a href="https://github.com/ipfs/kubo/releases"><img alt="GitHub release" src="https://img.shields.io/github/v/release/ipfs/kubo?filter=!*rc*"></a>
|
||||
<a href="https://godoc.org/github.com/ipfs/kubo"><img src="https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square" alt="godoc reference"></a>
|
||||
</p>
|
||||
|
||||
<hr />
|
||||
|
||||
<p align="center">
|
||||
<b><a href="#what-is-kubo">What is Kubo?</a></b> | <b><a href="#quick-taste">Quick Taste</a></b> | <b><a href="#install">Install</a></b> | <b><a href="#documentation">Documentation</a></b> | <b><a href="#development">Development</a></b> | <b><a href="#getting-help">Getting Help</a></b>
|
||||
</p>
|
||||
|
||||
## What is Kubo?
|
||||
|
||||
Kubo was the first IPFS implementation and is the most widely used one today. Implementing the *Interplanetary Filesystem* - the Web3 standard for content-addressing, interoperable with HTTP. Thus powered by IPLD's data models and the libp2p for network communication. Kubo is written in Go.
|
||||
Kubo was the first [IPFS](https://docs.ipfs.tech/concepts/what-is-ipfs/) implementation and is the [most widely used one today](https://probelab.io/ipfs/topology/#chart-agent-types-avg). It takes an opinionated approach to content-addressing ([CIDs](https://docs.ipfs.tech/concepts/glossary/#cid), [DAGs](https://docs.ipfs.tech/concepts/glossary/#dag)) that maximizes interoperability: [UnixFS](https://docs.ipfs.tech/concepts/glossary/#unixfs) for files and directories, [HTTP Gateways](https://docs.ipfs.tech/concepts/glossary/#gateway) for web browsers, [Bitswap](https://docs.ipfs.tech/concepts/glossary/#bitswap) and [HTTP](https://specs.ipfs.tech/http-gateways/trustless-gateway/) for verifiable data transfer.
|
||||
|
||||
Featureset
|
||||
- Runs an IPFS-Node as a network service that is part of LAN and WAN DHT
|
||||
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs` and `/ipns`) functionality for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
|
||||
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) (`/routing/v1`) client and server implementation for [delegated routing](./docs/delegated-routing.md) lookups
|
||||
- [HTTP Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`) to access and control the daemon
|
||||
- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) based on (`/api/v0`) RPC API
|
||||
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) to manage the Kubo node
|
||||
- [Content blocking](/docs/content-blocking.md) support for operators of public nodes
|
||||
**Features:**
|
||||
|
||||
### Other implementations
|
||||
- Runs an IPFS node as a network service (LAN [mDNS](https://github.com/libp2p/specs/blob/master/discovery/mdns.md) and WAN [Amino DHT](https://docs.ipfs.tech/concepts/glossary/#dht))
|
||||
- [Command-line interface](https://docs.ipfs.tech/reference/kubo/cli/) (`ipfs --help`)
|
||||
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) for node management
|
||||
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
|
||||
- [HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to control the daemon
|
||||
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) client and server for [delegated routing](./docs/delegated-routing.md)
|
||||
- [Content blocking](./docs/content-blocking.md) for public node operators
|
||||
|
||||
See [List](https://docs.ipfs.tech/basics/ipfs-implementations/)
|
||||
**Other IPFS implementations:** [Helia](https://github.com/ipfs/helia) (JavaScript), [more...](https://docs.ipfs.tech/concepts/ipfs-implementations/)
|
||||
|
||||
## What is IPFS?
|
||||
## Quick Taste
|
||||
|
||||
IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas from previous systems such as Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single BitTorrent swarm, exchanging git objects. IPFS provides an interface as simple as the HTTP web, but with permanence built-in. You can also mount the world at /ipfs.
|
||||
After [installing Kubo](#install), verify it works:
|
||||
|
||||
For more info see: https://docs.ipfs.tech/concepts/what-is-ipfs/
|
||||
```console
|
||||
$ ipfs init
|
||||
generating ED25519 keypair...done
|
||||
peer identity: 12D3KooWGcSLQdLDBi2BvoP8WnpdHvhWPbxpGcqkf93rL2XMZK7R
|
||||
|
||||
Before opening an issue, consider using one of the following locations to ensure you are opening your thread in the right place:
|
||||
- kubo (previously named go-ipfs) _implementation_ bugs in [this repo](https://github.com/ipfs/kubo/issues).
|
||||
- Documentation issues in [ipfs/docs issues](https://github.com/ipfs/ipfs-docs/issues).
|
||||
- IPFS _design_ in [ipfs/specs issues](https://github.com/ipfs/specs/issues).
|
||||
- Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues).
|
||||
- Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.tech).
|
||||
- Or [chat with us](https://docs.ipfs.tech/community/chat/).
|
||||
$ ipfs daemon &
|
||||
Daemon is ready
|
||||
|
||||
[](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [](https://twitter.com/IPFS)
|
||||
$ echo "hello IPFS" | ipfs add -q --cid-version 1
|
||||
bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
|
||||
|
||||
## Next milestones
|
||||
$ ipfs cat bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
|
||||
hello IPFS
|
||||
```
|
||||
|
||||
[Milestones on GitHub](https://github.com/ipfs/kubo/milestones)
|
||||
Verify this CID is provided by your node to the IPFS network: <https://check.ipfs.network/?cid=bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa>
|
||||
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [What is Kubo?](#what-is-kubo)
|
||||
- [What is IPFS?](#what-is-ipfs)
|
||||
- [Next milestones](#next-milestones)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Security Issues](#security-issues)
|
||||
- [Minimal System Requirements](#minimal-system-requirements)
|
||||
- [Install](#install)
|
||||
- [Docker](#docker)
|
||||
- [Official prebuilt binaries](#official-prebuilt-binaries)
|
||||
- [Updating](#updating)
|
||||
- [Using ipfs-update](#using-ipfs-update)
|
||||
- [Downloading builds using IPFS](#downloading-builds-using-ipfs)
|
||||
- [Unofficial Linux packages](#unofficial-linux-packages)
|
||||
- [ArchLinux](#arch-linux)
|
||||
- [Gentoo Linux](#gentoo-linux)
|
||||
- [Nix](#nix)
|
||||
- [Solus](#solus)
|
||||
- [openSUSE](#opensuse)
|
||||
- [Guix](#guix)
|
||||
- [Snap](#snap)
|
||||
- [Ubuntu PPA](#ubuntu-ppa)
|
||||
- [Unofficial Windows packages](#unofficial-windows-packages)
|
||||
- [Chocolatey](#chocolatey)
|
||||
- [Scoop](#scoop)
|
||||
- [Unofficial MacOS packages](#unofficial-macos-packages)
|
||||
- [MacPorts](#macports)
|
||||
- [Nix](#nix-macos)
|
||||
- [Homebrew](#homebrew)
|
||||
- [Build from Source](#build-from-source)
|
||||
- [Install Go](#install-go)
|
||||
- [Download and Compile IPFS](#download-and-compile-ipfs)
|
||||
- [Cross Compiling](#cross-compiling)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Usage](#usage)
|
||||
- [Some things to try](#some-things-to-try)
|
||||
- [Troubleshooting](#troubleshooting-1)
|
||||
- [Packages](#packages)
|
||||
- [Development](#development)
|
||||
- [Map of Implemented Subsystems](#map-of-implemented-subsystems)
|
||||
- [CLI, HTTP-API, Architecture Diagram](#cli-http-api-architecture-diagram)
|
||||
- [Testing](#testing)
|
||||
- [Development Dependencies](#development-dependencies)
|
||||
- [Developer Notes](#developer-notes)
|
||||
- [Maintainer Info](#maintainer-info)
|
||||
- [Contributing](#contributing)
|
||||
- [License](#license)
|
||||
|
||||
## Security Issues
|
||||
|
||||
Please follow [`SECURITY.md`](SECURITY.md).
|
||||
|
||||
### Minimal System Requirements
|
||||
|
||||
IPFS can run on most Linux, macOS, and Windows systems. We recommend running it on a machine with at least 4 GB of RAM and 2 CPU cores (kubo is highly parallel). On systems with less memory, it may not be completely stable, and you run it at your own risk.
|
||||
See `ipfs add --help` for all import options. Ready for more? Follow the [command-line quick start](https://docs.ipfs.tech/how-to/command-line-quick-start/).
|
||||
|
||||
## Install
|
||||
|
||||
The canonical download instructions for IPFS are over at: https://docs.ipfs.tech/install/. It is **highly recommended** you follow those instructions if you are not interested in working on IPFS development.
|
||||
Follow the [official installation guide](https://docs.ipfs.tech/install/command-line/), or choose: [prebuilt binary](#official-prebuilt-binaries) | [Docker](#docker) | [package manager](#package-managers) | [from source](#build-from-source).
|
||||
|
||||
Prefer a GUI? Try [IPFS Desktop](https://docs.ipfs.tech/install/ipfs-desktop/) and/or [IPFS Companion](https://docs.ipfs.tech/install/ipfs-companion/).
|
||||
|
||||
### Minimal System Requirements
|
||||
|
||||
Kubo runs on most Linux, macOS, and Windows systems. For optimal performance, we recommend at least 6 GB of RAM and 2 CPU cores (more is ideal, as Kubo is highly parallel).
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Larger pinsets require additional memory, with an estimated ~1 GiB of RAM per 20 million items for reproviding to the Amino DHT.
|
||||
|
||||
> [!CAUTION]
|
||||
> Systems with less than the recommended memory may experience instability, frequent OOM errors or restarts, and missed data announcements (reprovider window), which can make data fully or partially inaccessible to other peers. Running Kubo on underprovisioned hardware is at your own risk.
|
||||
|
||||
### Official Prebuilt Binaries
|
||||
|
||||
Download from https://dist.ipfs.tech#kubo or [GitHub Releases](https://github.com/ipfs/kubo/releases/latest).
|
||||
|
||||
### Docker
|
||||
|
||||
Official images are published at https://hub.docker.com/r/ipfs/kubo/:
|
||||
Official images are published at https://hub.docker.com/r/ipfs/kubo/: [](https://hub.docker.com/r/ipfs/kubo/)
|
||||
|
||||
[](https://hub.docker.com/r/ipfs/kubo/)
|
||||
#### 🟢 Release Images
|
||||
|
||||
- 🟢 Releases
|
||||
- `latest` and `release` tags always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest)
|
||||
- `vN.N.N` points at a specific [release tag](https://github.com/ipfs/kubo/releases)
|
||||
- These are production grade images.
|
||||
- 🟠 We also provide experimental developer builds
|
||||
- `master-latest` always points at the `HEAD` of the `master` branch
|
||||
- `master-YYYY-DD-MM-GITSHA` points at a specific commit from the `master` branch
|
||||
- These tags are used by developers for internal testing and are not intended for end users or production use.
|
||||
Use these for production deployments.
|
||||
|
||||
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest)
|
||||
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
|
||||
|
||||
```console
|
||||
$ docker pull ipfs/kubo:latest
|
||||
$ docker run --rm -it --net=host ipfs/kubo:latest
|
||||
```
|
||||
|
||||
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node),
|
||||
pass the necessary config via `-e` or by mounting scripts into `/container-init.d`.
|
||||
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node), pass config via `-e` or mount scripts in `/container-init.d`.
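For example, here is a minimal sketch of a customized run; the `IPFS_PROFILE` value, script name, and port mappings are illustrative choices, not requirements:

```console
$ docker run -d --name my-kubo \
    -e IPFS_PROFILE=server \
    -v "$PWD/001-setup.sh":/container-init.d/001-setup.sh \
    -p 4001:4001 -p 127.0.0.1:5001:5001 -p 127.0.0.1:8080:8080 \
    ipfs/kubo:latest
```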
|
||||
|
||||
Learn more at https://docs.ipfs.tech/install/run-ipfs-inside-docker/
|
||||
#### 🟠 Developer Preview Images
|
||||
|
||||
### Official prebuilt binaries
|
||||
For internal testing, not intended for production.
|
||||
|
||||
The official binaries are published at https://dist.ipfs.tech#kubo:
|
||||
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) points at `HEAD` of [`master`](https://github.com/ipfs/kubo/commits/master/)
|
||||
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit
|
||||
|
||||
[](https://dist.ipfs.tech#kubo)
|
||||
#### 🔴 Internal Staging Images
|
||||
|
||||
From there:
|
||||
- Click the blue "Download Kubo" on the right side of the page.
|
||||
- Open/extract the archive.
|
||||
- Move kubo (`ipfs`) to your path (`install.sh` can do it for you).
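A hedged sketch of those steps on Linux amd64 (the version number below is only an example; use the archive you actually downloaded):

```console
$ tar -xvzf kubo_v0.30.0_linux-amd64.tar.gz
$ cd kubo
$ sudo bash install.sh   # copies the ipfs binary into your PATH
$ ipfs --version
```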
|
||||
For testing arbitrary commits and experimental patches (force push to `staging` branch).
|
||||
|
||||
If you are unable to access [dist.ipfs.tech](https://dist.ipfs.tech#kubo), you can also download kubo (go-ipfs) from:
|
||||
- this project's GitHub [releases](https://github.com/ipfs/kubo/releases/latest) page
|
||||
- `/ipns/dist.ipfs.tech` at [dweb.link](https://dweb.link/ipns/dist.ipfs.tech#kubo) gateway
|
||||
|
||||
#### Updating
|
||||
|
||||
##### Using ipfs-update
|
||||
|
||||
IPFS has an updating tool that can be accessed through `ipfs update`. The tool is
|
||||
not installed alongside IPFS in order to keep that logic independent of the main
|
||||
codebase. To install the `ipfs-update` tool, [download it here](https://dist.ipfs.tech/#ipfs-update).
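Once installed, a typical update session looks roughly like this (output omitted):

```console
$ ipfs-update versions        # list versions available for install
$ ipfs-update install latest  # fetch, verify, and install the latest release
```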
|
||||
|
||||
##### Downloading builds using IPFS
|
||||
|
||||
List the available versions of Kubo (go-ipfs) implementation:
|
||||
|
||||
```console
|
||||
$ ipfs cat /ipns/dist.ipfs.tech/kubo/versions
|
||||
```
|
||||
|
||||
Then, to view available builds for a version from the previous command (`$VERSION`):
|
||||
|
||||
```console
|
||||
$ ipfs ls /ipns/dist.ipfs.tech/kubo/$VERSION
|
||||
```
|
||||
|
||||
To download a given build of a version:
|
||||
|
||||
```console
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-386.tar.gz # darwin 32-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin 64-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd 64-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-386.tar.gz # linux 32-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux 64-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm.tar.gz # linux arm build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows 64-bit build
|
||||
```
|
||||
|
||||
### Unofficial Linux packages
|
||||
|
||||
<a href="https://repology.org/project/kubo/versions">
|
||||
<img src="https://repology.org/badge/vertical-allrepos/kubo.svg" alt="Packaging status" align="right">
|
||||
</a>
|
||||
|
||||
- [ArchLinux](#arch-linux)
|
||||
- [Gentoo Linux](#gentoo-linux)
|
||||
- [Nix](#nix-linux)
|
||||
- [Solus](#solus)
|
||||
- [openSUSE](#opensuse)
|
||||
- [Guix](#guix)
|
||||
- [Snap](#snap)
|
||||
- [Ubuntu PPA](#ubuntu-ppa)
|
||||
|
||||
#### Arch Linux
|
||||
|
||||
[](https://wiki.archlinux.org/title/IPFS)
|
||||
|
||||
```bash
|
||||
# pacman -S kubo
|
||||
```
|
||||
|
||||
[](https://aur.archlinux.org/packages/kubo/)
|
||||
|
||||
#### <a name="gentoo-linux">Gentoo Linux</a>
|
||||
|
||||
https://wiki.gentoo.org/wiki/Kubo
|
||||
|
||||
```bash
|
||||
# emerge -a net-p2p/kubo
|
||||
```
|
||||
|
||||
https://packages.gentoo.org/packages/net-p2p/kubo
|
||||
|
||||
#### <a name="nix-linux">Nix</a>
|
||||
|
||||
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo (go-ipfs) like this:
|
||||
|
||||
```
|
||||
$ nix-env -i kubo
|
||||
```
|
||||
|
||||
You can also install the package by using its attribute name, which is also `kubo`.
|
||||
|
||||
#### Solus
|
||||
|
||||
[Package for Solus](https://dev.getsol.us/source/kubo/repository/master/)
|
||||
|
||||
```
|
||||
$ sudo eopkg install kubo
|
||||
```
|
||||
|
||||
You can also install it through the Solus software center.
|
||||
|
||||
#### openSUSE
|
||||
|
||||
[Community Package for go-ipfs](https://software.opensuse.org/package/go-ipfs)
|
||||
|
||||
#### Guix
|
||||
|
||||
[Community Package for go-ipfs](https://packages.guix.gnu.org/packages/go-ipfs/0.11.0/) is now out-of-date.
|
||||
|
||||
#### Snap
|
||||
|
||||
No longer supported, see rationale in [kubo#8688](https://github.com/ipfs/kubo/issues/8688).
|
||||
|
||||
#### Ubuntu PPA
|
||||
|
||||
[PPA homepage](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) on Launchpad.
|
||||
|
||||
##### Latest Ubuntu (>= 20.04 LTS)
|
||||
```sh
|
||||
sudo add-apt-repository ppa:twdragon/ipfs
|
||||
sudo apt update
|
||||
sudo apt install ipfs-kubo
|
||||
```
|
||||
|
||||
##### Any Ubuntu version
|
||||
|
||||
```sh
|
||||
sudo su
|
||||
echo 'deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
|
||||
echo 'deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
|
||||
exit
|
||||
sudo apt update
|
||||
sudo apt install ipfs-kubo
|
||||
```
|
||||
where `<<DISTRO>>` is the codename of your Ubuntu distribution (for example, `jammy` for 22.04 LTS). During the first installation the package maintenance script may automatically ask you about which networking profile, CPU accounting model, and/or existing node configuration file you want to use.
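For example, on Ubuntu 22.04 LTS the resulting `/etc/apt/sources.list.d/ipfs` entries would read:

```
deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu jammy main
deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu jammy main
```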
|
||||
|
||||
**NOTE**: this method may also work with any compatible Debian-based distro that includes `libc6` and uses APT as its package manager.
|
||||
|
||||
### Unofficial Windows packages
|
||||
|
||||
- [Chocolatey](#chocolatey)
|
||||
- [Scoop](#scoop)
|
||||
|
||||
#### Chocolatey
|
||||
|
||||
No longer supported, see rationale in [kubo#9341](https://github.com/ipfs/kubo/issues/9341).
|
||||
|
||||
#### Scoop
|
||||
|
||||
Scoop provides kubo as `kubo` in its 'extras' bucket.
|
||||
|
||||
```Powershell
|
||||
PS> scoop bucket add extras
|
||||
PS> scoop install kubo
|
||||
```
|
||||
|
||||
### Unofficial macOS packages
|
||||
|
||||
- [MacPorts](#macports)
|
||||
- [Nix](#nix-macos)
|
||||
- [Homebrew](#homebrew)
|
||||
|
||||
#### MacPorts
|
||||
|
||||
The package [ipfs](https://ports.macports.org/port/ipfs) currently points to kubo (go-ipfs) and is being maintained.
|
||||
|
||||
```
|
||||
$ sudo port install ipfs
|
||||
```
|
||||
|
||||
#### <a name="nix-macos">Nix</a>
|
||||
|
||||
On macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
|
||||
|
||||
```
|
||||
$ nix-env -i kubo
|
||||
```
|
||||
|
||||
You can also install the package by using its attribute name, which is also `kubo`.
|
||||
|
||||
#### Homebrew
|
||||
|
||||
A Homebrew formula [ipfs](https://formulae.brew.sh/formula/ipfs) is maintained too.
|
||||
|
||||
```
|
||||
$ brew install --formula ipfs
|
||||
```
|
||||
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) points at `HEAD` of [`staging`](https://github.com/ipfs/kubo/commits/staging/)
|
||||
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit
|
||||
|
||||
### Build from Source
|
||||
|
||||

|
||||
|
||||
kubo's build system requires Go and some standard POSIX build tools:
|
||||
|
||||
* GNU make
|
||||
* Git
|
||||
* GCC (or some other go compatible C Compiler) (optional)
|
||||
|
||||
To build without GCC, build with `CGO_ENABLED=0` (e.g., `make build CGO_ENABLED=0`).
|
||||
|
||||
#### Install Go
|
||||
|
||||

|
||||
|
||||
If you need to update: [Download latest version of Go](https://golang.org/dl/).
|
||||
|
||||
You'll need to add Go's bin directories to your `$PATH` environment variable e.g., by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`:
|
||||
|
||||
```
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
export PATH=$PATH:$GOPATH/bin
|
||||
```

```bash
|
||||
git clone https://github.com/ipfs/kubo.git
|
||||
cd kubo
|
||||
make build # creates cmd/ipfs/ipfs
|
||||
make install # installs to $GOPATH/bin/ipfs
|
||||
```
|
||||
|
||||
(If you run into trouble, see the [Go install instructions](https://golang.org/doc/install)).
|
||||
See the [Developer Guide](docs/developer-guide.md) for details, Windows instructions, and troubleshooting.
|
||||
|
||||
#### Download and Compile IPFS
|
||||
### Package Managers
|
||||
|
||||
```
|
||||
$ git clone https://github.com/ipfs/kubo.git
|
||||
Kubo is available in community-maintained packages across many operating systems, Linux distributions, and package managers. See [Repology](https://repology.org/project/kubo/versions) for the full list: [](https://repology.org/project/kubo/versions)
|
||||
|
||||
$ cd kubo
|
||||
$ make install
|
||||
```
|
||||
> [!WARNING]
|
||||
> These packages are maintained by third-party volunteers. The IPFS Project and Kubo maintainers are not responsible for their contents or supply chain security. For increased security, [build from source](#build-from-source).
|
||||
|
||||
Alternatively, you can run `make build` to build the go-ipfs binary (storing it in `cmd/ipfs/ipfs`) without installing it.
|
||||
#### Linux
|
||||
|
||||
**NOTE:** If you get an error along the lines of "fatal error: stdlib.h: No such file or directory", you're missing a C compiler. Either re-run `make` with `CGO_ENABLED=0` or install GCC.
|
||||
| Distribution | Install | Version |
|
||||
|--------------|---------|---------|
|
||||
| Ubuntu | [PPA](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs): `sudo apt install ipfs-kubo` | [](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) |
|
||||
| Arch | `pacman -S kubo` | [](https://archlinux.org/packages/extra/x86_64/kubo/) |
|
||||
| Fedora | [COPR](https://copr.fedorainfracloud.org/coprs/taw/ipfs/): `dnf install kubo` | [](https://copr.fedorainfracloud.org/coprs/taw/ipfs/) |
|
||||
| Nix | `nix-env -i kubo` | [](https://search.nixos.org/packages?query=kubo) |
|
||||
| Gentoo | `emerge -a net-p2p/kubo` | [](https://packages.gentoo.org/packages/net-p2p/kubo) |
|
||||
| openSUSE | `zypper install kubo` | [](https://software.opensuse.org/package/kubo) |
|
||||
| Solus | `sudo eopkg install kubo` | [](https://packages.getsol.us/shannon/k/kubo/) |
|
||||
| Guix | `guix install kubo` | [](https://packages.guix.gnu.org/packages/kubo/) |
|
||||
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
|
||||
|
||||
##### Cross Compiling
|
||||
~~Snap~~ no longer supported ([#8688](https://github.com/ipfs/kubo/issues/8688))
|
||||
|
||||
Compiling for a different platform is as simple as running:
|
||||
#### macOS
|
||||
|
||||
```
|
||||
make build GOOS=myTargetOS GOARCH=myTargetArchitecture
|
||||
```
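For example, to produce a 64-bit ARM Linux binary from any host (target values are illustrative):

```
make build GOOS=linux GOARCH=arm64
```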
|
||||
| Manager | Install | Version |
|
||||
|---------|---------|---------|
|
||||
| Homebrew | `brew install ipfs` | [](https://formulae.brew.sh/formula/ipfs) |
|
||||
| MacPorts | `sudo port install ipfs` | [](https://ports.macports.org/port/ipfs/) |
|
||||
| Nix | `nix-env -i kubo` | [](https://search.nixos.org/packages?query=kubo) |
|
||||
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
|
||||
|
||||
#### Troubleshooting
|
||||
#### Windows
|
||||
|
||||
- Separate [instructions are available for building on Windows](docs/windows.md).
|
||||
- `git` is required in order for `go get` to fetch all dependencies.
|
||||
- Package managers often contain out-of-date `golang` packages.
|
||||
Ensure that `go version` reports at least 1.10. See above for how to install Go.
|
||||
- If you are interested in development, please install the development
|
||||
dependencies as well.
|
||||
- Shell command completions can be generated with one of the `ipfs commands completion` subcommands (see the sketch after this list). Read [docs/command-completion.md](docs/command-completion.md) to learn more.
|
||||
- See the [misc folder](https://github.com/ipfs/kubo/tree/master/misc) for how to connect IPFS to systemd or whatever init system your distro uses.
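As a minimal sketch of generating Bash completions (the output path is one common choice; adjust for your shell and distro):

```console
$ ipfs commands completion bash > ~/.local/share/bash-completion/completions/ipfs
$ source ~/.local/share/bash-completion/completions/ipfs
```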
|
||||
| Manager | Install | Version |
|
||||
|---------|---------|---------|
|
||||
| Scoop | `scoop install kubo` | [](https://scoop.sh/#/apps?q=kubo) |
|
||||
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
|
||||
|
||||
## Getting Started
|
||||
~~Chocolatey~~ no longer supported ([#9341](https://github.com/ipfs/kubo/issues/9341))
|
||||
|
||||
### Usage
|
||||
## Documentation
|
||||
|
||||
[](https://docs.ipfs.tech/how-to/command-line-quick-start/)
|
||||
[](https://docs.ipfs.tech/reference/kubo/cli/)
|
||||
|
||||
To start using IPFS, you must first initialize IPFS's config files on your
|
||||
system; this is done with `ipfs init`. See `ipfs init --help` for information on
|
||||
the optional arguments it takes. After initialization is complete, you can use
|
||||
`ipfs mount`, `ipfs add` and any of the other commands to explore!
|
||||
|
||||
### Some things to try
|
||||
|
||||
Basic proof of 'ipfs working' locally:
|
||||
|
||||
echo "hello world" > hello
|
||||
ipfs add hello
|
||||
# This should output a hash string that looks something like:
|
||||
# QmT78zSuBmuS4z925WZfrqQ1qHaJ56DQaTfyMUF7F8ff5o
|
||||
ipfs cat <that hash>
```
|
||||
|
||||
### HTTP/RPC clients
|
||||
|
||||
For programmatic interaction with Kubo, see our [list of HTTP/RPC clients](docs/http-rpc-clients.md).
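As a quick illustration of the HTTP RPC endpoint those clients wrap (assuming a daemon running on the default API address):

```console
$ curl -X POST -F file=@hello.txt "http://127.0.0.1:5001/api/v0/add"
```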
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
If you have installed IPFS before and are running into problems getting a newer version to work, try deleting (or backing up somewhere else) your IPFS config directory (`~/.ipfs` by default) and rerunning `ipfs init`. This will reinitialize the config file to its defaults and clear out the local datastore of any bad entries.
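A hedged sketch of that reset; the backup path is just an example:

```console
$ ipfs shutdown               # stop the daemon if it is running
$ mv ~/.ipfs ~/.ipfs.backup   # keep the old repo in case you need it
$ ipfs init
```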
|
||||
|
||||
Please direct general questions and help requests to our [forums](https://discuss.ipfs.tech).
|
||||
|
||||
If you believe you've found a bug, check the [issues list](https://github.com/ipfs/kubo/issues) and, if you don't see your problem there, either come talk to us on [Matrix chat](https://docs.ipfs.tech/community/chat/), or file an issue of your own!
|
||||
|
||||
## Packages
|
||||
|
||||
See [IPFS in GO](https://docs.ipfs.tech/reference/go/api/) documentation.
|
||||
| Topic | Description |
|
||||
|-------|-------------|
|
||||
| [Configuration](docs/config.md) | All config options reference |
|
||||
| [Environment variables](docs/environment-variables.md) | Runtime settings via env vars |
|
||||
| [Experimental features](docs/experimental-features.md) | Opt-in features in development |
|
||||
| [HTTP Gateway](docs/gateway.md) | Path, subdomain, and trustless gateway setup |
|
||||
| [HTTP RPC clients](docs/http-rpc-clients.md) | Client libraries for Go, JS |
|
||||
| [Delegated routing](docs/delegated-routing.md) | Multi-router and HTTP routing |
|
||||
| [Metrics & monitoring](docs/metrics.md) | Prometheus metrics |
|
||||
| [Content blocking](docs/content-blocking.md) | Denylist for public nodes |
|
||||
| [Customizing](docs/customizing.md) | Unsure whether to use plugins, Boxo, or a fork? |
|
||||
| [Debug guide](docs/debug-guide.md) | CPU profiles, memory analysis, tracing |
|
||||
| [Changelogs](docs/changelogs/) | Release notes for each version |
|
||||
| [All documentation](https://github.com/ipfs/kubo/tree/master/docs) | Full list of docs |
|
||||
|
||||
## Development
|
||||
|
||||
Some places to get you started on the codebase:
|
||||
See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow.
|
||||
|
||||
- Main file: [./cmd/ipfs/main.go](https://github.com/ipfs/kubo/blob/master/cmd/ipfs/main.go)
|
||||
- CLI Commands: [./core/commands/](https://github.com/ipfs/kubo/tree/master/core/commands)
|
||||
- Bitswap (the data trading engine): [go-bitswap](https://github.com/ipfs/go-bitswap)
|
||||
- libp2p
|
||||
- libp2p: https://github.com/libp2p/go-libp2p
|
||||
- DHT: https://github.com/libp2p/go-libp2p-kad-dht
|
||||
- [IPFS : The `Add` command demystified](https://github.com/ipfs/kubo/tree/master/docs/add-code-flow.md)
|
||||
## Getting Help
|
||||
|
||||
### Map of Implemented Subsystems
|
||||
**WIP**: This is a high-level architecture diagram of the various sub-systems of this specific implementation. To be updated with how they interact. Anyone who has suggestions is welcome to comment [here](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit) on how we can improve this!
|
||||
<img src="https://docs.google.com/drawings/d/e/2PACX-1vS_n1FvSu6mdmSirkBrIIEib2gqhgtatD9awaP2_WdrGN4zTNeg620XQd9P95WT-IvognSxIIdCM5uE/pub?w=1446&h=1036">
|
||||
- [IPFS Forum](https://discuss.ipfs.tech) - community support, questions, and discussion
|
||||
- [Community](https://docs.ipfs.tech/community/) - chat, events, and working groups
|
||||
- [GitHub Issues](https://github.com/ipfs/kubo/issues) - bug reports for Kubo specifically
|
||||
- [IPFS Docs Issues](https://github.com/ipfs/ipfs-docs/issues) - documentation issues
|
||||
|
||||
### CLI, HTTP-API, Architecture Diagram
|
||||
|
||||

|
||||
|
||||
> [Origin](https://github.com/ipfs/pm/pull/678#discussion_r210410924)
|
||||
|
||||
Description: Dotted means "likely going away". The "Legacy" parts are thin wrappers around some commands to translate between the new system and the old system. The grayed-out parts on the "daemon" diagram are there to show that the code is all the same, it's just that we turn some pieces on and some pieces off depending on whether we're running on the client or the server.
|
||||
|
||||
### Testing
|
||||
|
||||
```
|
||||
make test
|
||||
```
|
||||
|
||||
### Development Dependencies
|
||||
|
||||
If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf).
|
||||
|
||||
### Developer Notes
|
||||
|
||||
Find more developer documentation in [docs](./docs).
|
||||
|
||||
## Maintainer Info
|
||||
|
||||
Kubo is maintained by [Shipyard](https://ipshipyard.com/).
|
||||
|
||||
* This repository is part of [Shipyard's Go Triage](https://ipshipyard.notion.site/IPFS-Go-Triage-Boxo-Kubo-Rainbow-0ddee6b7f28d412da7dabe4f9107c29a).
|
||||
* [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
|
||||
## Security Issues
|
||||
|
||||
See [`SECURITY.md`](SECURITY.md).
|
||||
|
||||
## Contributing
|
||||
|
||||
[](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md)
|
||||
|
||||
We ❤️ all [our contributors](docs/AUTHORS); this project wouldn’t be what it is without you! If you want to help out, please see [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md).
|
||||
|
||||
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
This repository follows the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
|
||||
Members of the IPFS community provide Kubo support in the [Kubo help category on the discussion forum](https://discuss.ipfs.tech/c/help/help-kubo/23).
|
||||
## Maintainer Info
|
||||
|
||||
Need help with IPFS itself? Learn where to get help and support at https://ipfs.tech/help.
|
||||
<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>
|
||||
|
||||
> [!NOTE]
|
||||
> Kubo is maintained by the [Shipyard](https://ipshipyard.com/) team.
|
||||
>
|
||||
> [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
|
||||
|
||||
## License
|
||||
|
||||
This project is dual-licensed under Apache 2.0 and MIT terms:
|
||||
Dual-licensed under Apache 2.0 and MIT:
|
||||
|
||||
- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/ipfs/kubo/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
- MIT license ([LICENSE-MIT](https://github.com/ipfs/kubo/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT)
|
||||
- [LICENSE-APACHE](LICENSE-APACHE)
|
||||
- [LICENSE-MIT](LICENSE-MIT)
|
||||
|
||||
20
Rules.mk
20
Rules.mk
@ -107,8 +107,8 @@ uninstall:
|
||||
.PHONY: uninstall
|
||||
|
||||
supported:
|
||||
@echo "Currently supported platforms:"
|
||||
@for p in ${SUPPORTED_PLATFORMS}; do echo $$p; done
|
||||
@echo "Currently supported platforms (from .github/build-platforms.yml):"
|
||||
@grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' || (echo "Error: .github/build-platforms.yml not found"; exit 1)
|
||||
.PHONY: supported
|
||||
|
||||
help:
|
||||
@ -134,14 +134,14 @@ help:
|
||||
@echo ''
|
||||
@echo 'TESTING TARGETS:'
|
||||
@echo ''
|
||||
@echo ' test - Run all tests'
|
||||
@echo ' test_short - Run short go tests and short sharness tests'
|
||||
@echo ' test_go_short - Run short go tests'
|
||||
@echo ' test_go_test - Run all go tests'
|
||||
@echo ' test_go_expensive - Run all go tests and compile on all platforms'
|
||||
@echo ' test_go_race - Run go tests with the race detector enabled'
|
||||
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
|
||||
@echo ' test - Run all tests (test_go_fmt, test_unit, test_cli, test_sharness)'
|
||||
@echo ' test_short - Run fast tests (test_go_fmt, test_unit)'
|
||||
@echo ' test_unit - Run unit tests with coverage (excludes test/cli)'
|
||||
@echo ' test_cli - Run CLI integration tests (requires built binary)'
|
||||
@echo ' test_go_fmt - Check Go source formatting'
|
||||
@echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml'
|
||||
@echo ' test_go_lint - Run golangci-lint'
|
||||
@echo ' test_sharness - Run sharness tests'
|
||||
@echo ' coverage - Collects coverage info from unit tests and sharness'
|
||||
@echo ' coverage - Collect coverage info from unit tests and sharness'
|
||||
@echo
|
||||
.PHONY: help
|
||||
|
||||
49
appveyor.yml
49
appveyor.yml
@ -1,49 +0,0 @@
|
||||
# Notes:
|
||||
# - Minimal appveyor.yml file is an empty file. All sections are optional.
|
||||
# - Indent each level of configuration with 2 spaces. Do not use tabs!
|
||||
# - All section names are case-sensitive.
|
||||
# - Section names should be unique on each level.
|
||||
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\ipfs\go-ipfs
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
TEST_VERBOSE: 1
|
||||
#TEST_NO_FUSE: 1
|
||||
#TEST_SUITE: test_sharness
|
||||
#GOFLAGS: -tags nofuse
|
||||
global:
|
||||
BASH: C:\cygwin\bin\bash
|
||||
matrix:
|
||||
- GOARCH: amd64
|
||||
GOVERSION: 1.5.1
|
||||
GOROOT: c:\go
|
||||
DOWNLOADPLATFORM: "x64"
|
||||
|
||||
install:
|
||||
# Enable make
|
||||
#- SET PATH=c:\MinGW\bin;%PATH%
|
||||
#- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
|
||||
- go version
|
||||
- go env
|
||||
|
||||
# Cygwin build script
|
||||
#
|
||||
# NOTES:
|
||||
#
|
||||
# The stdin/stdout file descriptor appears not to be valid for the Appveyor
|
||||
# build which causes failures as certain functions attempt to redirect
|
||||
# default file handles. Ensure a dummy file descriptor is opened with 'exec'.
|
||||
#
|
||||
build_script:
|
||||
- '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; make nofuse"'
|
||||
|
||||
test_script:
|
||||
- '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; export GOFLAGS=''-tags nofuse''; export TEST_NO_FUSE=1; export TEST_VERBOSE=1; export TEST_EXPENSIVE=1; export TEST_SUITE=test_sharness; make $TEST_SUITE"'
|
||||
|
||||
#build:
|
||||
# parallel: true
|
||||
@ -1,44 +0,0 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# Check that the go version is at least equal to a minimum version
|
||||
# number.
|
||||
#
|
||||
# Call it for example like this:
|
||||
#
|
||||
# $ check_go_version "1.5.2"
|
||||
#
|
||||
|
||||
USAGE="$0 GO_MIN_VERSION"
|
||||
|
||||
die() {
|
||||
printf >&2 "fatal: %s\n" "$@"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Get arguments
|
||||
|
||||
test "$#" -eq "1" || die "This program must be passed exactly 1 arguments" "Usage: $USAGE"
|
||||
|
||||
GO_MIN_VERSION="$1"
|
||||
|
||||
UPGRADE_MSG="Please take a look at https://golang.org/doc/install to install or upgrade go."
|
||||
|
||||
# Get path to the directory containing this file
|
||||
# If $0 has no slashes, uses "./"
|
||||
PREFIX=$(expr "$0" : "\(.*\/\)") || PREFIX='./'
|
||||
# Include the 'check_at_least_version' function
|
||||
. ${PREFIX}check_version
|
||||
|
||||
# Check that the go binary exists and is in the path
|
||||
|
||||
GOCC=${GOCC="go"}
|
||||
|
||||
type ${GOCC} >/dev/null 2>&1 || die_upgrade "go is not installed or not in the PATH!"
|
||||
|
||||
# Check the go binary version
|
||||
|
||||
VERS_STR=$(${GOCC} version 2>&1) || die "'go version' failed with output: $VERS_STR"
|
||||
|
||||
GO_CUR_VERSION=$(expr "$VERS_STR" : ".*go version.* go\([^[:space:]]*\) .*") || die "Invalid 'go version' output: $VERS_STR"
|
||||
|
||||
check_at_least_version "$GO_MIN_VERSION" "$GO_CUR_VERSION" "${GOCC}"
|
||||
@ -1,77 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
if test "x$UPGRADE_MSG" = "x"; then
|
||||
printf >&2 "fatal: Please set '"'$UPGRADE_MSG'"' before sourcing this script\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
die_upgrade() {
|
||||
printf >&2 "fatal: %s\n" "$@"
|
||||
printf >&2 "=> %s\n" "$UPGRADE_MSG"
|
||||
exit 1
|
||||
}
|
||||
|
||||
major_number() {
|
||||
vers="$1"
|
||||
|
||||
# Hack around 'expr' exiting with code 1 when it outputs 0
|
||||
case "$vers" in
|
||||
0) echo "0" ;;
|
||||
0.*) echo "0" ;;
|
||||
*) expr "$vers" : "\([^.]*\).*" || return 1
|
||||
esac
|
||||
}
|
||||
|
||||
check_at_least_version() {
|
||||
MIN_VERS="$1"
|
||||
CUR_VERS="$2"
|
||||
PROG_NAME="$3"
|
||||
|
||||
# Get major, minor and fix numbers for each version
|
||||
MIN_MAJ=$(major_number "$MIN_VERS") || die "No major version number in '$MIN_VERS' for '$PROG_NAME'"
|
||||
CUR_MAJ=$(major_number "$CUR_VERS") || die "No major version number in '$CUR_VERS' for '$PROG_NAME'"
|
||||
|
||||
# We expect a version to be of form X.X.X
|
||||
# if the second dot doesn't match, we consider it a prerelease
|
||||
|
||||
if MIN_MIN=$(expr "$MIN_VERS" : "[^.]*\.\([0-9][0-9]*\)"); then
|
||||
# this captured digit is necessary, since expr returns code 1 if the output is empty
|
||||
if expr "$MIN_VERS" : "[^.]*\.[0-9]*\([0-9]\.\|[0-9]\$\)" >/dev/null; then
|
||||
MIN_PRERELEASE="0"
|
||||
else
|
||||
MIN_PRERELEASE="1"
|
||||
fi
|
||||
MIN_FIX=$(expr "$MIN_VERS" : "[^.]*\.[0-9][0-9]*[^0-9][^0-9]*\([0-9][0-9]*\)") || MIN_FIX="0"
|
||||
else
|
||||
MIN_MIN="0"
|
||||
MIN_PRERELEASE="0"
|
||||
MIN_FIX="0"
|
||||
fi
|
||||
if CUR_MIN=$(expr "$CUR_VERS" : "[^.]*\.\([0-9][0-9]*\)"); then
|
||||
# this captured digit is necessary, since expr returns code 1 if the output is empty
|
||||
if expr "$CUR_VERS" : "[^.]*\.[0-9]*\([0-9]\.\|[0-9]\$\)" >/dev/null; then
|
||||
CUR_PRERELEASE="0"
|
||||
else
|
||||
CUR_PRERELEASE="1"
|
||||
fi
|
||||
CUR_FIX=$(expr "$CUR_VERS" : "[^.]*\.[0-9][0-9]*[^0-9][^0-9]*\([0-9][0-9]*\)") || CUR_FIX="0"
|
||||
else
|
||||
CUR_MIN="0"
|
||||
CUR_PRERELEASE="0"
|
||||
CUR_FIX="0"
|
||||
fi
|
||||
|
||||
# Compare versions
|
||||
VERS_LEAST="$PROG_NAME version '$CUR_VERS' should be at least '$MIN_VERS'"
|
||||
test "$CUR_MAJ" -lt "$MIN_MAJ" && die_upgrade "$VERS_LEAST"
|
||||
test "$CUR_MAJ" -gt "$MIN_MAJ" || {
|
||||
test "$CUR_MIN" -lt "$MIN_MIN" && die_upgrade "$VERS_LEAST"
|
||||
test "$CUR_MIN" -gt "$MIN_MIN" || {
|
||||
test "$CUR_PRERELEASE" -gt "$MIN_PRERELEASE" && die_upgrade "$VERS_LEAST"
|
||||
test "$CUR_PRERELEASE" -lt "$MIN_PRERELEASE" || {
|
||||
test "$CUR_FIX" -lt "$MIN_FIX" && die_upgrade "$VERS_LEAST"
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -50,6 +50,6 @@ else
|
||||
unset IPFS_SWARM_KEY_FILE
|
||||
fi
|
||||
|
||||
find /container-init.d -maxdepth 1 -type f -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
|
||||
find /container-init.d -maxdepth 1 \( -type f -o -type l \) -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
|
||||
|
||||
exec ipfs "$@"
|
||||
|
||||
@ -18,7 +18,7 @@ set -euo pipefail
|
||||
if [[ $# -lt 1 ]] ; then
|
||||
echo 'At least 1 arg required.'
|
||||
echo 'Usage:'
|
||||
echo './push-docker-tags.sh <build number> [git commit sha1] [git branch name] [git tag name]'
|
||||
echo './get-docker-tags.sh <build number> [git commit sha1] [git branch name] [git tag name]'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@ -29,12 +29,10 @@ GIT_BRANCH=${3:-$(git symbolic-ref -q --short HEAD || echo "unknown")}
|
||||
GIT_TAG=${4:-$(git describe --tags --exact-match 2> /dev/null || echo "")}
|
||||
|
||||
IMAGE_NAME=${IMAGE_NAME:-ipfs/kubo}
|
||||
LEGACY_IMAGE_NAME=${LEGACY_IMAGE_NAME:-ipfs/go-ipfs}
|
||||
|
||||
echoImageName () {
|
||||
local IMAGE_TAG=$1
|
||||
echo "$IMAGE_NAME:$IMAGE_TAG"
|
||||
echo "$LEGACY_IMAGE_NAME:$IMAGE_TAG"
|
||||
}
|
||||
|
||||
if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
|
||||
@ -43,16 +41,16 @@ if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
|
||||
elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echoImageName "$GIT_TAG"
|
||||
echoImageName "latest"
|
||||
echoImageName "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981
|
||||
echoImageName "release" # see: https://github.com/ipfs/kubo/issues/3999#issuecomment-742228981
|
||||
|
||||
elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
|
||||
# sanitize the branch name since docker tags have stricter char limits than git branch names
|
||||
branch=$(echo "$GIT_BRANCH" | tr '/' '-' | tr --delete --complement '[:alnum:]-')
|
||||
echoImageName "${branch}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
|
||||
elif [ "$GIT_BRANCH" = "master" ]; then
|
||||
echoImageName "master-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
echoImageName "master-latest"
|
||||
elif [ "$GIT_BRANCH" = "master" ] || [ "$GIT_BRANCH" = "staging" ]; then
|
||||
echoImageName "${GIT_BRANCH}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
echoImageName "${GIT_BRANCH}-latest"
|
||||
|
||||
else
|
||||
echo "Nothing to do. No docker tag defined for branch: $GIT_BRANCH, tag: $GIT_TAG"
|
||||
|
||||
560
bin/mkreleaselog
560
bin/mkreleaselog
@ -1,10 +1,19 @@
|
||||
#!/bin/zsh
|
||||
#!/bin/bash
|
||||
#
|
||||
# Invocation: mkreleaselog [FIRST_REF [LAST_REF]]
|
||||
#
|
||||
# Generates release notes with contributor statistics, deduplicating by GitHub handle.
|
||||
# GitHub handles are resolved from:
|
||||
# 1. GitHub noreply emails (user@users.noreply.github.com)
|
||||
# 2. Merge commit messages (Merge pull request #N from user/branch)
|
||||
# 3. GitHub API via gh CLI (for squash merges)
|
||||
#
|
||||
# Results are cached in ~/.cache/mkreleaselog/github-handles.json
|
||||
|
||||
set -euo pipefail
|
||||
export GO111MODULE=on
|
||||
export GOPATH="$(go env GOPATH)"
|
||||
GOPATH="$(go env GOPATH)"
|
||||
export GOPATH
|
||||
|
||||
# List of PCRE regular expressions to match "included" modules.
|
||||
INCLUDE_MODULES=(
|
||||
@ -15,10 +24,15 @@ INCLUDE_MODULES=(
|
||||
"^github.com/multiformats/"
|
||||
"^github.com/filecoin-project/"
|
||||
"^github.com/ipfs-shipyard/"
|
||||
"^github.com/ipshipyard/"
|
||||
"^github.com/probe-lab/"
|
||||
|
||||
# Authors of personal modules used by go-ipfs that should be mentioned in the
|
||||
# release notes.
|
||||
"^github.com/whyrusleeping/"
|
||||
"^github.com/gammazero/"
|
||||
"^github.com/Jorropo/"
|
||||
"^github.com/guillaumemichel/"
|
||||
"^github.com/Kubuxu/"
|
||||
"^github.com/jbenet/"
|
||||
"^github.com/Stebalien/"
|
||||
@ -48,15 +62,348 @@ IGNORE_FILES=(
|
||||
)
|
||||
|
||||
##########################################################################################
|
||||
# GitHub Handle Resolution Infrastructure
|
||||
##########################################################################################
|
||||
|
||||
# Cache location following XDG spec
|
||||
GITHUB_CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/mkreleaselog"
|
||||
GITHUB_CACHE_FILE="$GITHUB_CACHE_DIR/github-handles.json"
|
||||
|
||||
# Timeout for gh CLI commands (seconds)
|
||||
GH_TIMEOUT=10
|
||||
|
||||
# Associative array for email -> github handle mapping (runtime cache)
|
||||
declare -A EMAIL_TO_GITHUB
|
||||
|
||||
# Check if gh CLI is available and authenticated
|
||||
gh_available() {
|
||||
command -v gh >/dev/null 2>&1 && gh auth status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
# Load cached email -> github handle mappings from disk
|
||||
load_github_cache() {
|
||||
EMAIL_TO_GITHUB=()
|
||||
|
||||
if [[ ! -f "$GITHUB_CACHE_FILE" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Validate JSON before loading
|
||||
if ! jq -e '.' "$GITHUB_CACHE_FILE" >/dev/null 2>&1; then
|
||||
msg "Warning: corrupted cache file, ignoring"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local email handle
|
||||
while IFS=$'\t' read -r email handle; do
|
||||
# Validate handle format (alphanumeric, hyphens, max 39 chars)
|
||||
if [[ -n "$email" && -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
fi
|
||||
done < <(jq -r 'to_entries[] | "\(.key)\t\(.value)"' "$GITHUB_CACHE_FILE" 2>/dev/null)
|
||||
|
||||
msg "Loaded ${#EMAIL_TO_GITHUB[@]} cached GitHub handle mappings"
|
||||
}
|
||||
|
||||
# Save email -> github handle mappings to disk (atomic write)
|
||||
save_github_cache() {
|
||||
if [[ ${#EMAIL_TO_GITHUB[@]} -eq 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
mkdir -p "$GITHUB_CACHE_DIR"
|
||||
|
||||
local tmp_file
|
||||
tmp_file="$(mktemp "$GITHUB_CACHE_DIR/cache.XXXXXX")" || return 1
|
||||
|
||||
# Build JSON from associative array
|
||||
{
|
||||
echo "{"
|
||||
local first=true
|
||||
local key
|
||||
for key in "${!EMAIL_TO_GITHUB[@]}"; do
|
||||
if [[ "$first" == "true" ]]; then
|
||||
first=false
|
||||
else
|
||||
echo ","
|
||||
fi
|
||||
# Escape special characters in email for JSON
|
||||
printf ' %s: %s' "$(jq -n --arg e "$key" '$e')" "$(jq -n --arg h "${EMAIL_TO_GITHUB[$key]}" '$h')"
|
||||
done
|
||||
echo
|
||||
echo "}"
|
||||
} > "$tmp_file"
|
||||
|
||||
# Validate before replacing
|
||||
if jq -e '.' "$tmp_file" >/dev/null 2>&1; then
|
||||
mv "$tmp_file" "$GITHUB_CACHE_FILE"
|
||||
msg "Saved ${#EMAIL_TO_GITHUB[@]} GitHub handle mappings to cache"
|
||||
else
|
||||
rm -f "$tmp_file"
|
||||
msg "Warning: failed to save cache (invalid JSON)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Extract GitHub handle from email if it's a GitHub noreply address
|
||||
# Handles: user@users.noreply.github.com and 12345678+user@users.noreply.github.com
|
||||
extract_handle_from_noreply() {
|
||||
local email="$1"
|
||||
|
||||
if [[ "$email" =~ ^([0-9]+\+)?([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)@users\.noreply\.github\.com$ ]]; then
|
||||
echo "${BASH_REMATCH[2]}"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Extract GitHub handle from merge commit subject
|
||||
# Handles: "Merge pull request #123 from username/branch"
|
||||
extract_handle_from_merge_commit() {
|
||||
local subject="$1"
|
||||
|
||||
if [[ "$subject" =~ ^Merge\ pull\ request\ \#[0-9]+\ from\ ([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)/.*$ ]]; then
|
||||
echo "${BASH_REMATCH[1]}"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Extract PR number from commit subject
|
||||
# Handles: "Subject (#123)" and "Merge pull request #123 from"
|
||||
extract_pr_number() {
|
||||
local subject="$1"
|
||||
|
||||
if [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
|
||||
echo "${BASH_REMATCH[1]}"
|
||||
return 0
|
||||
elif [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
|
||||
echo "${BASH_REMATCH[1]}"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Query GitHub API for PR author (with timeout and error handling)
|
||||
query_pr_author() {
|
||||
local gh_repo="$1" # e.g., "ipfs/kubo"
|
||||
local pr_num="$2"
|
||||
|
||||
if ! gh_available; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
local handle
|
||||
handle="$(timeout "$GH_TIMEOUT" gh pr view "$pr_num" --repo "$gh_repo" --json author -q '.author.login' 2>/dev/null)" || return 1
|
||||
|
||||
# Validate handle format
|
||||
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Query GitHub API for commit author (fallback when no PR available)
|
||||
query_commit_author() {
|
||||
local gh_repo="$1" # e.g., "ipfs/kubo"
|
||||
local commit_sha="$2"
|
||||
|
||||
if ! gh_available; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
local handle
|
||||
handle="$(timeout "$GH_TIMEOUT" gh api "/repos/$gh_repo/commits/$commit_sha" --jq '.author.login // empty' 2>/dev/null)" || return 1
|
||||
|
||||
# Validate handle format
|
||||
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Resolve email to GitHub handle using all available methods
|
||||
# Args: email, commit_hash (optional), repo_dir (optional), gh_repo (optional)
|
||||
resolve_github_handle() {
|
||||
local email="$1"
|
||||
local commit="${2:-}"
|
||||
local repo_dir="${3:-}"
|
||||
local gh_repo="${4:-}"
|
||||
|
||||
# Skip empty emails
|
||||
[[ -z "$email" ]] && return 1
|
||||
|
||||
# Check runtime cache first
|
||||
if [[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]]; then
|
||||
echo "${EMAIL_TO_GITHUB[$email]}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local handle=""
|
||||
|
||||
# Method 1: Extract from noreply email
|
||||
if handle="$(extract_handle_from_noreply "$email")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Method 2: Look at commit message for merge commit pattern
|
||||
if [[ -n "$commit" && -n "$repo_dir" ]]; then
|
||||
local subject
|
||||
subject="$(git -C "$repo_dir" log -1 --format='%s' "$commit" 2>/dev/null)" || true
|
||||
|
||||
if [[ -n "$subject" ]]; then
|
||||
if handle="$(extract_handle_from_merge_commit "$subject")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Method 3: Query GitHub API for PR author
|
||||
if [[ -n "$gh_repo" ]]; then
|
||||
local pr_num
|
||||
if pr_num="$(extract_pr_number "$subject")"; then
|
||||
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# Build GitHub handle mappings for all commits in a range
|
||||
# This does a single pass to collect PR numbers, then batch queries them
|
||||
build_github_mappings() {
|
||||
local module="$1"
|
||||
local start="$2"
|
||||
local end="${3:-HEAD}"
|
||||
local repo
|
||||
repo="$(strip_version "$module")"
|
||||
local dir
|
||||
local gh_repo=""
|
||||
|
||||
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
|
||||
dir="$ROOT_DIR"
|
||||
else
|
||||
dir="$GOPATH/src/$repo"
|
||||
fi
|
||||
|
||||
# Extract gh_repo for API calls (e.g., "ipfs/kubo" from "github.com/ipfs/kubo")
|
||||
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
|
||||
gh_repo="${BASH_REMATCH[1]}"
|
||||
fi
|
||||
|
||||
msg "Building GitHub handle mappings for $module..."
|
||||
|
||||
# Collect all unique emails and their commit context
|
||||
declare -A email_commits=()
|
||||
local hash email subject
|
||||
|
||||
while IFS=$'\t' read -r hash email subject; do
|
||||
[[ -z "$email" ]] && continue
|
||||
|
||||
# Skip if already resolved
|
||||
[[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]] && continue
|
||||
|
||||
# Try to resolve without API first
|
||||
local handle=""
|
||||
|
||||
# Method 1: noreply email
|
||||
if handle="$(extract_handle_from_noreply "$email")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Method 2: merge commit message
|
||||
if handle="$(extract_handle_from_merge_commit "$subject")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Store for potential API lookup
|
||||
if [[ -z "${email_commits[$email]:-}" ]]; then
|
||||
email_commits["$email"]="$hash"
|
||||
fi
|
||||
done < <(git -C "$dir" log --format='tformat:%H%x09%aE%x09%s' --no-merges "$start..$end" 2>/dev/null)
|
||||
|
||||
# API batch lookup for remaining emails (if gh is available)
|
||||
if gh_available && [[ -n "$gh_repo" && ${#email_commits[@]} -gt 0 ]]; then
|
||||
msg "Querying GitHub API for ${#email_commits[@]} unknown contributors..."
|
||||
local key
|
||||
for key in "${!email_commits[@]}"; do
|
||||
# Skip if already resolved
|
||||
[[ -n "${EMAIL_TO_GITHUB[$key]:-}" ]] && continue
|
||||
|
||||
local commit_hash="${email_commits[$key]}"
|
||||
local subj handle
|
||||
subj="$(git -C "$dir" log -1 --format='%s' "$commit_hash" 2>/dev/null)" || true
|
||||
|
||||
# Try PR author lookup first (cheaper API call)
|
||||
local pr_num
|
||||
if pr_num="$(extract_pr_number "$subj")"; then
|
||||
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
|
||||
EMAIL_TO_GITHUB["$key"]="$handle"
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
|
||||
# Fallback: commit author API (works for any commit)
|
||||
if handle="$(query_commit_author "$gh_repo" "$commit_hash")"; then
|
||||
EMAIL_TO_GITHUB["$key"]="$handle"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
##########################################################################################
|
||||
# Original infrastructure with modifications
|
||||
##########################################################################################
|
||||
|
||||
build_include_regex() {
|
||||
local result=""
|
||||
local mod
|
||||
for mod in "${INCLUDE_MODULES[@]}"; do
|
||||
if [[ -n "$result" ]]; then
|
||||
result="$result|$mod"
|
||||
else
|
||||
result="$mod"
|
||||
fi
|
||||
done
|
||||
echo "($result)"
|
||||
}
|
||||
|
||||
build_exclude_regex() {
|
||||
local result=""
|
||||
local mod
|
||||
for mod in "${EXCLUDE_MODULES[@]}"; do
|
||||
if [[ -n "$result" ]]; then
|
||||
result="$result|$mod"
|
||||
else
|
||||
result="$mod"
|
||||
fi
|
||||
done
|
||||
if [[ -n "$result" ]]; then
|
||||
echo "($result)"
|
||||
else
|
||||
echo '$^' # match nothing
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ ${#INCLUDE_MODULES[@]} -gt 0 ]]; then
|
||||
INCLUDE_REGEX="(${$(printf "|%s" "${INCLUDE_MODULES[@]}"):1})"
|
||||
INCLUDE_REGEX="$(build_include_regex)"
|
||||
else
|
||||
INCLUDE_REGEX="" # "match anything"
|
||||
fi
|
||||
|
||||
if [[ ${#EXCLUDE_MODULES[@]} -gt 0 ]]; then
|
||||
EXCLUDE_REGEX="(${$(printf "|%s" "${EXCLUDE_MODULES[@]}"):1})"
|
||||
EXCLUDE_REGEX="$(build_exclude_regex)"
|
||||
else
|
||||
EXCLUDE_REGEX='$^' # "match nothing"
|
||||
fi
|
||||
@ -71,15 +418,28 @@ NL=$'\n'
|
||||
|
||||
ROOT_DIR="$(git rev-parse --show-toplevel)"
|
||||
|
||||
alias jq="jq --unbuffered"
|
||||
|
||||
msg() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
statlog() {
|
||||
local module="$1"
|
||||
local rpath="$GOPATH/src/$(strip_version "$module")"
|
||||
local rpath
|
||||
local gh_repo=""
|
||||
|
||||
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
|
||||
rpath="$ROOT_DIR"
|
||||
else
|
||||
rpath="$GOPATH/src/$(strip_version "$module")"
|
||||
fi
|
||||
|
||||
# Extract gh_repo for API calls
|
||||
local repo
|
||||
repo="$(strip_version "$module")"
|
||||
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
|
||||
gh_repo="${BASH_REMATCH[1]}"
|
||||
fi
|
||||
|
||||
local start="${2:-}"
|
||||
local end="${3:-HEAD}"
|
||||
local mailmap_file="$rpath/.mailmap"
|
||||
@ -88,18 +448,21 @@ statlog() {
|
||||
fi
|
||||
|
||||
local stack=()
|
||||
git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}" | while read -r line; do
|
||||
local line
|
||||
while read -r line; do
|
||||
if [[ -n "$line" ]]; then
|
||||
stack+=("$line")
|
||||
continue
|
||||
fi
|
||||
|
||||
local changes
|
||||
read -r changes
|
||||
|
||||
changed=0
|
||||
insertions=0
|
||||
deletions=0
|
||||
while read count event; do
|
||||
local changed=0
|
||||
local insertions=0
|
||||
local deletions=0
|
||||
local count event
|
||||
while read -r count event; do
|
||||
if [[ "$event" =~ ^file ]]; then
|
||||
changed=$count
|
||||
elif [[ "$event" =~ ^insertion ]]; then
|
||||
@ -112,27 +475,32 @@ statlog() {
|
||||
fi
|
||||
done<<<"${changes//,/$NL}"
|
||||
|
||||
local author
|
||||
for author in "${stack[@]}"; do
|
||||
local hash name email
|
||||
IFS=$'\t' read -r hash name email <<<"$author"
|
||||
|
||||
# Resolve GitHub handle
|
||||
local github_handle=""
|
||||
github_handle="$(resolve_github_handle "$email" "$hash" "$rpath" "$gh_repo")" || true
|
||||
|
||||
jq -n \
|
||||
--arg "hash" "$hash" \
|
||||
--arg "name" "$name" \
|
||||
--arg "email" "$email" \
|
||||
--arg "github" "$github_handle" \
|
||||
--argjson "changed" "$changed" \
|
||||
--argjson "insertions" "$insertions" \
|
||||
--argjson "deletions" "$deletions" \
|
||||
'{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
|
||||
'{Commit: $hash, Author: $name, Email: $email, GitHub: $github, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
|
||||
done
|
||||
stack=()
|
||||
done
|
||||
done < <(git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}")
|
||||
}
|
||||
|
||||
# Returns a stream of deps changed between $1 and $2.
|
||||
dep_changes() {
|
||||
{
|
||||
<"$1"
|
||||
<"$2"
|
||||
} | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
|
||||
cat "$1" "$2" | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
|
||||
}
|
||||
|
||||
# resolve_commits resolves a git ref for each version.
|
||||
@ -160,36 +528,37 @@ ignored_commit() {
|
||||
|
||||
# Generate a release log for a range of commits in a single repo.
|
||||
release_log() {
|
||||
setopt local_options BASH_REMATCH
|
||||
|
||||
local module="$1"
|
||||
local start="$2"
|
||||
local end="${3:-HEAD}"
|
||||
local repo="$(strip_version "$1")"
|
||||
local dir="$GOPATH/src/$repo"
|
||||
local repo
|
||||
repo="$(strip_version "$1")"
|
||||
local dir
|
||||
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
|
||||
dir="$ROOT_DIR"
|
||||
else
|
||||
dir="$GOPATH/src/$repo"
|
||||
fi
|
||||
|
||||
local commit pr
|
||||
git -C "$dir" log \
|
||||
--format='tformat:%H %s' \
|
||||
--first-parent \
|
||||
"$start..$end" |
|
||||
while read commit subject; do
|
||||
# Skip commits that only touch ignored files.
|
||||
if ignored_commit "$dir" "$commit"; then
|
||||
continue
|
||||
fi
|
||||
local commit subject
|
||||
while read -r commit subject; do
|
||||
# Skip commits that only touch ignored files.
|
||||
if ignored_commit "$dir" "$commit"; then
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then
|
||||
local prnum="${BASH_REMATCH[2]}"
|
||||
local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
|
||||
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
|
||||
elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then
|
||||
local prnum="${BASH_REMATCH[2]}"
|
||||
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
|
||||
else
|
||||
printf -- "- %s\n" "$subject"
|
||||
fi
|
||||
done
|
||||
if [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
|
||||
local prnum="${BASH_REMATCH[1]}"
|
||||
local desc
|
||||
desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
|
||||
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
|
||||
elif [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
|
||||
local prnum="${BASH_REMATCH[1]}"
|
||||
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
|
||||
else
|
||||
printf -- "- %s\n" "$subject"
|
||||
fi
|
||||
done < <(git -C "$dir" log --format='tformat:%H %s' --first-parent "$start..$end")
|
||||
}
|
||||
|
||||
indent() {
|
||||
@ -201,10 +570,16 @@ mod_deps() {
|
||||
}
|
||||
|
||||
ensure() {
|
||||
local repo="$(strip_version "$1")"
|
||||
local repo
|
||||
repo="$(strip_version "$1")"
|
||||
local commit="$2"
|
||||
local rpath="$GOPATH/src/$repo"
|
||||
if [[ ! -d "$rpath" ]]; then
|
||||
local rpath
|
||||
if [[ "$1" == "github.com/ipfs/kubo" ]]; then
|
||||
rpath="$ROOT_DIR"
|
||||
else
|
||||
rpath="$GOPATH/src/$repo"
|
||||
fi
|
||||
if [[ "$1" != "github.com/ipfs/kubo" ]] && [[ ! -d "$rpath" ]]; then
|
||||
msg "Cloning $repo..."
|
||||
git clone "http://$repo" "$rpath" >&2
|
||||
fi
|
||||
@ -217,14 +592,27 @@ ensure() {
|
||||
git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1
|
||||
}
|
||||
|
||||
# Summarize stats, grouping by GitHub handle (with fallback to email for dedup)
|
||||
statsummary() {
|
||||
jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' |
|
||||
jq '. + {Lines: (.Deletions + .Insertions)}'
|
||||
jq -s '
|
||||
# Group by GitHub handle if available, otherwise by email
|
||||
group_by(if .GitHub != "" then .GitHub else .Email end)[] |
|
||||
{
|
||||
# Use first non-empty GitHub handle, or fall back to Author name
|
||||
Author: .[0].Author,
|
||||
GitHub: (map(select(.GitHub != "")) | .[0].GitHub // ""),
|
||||
Email: .[0].Email,
|
||||
Commits: (. | length),
|
||||
Insertions: (map(.Insertions) | add),
|
||||
Deletions: (map(.Deletions) | add),
|
||||
Files: (map(.Files) | add)
|
||||
}
|
||||
' | jq '. + {Lines: (.Deletions + .Insertions)}'
|
||||
}
|
||||
|
||||
strip_version() {
|
||||
local repo="$1"
|
||||
if [[ "$repo" =~ '.*/v[0-9]+$' ]]; then
|
||||
if [[ "$repo" =~ .*/v[0-9]+$ ]]; then
|
||||
repo="$(dirname "$repo")"
|
||||
fi
|
||||
echo "$repo"
|
||||
@ -233,19 +621,24 @@ strip_version() {
|
||||
recursive_release_log() {
|
||||
local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}"
|
||||
local end="${2:-$(git rev-parse HEAD)}"
|
||||
local repo_root="$(git rev-parse --show-toplevel)"
|
||||
local module="$(go list -m)"
|
||||
local dir="$(go list -m -f '{{.Dir}}')"
|
||||
local repo_root
|
||||
repo_root="$(git rev-parse --show-toplevel)"
|
||||
local module
|
||||
module="$(go list -m)"
|
||||
local dir
|
||||
dir="$(go list -m -f '{{.Dir}}')"
|
||||
|
||||
if [[ "${GOPATH}/${module}" -ef "${dir}" ]]; then
|
||||
echo "This script requires the target module and all dependencies to live in a GOPATH."
|
||||
return 1
|
||||
fi
|
||||
# Load cached GitHub handle mappings
|
||||
load_github_cache
|
||||
|
||||
# Kubo can be run from any directory, dependencies still use GOPATH
|
||||
|
||||
(
|
||||
local result=0
|
||||
local workspace="$(mktemp -d)"
|
||||
trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT
|
||||
local workspace
|
||||
workspace="$(mktemp -d)"
|
||||
# shellcheck disable=SC2064
|
||||
trap "rm -rf '$workspace'" INT TERM EXIT
|
||||
cd "$workspace"
|
||||
|
||||
echo "Computing old deps..." >&2
|
||||
@ -260,6 +653,9 @@ recursive_release_log() {
|
||||
|
||||
printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2
|
||||
|
||||
# Pre-build GitHub mappings for main module
|
||||
build_github_mappings "$module" "$start" "$end"
|
||||
|
||||
echo "### 📝 Changelog"
|
||||
echo
|
||||
echo "<details><summary>Full Changelog</summary>"
|
||||
@ -270,24 +666,26 @@ recursive_release_log() {
|
||||
|
||||
statlog "$module" "$start" "$end" > statlog.json
|
||||
|
||||
dep_changes old_deps.json new_deps.json |
|
||||
local dep_module new new_ref old old_ref
|
||||
while read -r dep_module new new_ref old old_ref; do
|
||||
if ! ensure "$dep_module" "$new_ref"; then
|
||||
result=1
|
||||
local changelog="failed to fetch repo"
|
||||
else
|
||||
# Pre-build GitHub mappings for dependency
|
||||
build_github_mappings "$dep_module" "$old_ref" "$new_ref"
|
||||
statlog "$dep_module" "$old_ref" "$new_ref" >> statlog.json
|
||||
local changelog
|
||||
changelog="$(release_log "$dep_module" "$old_ref" "$new_ref")"
|
||||
fi
|
||||
if [[ -n "$changelog" ]]; then
|
||||
printf -- "- %s (%s -> %s):\n" "$dep_module" "$old" "$new"
|
||||
echo "$changelog" | indent
|
||||
fi
|
||||
done < <(dep_changes old_deps.json new_deps.json |
|
||||
jq --arg inc "$INCLUDE_REGEX" --arg exc "$EXCLUDE_REGEX" \
|
||||
'select(.Path | test($inc)) | select(.Path | test($exc) | not)' |
|
||||
# Compute changelogs
|
||||
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' |
|
||||
while read module new new_ref old old_ref; do
|
||||
if ! ensure "$module" "$new_ref"; then
|
||||
result=1
|
||||
local changelog="failed to fetch repo"
|
||||
else
|
||||
statlog "$module" "$old_ref" "$new_ref" >> statlog.json
|
||||
local changelog="$(release_log "$module" "$old_ref" "$new_ref")"
|
||||
fi
|
||||
if [[ -n "$changelog" ]]; then
|
||||
printf -- "- %s (%s -> %s):\n" "$module" "$old" "$new"
|
||||
echo "$changelog" | indent
|
||||
fi
|
||||
done
|
||||
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"')
|
||||
|
||||
echo
|
||||
echo "</details>"
|
||||
@ -299,8 +697,18 @@ recursive_release_log() {
|
||||
echo "|-------------|---------|---------|---------------|"
|
||||
statsummary <statlog.json |
|
||||
jq -s 'sort_by(.Lines) | reverse | .[]' |
|
||||
jq -r '"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"'
|
||||
return "$status"
|
||||
jq -r '
|
||||
if .GitHub != "" then
|
||||
"| [@\(.GitHub)](https://github.com/\(.GitHub)) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
|
||||
else
|
||||
"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
|
||||
end
|
||||
'
|
||||
|
||||
# Save cache before exiting
|
||||
save_github_cache
|
||||
|
||||
return "$result"
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@ -1,11 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
#
|
||||
# TODO: this script is legacy, use get-docker-tags.sh instead.
|
||||
#
|
||||
# push-docker-tags.sh
|
||||
#
|
||||
# Run from ci to tag images based on the current branch or tag name.
|
||||
# A bit like dockerhub autobuild config, but somewhere we can version control it.
|
||||
#
|
||||
# The `docker-build` job builds the current commit in docker and tags it as ipfs/go-ipfs:wip
|
||||
# The `docker-build` job builds the current commit in docker and tags it as ipfs/kubo:wip
|
||||
#
|
||||
# Then the `docker-publish` job runs this script to decide what tag, if any,
|
||||
# to publish to dockerhub.
|
||||
@ -40,7 +42,7 @@ GIT_TAG=${4:-$(git describe --tags --exact-match || echo "")}
|
||||
DRY_RUN=${5:-false}
|
||||
|
||||
WIP_IMAGE_TAG=${WIP_IMAGE_TAG:-wip}
|
||||
IMAGE_NAME=${IMAGE_NAME:-ipfs/go-ipfs}
|
||||
IMAGE_NAME=${IMAGE_NAME:-ipfs/kubo}
|
||||
|
||||
pushTag () {
|
||||
local IMAGE_TAG=$1
|
||||
@ -61,16 +63,16 @@ if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
|
||||
elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
pushTag "$GIT_TAG"
|
||||
pushTag "latest"
|
||||
pushTag "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981
|
||||
pushTag "release" # see: https://github.com/ipfs/kubo/issues/3999#issuecomment-742228981
|
||||
|
||||
elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
|
||||
# sanitize the branch name since docker tags have stricter char limits than git branch names
|
||||
branch=$(echo "$GIT_BRANCH" | tr '/' '-' | tr --delete --complement '[:alnum:]-')
|
||||
pushTag "${branch}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
|
||||
elif [ "$GIT_BRANCH" = "master" ]; then
|
||||
pushTag "master-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
pushTag "master-latest"
|
||||
elif [ "$GIT_BRANCH" = "master" ] || [ "$GIT_BRANCH" = "staging" ]; then
|
||||
pushTag "${GIT_BRANCH}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
pushTag "${GIT_BRANCH}-latest"
|
||||
|
||||
else
|
||||
echo "Nothing to do. No docker tag defined for branch: $GIT_BRANCH, tag: $GIT_TAG"
24
bin/test-go-build-platforms
Executable file
@ -0,0 +1,24 @@
#!/bin/bash
set -e

echo "Building kubo for all platforms in .github/build-platforms.yml..."

if [ ! -f .github/build-platforms.yml ]; then
echo "Error: .github/build-platforms.yml not found"
exit 1
fi

grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' | while read -r platform; do
if [ -z "$platform" ]; then
continue
fi

GOOS=$(echo "$platform" | cut -d- -f1)
GOARCH=$(echo "$platform" | cut -d- -f2)

echo "Building $platform..."
echo " GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs"
GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs
done

echo "All platforms built successfully"
@ -20,10 +20,10 @@ import (
|
||||
ipfs "github.com/ipfs/kubo"
|
||||
iface "github.com/ipfs/kubo/core/coreiface"
|
||||
caopts "github.com/ipfs/kubo/core/coreiface/options"
|
||||
"github.com/ipfs/kubo/misc/fsutil"
|
||||
dagpb "github.com/ipld/go-codec-dagpb"
|
||||
_ "github.com/ipld/go-ipld-prime/codec/dagcbor"
|
||||
"github.com/ipld/go-ipld-prime/node/basicnode"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
@ -82,7 +82,7 @@ func NewPathApi(ipfspath string) (*HttpApi, error) {
|
||||
|
||||
// ApiAddr reads api file in specified ipfs path.
|
||||
func ApiAddr(ipfspath string) (ma.Multiaddr, error) {
|
||||
baseDir, err := homedir.Expand(ipfspath)
|
||||
baseDir, err := fsutil.ExpandHome(ipfspath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -2,9 +2,9 @@ package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -12,11 +12,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/boxo/path"
|
||||
"github.com/ipfs/kubo/config"
|
||||
iface "github.com/ipfs/kubo/core/coreiface"
|
||||
"github.com/ipfs/kubo/core/coreiface/tests"
|
||||
"github.com/ipfs/kubo/test/cli/harness"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"go.uber.org/multierr"
|
||||
)
|
||||
|
||||
type NodeProvider struct{}
|
||||
@ -45,6 +45,9 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent
|
||||
|
||||
c := n.ReadConfig()
|
||||
c.Experimental.FilestoreEnabled = true
|
||||
// only provide things we pin. Allows to test
|
||||
// provide operations.
|
||||
c.Provide.Strategy = config.NewOptionalString("roots")
|
||||
n.WriteConfig(c)
|
||||
n.StartDaemon("--enable-pubsub-experiment", "--offline="+strconv.FormatBool(!online))
|
||||
|
||||
@ -88,16 +91,12 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return apis, multierr.Combine(errs...)
|
||||
return apis, errors.Join(errs...)
|
||||
}
|
||||
|
||||
func TestHttpApi(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("skipping due to #9905")
|
||||
}
|
||||
|
||||
tests.TestApi(NodeProvider{})(t)
|
||||
}
|
||||
|
||||
|
||||
@ -52,8 +52,12 @@ func (api *PinAPI) Add(ctx context.Context, p path.Path, opts ...caopts.PinAddOp
|
||||
return err
|
||||
}
|
||||
|
||||
return api.core().Request("pin/add", p.String()).
|
||||
Option("recursive", options.Recursive).Exec(ctx, nil)
|
||||
req := api.core().Request("pin/add", p.String()).
|
||||
Option("recursive", options.Recursive)
|
||||
if options.Name != "" {
|
||||
req = req.Option("name", options.Name)
|
||||
}
|
||||
return req.Exec(ctx, nil)
|
||||
}
|
||||
|
||||
type pinLsObject struct {
|
||||
@ -62,59 +66,46 @@ type pinLsObject struct {
|
||||
Type string
|
||||
}
|
||||
|
||||
func (api *PinAPI) Ls(ctx context.Context, opts ...caopts.PinLsOption) (<-chan iface.Pin, error) {
|
||||
func (api *PinAPI) Ls(ctx context.Context, pins chan<- iface.Pin, opts ...caopts.PinLsOption) error {
|
||||
defer close(pins)
|
||||
|
||||
options, err := caopts.PinLsOptions(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := api.core().Request("pin/ls").
|
||||
Option("type", options.Type).
|
||||
Option("names", options.Detailed).
|
||||
Option("stream", true).
|
||||
Send(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
defer res.Output.Close()
|
||||
|
||||
pins := make(chan iface.Pin)
|
||||
go func(ch chan<- iface.Pin) {
|
||||
defer res.Output.Close()
|
||||
defer close(ch)
|
||||
|
||||
dec := json.NewDecoder(res.Output)
|
||||
dec := json.NewDecoder(res.Output)
|
||||
for {
|
||||
var out pinLsObject
|
||||
for {
|
||||
switch err := dec.Decode(&out); err {
|
||||
case nil:
|
||||
case io.EOF:
|
||||
return
|
||||
default:
|
||||
select {
|
||||
case ch <- pin{err: err}:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c, err := cid.Parse(out.Cid)
|
||||
if err != nil {
|
||||
select {
|
||||
case ch <- pin{err: err}:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- pin{typ: out.Type, name: out.Name, path: path.FromCid(c)}:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
err := dec.Decode(&out)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}(pins)
|
||||
return pins, nil
|
||||
|
||||
c, err := cid.Parse(out.Cid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case pins <- pin{typ: out.Type, name: out.Name, path: path.FromCid(c)}:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
}
// IsPinned returns whether or not the given cid is pinned
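Note: Ls now takes a caller-owned channel and reports a single error instead of streaming error-carrying pins. A minimal caller sketch under that assumption; the client construction and the printed fields are illustrative, not part of this diff.

// Sketch only: driving the new channel-based PinAPI.Ls from a caller.
package main

import (
	"context"
	"fmt"
	"log"

	rpc "github.com/ipfs/kubo/client/rpc"
	iface "github.com/ipfs/kubo/core/coreiface"
)

func listPins(ctx context.Context, api *rpc.HttpApi) error {
	pins := make(chan iface.Pin)
	errCh := make(chan error, 1)

	// Producer: Ls closes the channel itself (defer close(pins) above),
	// so the range below always terminates.
	go func() {
		errCh <- api.Pin().Ls(ctx, pins)
	}()

	for p := range pins {
		fmt.Println(p.Path(), p.Type())
	}
	return <-errCh
}

func main() {
	api, err := rpc.NewLocalApi()
	if err != nil {
		log.Fatal(err)
	}
	if err := listPins(context.Background(), api); err != nil {
		log.Fatal(err)
	}
}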
@ -144,10 +144,12 @@ type lsOutput struct {
|
||||
Objects []lsObject
|
||||
}
|
||||
|
||||
func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, opts ...caopts.UnixfsLsOption) (<-chan iface.DirEntry, error) {
|
||||
func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, out chan<- iface.DirEntry, opts ...caopts.UnixfsLsOption) error {
|
||||
defer close(out)
|
||||
|
||||
options, err := caopts.UnixfsLsOptions(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := api.core().Request("ls", p.String()).
|
||||
@ -156,86 +158,64 @@ func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, opts ...caopts.Unixfs
|
||||
Option("stream", true).
|
||||
Send(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if resp.Error != nil {
|
||||
return nil, resp.Error
|
||||
return err
|
||||
}
|
||||
defer resp.Close()
|
||||
|
||||
dec := json.NewDecoder(resp.Output)
|
||||
out := make(chan iface.DirEntry)
|
||||
|
||||
go func() {
|
||||
defer resp.Close()
|
||||
defer close(out)
|
||||
|
||||
for {
|
||||
var link lsOutput
|
||||
if err := dec.Decode(&link); err != nil {
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case out <- iface.DirEntry{Err: err}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if len(link.Objects) != 1 {
|
||||
select {
|
||||
case out <- iface.DirEntry{Err: errors.New("unexpected Objects len")}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if len(link.Objects[0].Links) != 1 {
|
||||
select {
|
||||
case out <- iface.DirEntry{Err: errors.New("unexpected Links len")}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
l0 := link.Objects[0].Links[0]
|
||||
|
||||
c, err := cid.Decode(l0.Hash)
|
||||
if err != nil {
|
||||
select {
|
||||
case out <- iface.DirEntry{Err: err}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var ftype iface.FileType
|
||||
switch l0.Type {
|
||||
case unixfs.TRaw, unixfs.TFile:
|
||||
ftype = iface.TFile
|
||||
case unixfs.THAMTShard, unixfs.TDirectory, unixfs.TMetadata:
|
||||
ftype = iface.TDirectory
|
||||
case unixfs.TSymlink:
|
||||
ftype = iface.TSymlink
|
||||
}
|
||||
|
||||
select {
|
||||
case out <- iface.DirEntry{
|
||||
Name: l0.Name,
|
||||
Cid: c,
|
||||
Size: l0.Size,
|
||||
Type: ftype,
|
||||
Target: l0.Target,
|
||||
|
||||
Mode: l0.Mode,
|
||||
ModTime: l0.ModTime,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
for {
|
||||
var link lsOutput
|
||||
if err = dec.Decode(&link); err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
|
||||
return out, nil
|
||||
if len(link.Objects) != 1 {
|
||||
return errors.New("unexpected Objects len")
|
||||
}
|
||||
|
||||
if len(link.Objects[0].Links) != 1 {
|
||||
return errors.New("unexpected Links len")
|
||||
}
|
||||
|
||||
l0 := link.Objects[0].Links[0]
|
||||
|
||||
c, err := cid.Decode(l0.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ftype iface.FileType
|
||||
switch l0.Type {
|
||||
case unixfs.TRaw, unixfs.TFile:
|
||||
ftype = iface.TFile
|
||||
case unixfs.THAMTShard, unixfs.TDirectory, unixfs.TMetadata:
|
||||
ftype = iface.TDirectory
|
||||
case unixfs.TSymlink:
|
||||
ftype = iface.TSymlink
|
||||
}
|
||||
|
||||
select {
|
||||
case out <- iface.DirEntry{
|
||||
Name: l0.Name,
|
||||
Cid: c,
|
||||
Size: l0.Size,
|
||||
Type: ftype,
|
||||
Target: l0.Target,
|
||||
|
||||
Mode: l0.Mode,
|
||||
ModTime: l0.ModTime,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (api *UnixfsAPI) core() *HttpApi {
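The same channel-ownership change applies to directory listings. One practical consequence, sketched below with the imports already present in this file (context, errors, iface, path): cancelling the context unblocks the producer's send, Ls returns ctx.Err(), and close(out) ends the consumer's range loop, so stopping early cannot leak the goroutine. The helper is illustrative, not part of the diff.

// Sketch: collect only the first n entries of a large directory.
func firstEntries(ctx context.Context, api iface.CoreAPI, p path.Path, n int) ([]iface.DirEntry, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	out := make(chan iface.DirEntry)
	errCh := make(chan error, 1)
	go func() { errCh <- api.Unixfs().Ls(ctx, p, out) }()

	var entries []iface.DirEntry
	for e := range out {
		entries = append(entries, e)
		if len(entries) == n {
			cancel() // producer exits via <-ctx.Done() and closes out
		}
	}
	err := <-errCh
	if err != nil && !errors.Is(err, context.Canceled) {
		return nil, err
	}
	return entries, nil
}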
|
||||
|
||||
@ -2,7 +2,6 @@ include mk/header.mk
|
||||
IPFS_BIN_$(d) := $(call go-curr-pkg-tgt)
|
||||
|
||||
TGT_BIN += $(IPFS_BIN_$(d))
|
||||
TEST_GO_BUILD += $(d)-try-build
|
||||
CLEAN += $(IPFS_BIN_$(d))
|
||||
|
||||
PATH := $(realpath $(d)):$(PATH)
|
||||
@ -15,23 +14,12 @@ PATH := $(realpath $(d)):$(PATH)
|
||||
|
||||
$(d)_flags =-ldflags="-X "github.com/ipfs/kubo".CurrentCommit=$(git-hash)"
|
||||
|
||||
$(d)-try-build $(IPFS_BIN_$(d)): GOFLAGS += $(cmd/ipfs_flags)
|
||||
$(IPFS_BIN_$(d)): GOFLAGS += $(cmd/ipfs_flags)
|
||||
|
||||
# uses second expansion to collect all $(DEPS_GO)
|
||||
$(IPFS_BIN_$(d)): $(d) $$(DEPS_GO) ALWAYS #| $(DEPS_OO_$(d))
|
||||
$(go-build-relative)
|
||||
|
||||
TRY_BUILD_$(d)=$(addprefix $(d)-try-build-,$(SUPPORTED_PLATFORMS))
|
||||
$(d)-try-build: $(TRY_BUILD_$(d))
|
||||
.PHONY: $(d)-try-build
|
||||
|
||||
$(TRY_BUILD_$(d)): PLATFORM = $(subst -, ,$(patsubst $<-try-build-%,%,$@))
|
||||
$(TRY_BUILD_$(d)): GOOS = $(word 1,$(PLATFORM))
|
||||
$(TRY_BUILD_$(d)): GOARCH = $(word 2,$(PLATFORM))
|
||||
$(TRY_BUILD_$(d)): $(d) $$(DEPS_GO) ALWAYS
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) $(go-try-build)
|
||||
.PHONY: $(TRY_BUILD_$(d))
|
||||
|
||||
$(d)-install: GOFLAGS += $(cmd/ipfs_flags)
|
||||
$(d)-install: $(d) $$(DEPS_GO) ALWAYS
|
||||
$(GOCC) install $(go-flags-with-tags) ./cmd/ipfs
|
||||
|
||||
@ -83,10 +83,12 @@ func addMigrationFiles(ctx context.Context, node *core.IpfsNode, paths []string,
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
ipfsPath, err := ufs.Add(ctx, files.NewReaderStatFile(f, fi), options.Unixfs.Pin(pin))
|
||||
ipfsPath, err := ufs.Add(ctx, files.NewReaderStatFile(f, fi), options.Unixfs.Pin(pin, ""))
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -17,8 +17,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
mprome "github.com/ipfs/go-metrics-prometheus"
|
||||
version "github.com/ipfs/kubo"
|
||||
@ -36,8 +34,6 @@ import (
|
||||
nodeMount "github.com/ipfs/kubo/fuse/node"
|
||||
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
|
||||
"github.com/ipfs/kubo/repo/fsrepo/migrations"
|
||||
"github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher"
|
||||
goprocess "github.com/jbenet/goprocess"
|
||||
p2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
|
||||
pnet "github.com/libp2p/go-libp2p/core/pnet"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
@ -47,6 +43,9 @@ import (
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
prometheus "github.com/prometheus/client_golang/prometheus"
|
||||
promauto "github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"go.opentelemetry.io/otel"
|
||||
promexporter "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -57,6 +56,7 @@ const (
|
||||
initProfileOptionKwd = "init-profile"
|
||||
ipfsMountKwd = "mount-ipfs"
|
||||
ipnsMountKwd = "mount-ipns"
|
||||
mfsMountKwd = "mount-mfs"
|
||||
migrateKwd = "migrate"
|
||||
mountKwd = "mount"
|
||||
offlineKwd = "offline" // global option
|
||||
@ -67,6 +67,7 @@ const (
|
||||
routingOptionDHTServerKwd = "dhtserver"
|
||||
routingOptionNoneKwd = "none"
|
||||
routingOptionCustomKwd = "custom"
|
||||
routingOptionDelegatedKwd = "delegated"
|
||||
routingOptionDefaultKwd = "default"
|
||||
routingOptionAutoKwd = "auto"
|
||||
routingOptionAutoClientKwd = "autoclient"
|
||||
@ -174,6 +175,7 @@ Headers.
|
||||
cmds.BoolOption(mountKwd, "Mounts IPFS to the filesystem using FUSE (experimental)"),
|
||||
cmds.StringOption(ipfsMountKwd, "Path to the mountpoint for IPFS (if using --mount). Defaults to config setting."),
|
||||
cmds.StringOption(ipnsMountKwd, "Path to the mountpoint for IPNS (if using --mount). Defaults to config setting."),
|
||||
cmds.StringOption(mfsMountKwd, "Path to the mountpoint for MFS (if using --mount). Defaults to config setting."),
|
||||
cmds.BoolOption(unrestrictedAPIAccessKwd, "Allow RPC API access to unlisted hashes"),
|
||||
cmds.BoolOption(unencryptTransportKwd, "Disable transport encryption (for debugging protocols)"),
|
||||
cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection"),
|
||||
@ -212,6 +214,21 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
|
||||
}
|
||||
|
||||
// Set up OpenTelemetry meter provider to enable metrics from external libraries
|
||||
// like go-libp2p-kad-dht. Without this, metrics registered via otel.Meter()
|
||||
// (such as total_provide_count from sweep provider) won't be exposed at the
|
||||
// /debug/metrics/prometheus endpoint.
|
||||
if exporter, err := promexporter.New(
|
||||
promexporter.WithRegisterer(prometheus.DefaultRegisterer),
|
||||
); err != nil {
|
||||
log.Errorf("Creating prometheus exporter for OpenTelemetry failed: %s (some metrics will be missing from /debug/metrics/prometheus)\n", err.Error())
|
||||
} else {
|
||||
meterProvider := sdkmetric.NewMeterProvider(
|
||||
sdkmetric.WithReader(exporter),
|
||||
)
|
||||
otel.SetMeterProvider(meterProvider)
|
||||
}
|
||||
|
||||
// let the user know we're going.
|
||||
fmt.Printf("Initializing daemon...\n")
|
||||
|
||||
@ -276,7 +293,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
}
|
||||
|
||||
var cacheMigrations, pinMigrations bool
|
||||
var fetcher migrations.Fetcher
|
||||
var externalMigrationFetcher migrations.Fetcher
|
||||
|
||||
// acquire the repo lock _before_ constructing a node. we need to make
|
||||
// sure we are permitted to access the resources (datastore, etc.)
|
||||
@ -285,75 +302,51 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
default:
|
||||
return err
|
||||
case fsrepo.ErrNeedMigration:
|
||||
migrationDone := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-req.Context.Done():
|
||||
os.Exit(1)
|
||||
case <-migrationDone:
|
||||
}
|
||||
}()
|
||||
|
||||
domigrate, found := req.Options[migrateKwd].(bool)
|
||||
fmt.Println("Found outdated fs-repo, migrations need to be run.")
|
||||
|
||||
// Get current repo version for more informative message
|
||||
currentVersion, verErr := migrations.RepoVersion(cctx.ConfigRoot)
|
||||
if verErr != nil {
|
||||
// Fallback to generic message if we can't read version
|
||||
fmt.Printf("Kubo repository at %s requires migration.\n", cctx.ConfigRoot)
|
||||
} else {
|
||||
fmt.Printf("Kubo repository at %s has version %d and needs to be migrated to version %d.\n",
|
||||
cctx.ConfigRoot, currentVersion, version.RepoVersion)
|
||||
}
|
||||
|
||||
if !found {
|
||||
domigrate = YesNoPrompt("Run migrations now? [y/N]")
|
||||
}
|
||||
close(migrationDone)
|
||||
|
||||
if !domigrate {
|
||||
fmt.Println("Not running migrations of fs-repo now.")
|
||||
fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.tech")
|
||||
return fmt.Errorf("fs-repo requires migration")
|
||||
fmt.Printf("Not running migrations on repository at %s. Re-run daemon with --migrate or see 'ipfs repo migrate --help'\n", cctx.ConfigRoot)
|
||||
return errors.New("fs-repo requires migration")
|
||||
}
|
||||
|
||||
// Read Migration section of IPFS config
|
||||
configFileOpt, _ := req.Options[commands.ConfigFileOption].(string)
|
||||
migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot, configFileOpt)
|
||||
// Use hybrid migration strategy that intelligently combines external and embedded migrations
|
||||
// Use req.Context instead of cctx.Context() to avoid attempting repo open before migrations complete
|
||||
err = migrations.RunHybridMigrations(req.Context, version.RepoVersion, cctx.ConfigRoot, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Define function to create IPFS fetcher. Do not supply an
|
||||
// already-constructed IPFS fetcher, because this may be expensive and
|
||||
// not needed according to migration config. Instead, supply a function
|
||||
// to construct the particular IPFS fetcher implementation used here,
|
||||
// which is called only if an IPFS fetcher is needed.
|
||||
newIpfsFetcher := func(distPath string) migrations.Fetcher {
|
||||
return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot, configFileOpt)
|
||||
}
|
||||
|
||||
// Fetch migrations from current distribution, or location from environ
|
||||
fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist)
|
||||
|
||||
// Create fetchers according to migrationCfg.DownloadSources
|
||||
fetcher, err = migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fetcher.Close()
|
||||
|
||||
if migrationCfg.Keep == "cache" {
|
||||
cacheMigrations = true
|
||||
} else if migrationCfg.Keep == "pin" {
|
||||
pinMigrations = true
|
||||
}
|
||||
|
||||
if cacheMigrations || pinMigrations {
|
||||
// Create temp directory to store downloaded migration archives
|
||||
migrations.DownloadDirectory, err = os.MkdirTemp("", "migrations")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Defer cleanup of download directory so that it gets cleaned up
|
||||
// if daemon returns early due to error
|
||||
defer func() {
|
||||
if migrations.DownloadDirectory != "" {
|
||||
os.RemoveAll(migrations.DownloadDirectory)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
err = migrations.RunMigration(cctx.Context(), fetcher, fsrepo.RepoVersion, "", false)
|
||||
if err != nil {
|
||||
fmt.Println("The migrations of fs-repo failed:")
|
||||
fmt.Println("Repository migration failed:")
|
||||
fmt.Printf(" %s\n", err)
|
||||
fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
|
||||
fmt.Println(" https://github.com/ipfs/fs-repo-migrations")
|
||||
fmt.Println(" https://github.com/ipfs/kubo")
|
||||
return err
|
||||
}
|
||||
|
||||
// Note: Migration caching/pinning functionality has been deprecated
|
||||
// The hybrid migration system handles legacy migrations more efficiently
|
||||
|
||||
repo, err = fsrepo.Open(cctx.ConfigRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -380,6 +373,28 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate autoconf setup - check for private network conflict
|
||||
swarmKey, _ := repo.SwarmKey()
|
||||
isPrivateNetwork := swarmKey != nil || pnet.ForcePrivateNetwork
|
||||
if err := config.ValidateAutoConfWithRepo(cfg, isPrivateNetwork); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start background AutoConf updater if enabled
|
||||
if cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) {
|
||||
// Start autoconf client for background updates
|
||||
client, err := config.GetAutoConfClient(cfg)
|
||||
if err != nil {
|
||||
log.Errorf("failed to create autoconf client: %v", err)
|
||||
} else {
|
||||
// Start primes cache and starts background updater
|
||||
// Use req.Context for background updater lifecycle (node doesn't exist yet)
|
||||
if _, err := client.Start(req.Context); err != nil {
|
||||
log.Errorf("failed to start autoconf updater: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("PeerID: %s\n", cfg.Identity.PeerID)
|
||||
|
||||
if !psSet {
|
||||
@ -403,22 +418,39 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
}
|
||||
|
||||
routingOption, _ := req.Options[routingOptionKwd].(string)
|
||||
if routingOption == routingOptionDefaultKwd {
|
||||
routingOption = cfg.Routing.Type.WithDefault(routingOptionAutoKwd)
|
||||
if routingOption == routingOptionDefaultKwd || routingOption == "" {
|
||||
routingOption = cfg.Routing.Type.WithDefault(config.DefaultRoutingType)
|
||||
if routingOption == "" {
|
||||
routingOption = routingOptionAutoKwd
|
||||
}
|
||||
}
|
||||
|
||||
// Private setups can't leverage peers returned by default IPNIs (Routing.Type=auto)
|
||||
// To avoid breaking existing setups, switch them to DHT-only.
|
||||
if routingOption == routingOptionAutoKwd {
|
||||
if key, _ := repo.SwarmKey(); key != nil || pnet.ForcePrivateNetwork {
|
||||
if key, _ := repo.SwarmKey(); key != nil || pnet.ForcePrivateNetwork {
|
||||
// Private setups can't leverage peers returned by default IPNIs (Routing.Type=auto)
|
||||
// To avoid breaking existing setups, switch them to DHT-only.
|
||||
if routingOption == routingOptionAutoKwd {
|
||||
log.Error("Private networking (swarm.key / LIBP2P_FORCE_PNET) does not work with public HTTP IPNIs enabled by Routing.Type=auto. Kubo will use Routing.Type=dht instead. Update config to remove this message.")
|
||||
routingOption = routingOptionDHTKwd
|
||||
}
|
||||
|
||||
// Private setups should not use public AutoTLS infrastructure
|
||||
// as it will leak their existence and PeerID identity to CA
|
||||
// and they will show up at https://crt.sh/?q=libp2p.direct
|
||||
enableAutoTLS := cfg.AutoTLS.Enabled.WithDefault(config.DefaultAutoTLSEnabled)
|
||||
if enableAutoTLS {
|
||||
if cfg.AutoTLS.Enabled != config.Default {
|
||||
// hard fail if someone tries to explicitly enable both
|
||||
return errors.New("private networking (swarm.key / LIBP2P_FORCE_PNET) does not work with AutoTLS.Enabled=true, update config to remove this message")
|
||||
} else {
|
||||
// print error and disable autotls if user runs on default settings
|
||||
log.Error("private networking (swarm.key / LIBP2P_FORCE_PNET) is not compatible with AutoTLS. Set AutoTLS.Enabled=false in config to remove this message.")
|
||||
cfg.AutoTLS.Enabled = config.False
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Use config for routing construction
|
||||
|
||||
switch routingOption {
|
||||
case routingOptionSupernodeKwd:
|
||||
return errors.New("supernode routing was never fully implemented and has been removed")
|
||||
@ -434,9 +466,11 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
ncfg.Routing = libp2p.DHTServerOption
|
||||
case routingOptionNoneKwd:
|
||||
ncfg.Routing = libp2p.NilRouterOption
|
||||
case routingOptionDelegatedKwd:
|
||||
ncfg.Routing = libp2p.ConstructDelegatedOnlyRouting(cfg)
|
||||
case routingOptionCustomKwd:
|
||||
if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) {
|
||||
return fmt.Errorf("Routing.AcceleratedDHTClient option is set even tho Routing.Type is custom, using custom .AcceleratedDHTClient needs to be set on DHT routers individually")
|
||||
return errors.New("Routing.AcceleratedDHTClient option is set even tho Routing.Type is custom, using custom .AcceleratedDHTClient needs to be set on DHT routers individually")
|
||||
}
|
||||
ncfg.Routing = libp2p.ConstructDelegatedRouting(
|
||||
cfg.Routing.Routers,
|
||||
@ -444,6 +478,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
cfg.Identity.PeerID,
|
||||
cfg.Addresses,
|
||||
cfg.Identity.PrivKey,
|
||||
cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled),
|
||||
)
|
||||
default:
|
||||
return fmt.Errorf("unrecognized routing option: %s", routingOption)
|
||||
@ -467,10 +502,39 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
fmt.Printf("Swarm key fingerprint: %x\n", node.PNetFingerprint)
|
||||
}
|
||||
|
||||
if (pnet.ForcePrivateNetwork || node.PNetFingerprint != nil) && routingOption == routingOptionAutoKwd {
|
||||
if (pnet.ForcePrivateNetwork || node.PNetFingerprint != nil) && (routingOption == routingOptionAutoKwd || routingOption == routingOptionAutoClientKwd) {
|
||||
// This should never happen, but better safe than sorry
|
||||
log.Fatal("Private network does not work with Routing.Type=auto. Update your config to Routing.Type=dht (or none, and do manual peering)")
|
||||
}
|
||||
// Check for deprecated Provider/Reprovider configuration after migration
|
||||
// This should never happen for regular users, but is useful error for people who have Docker orchestration
|
||||
// that blindly sets config keys (overriding automatic Kubo migration).
|
||||
//nolint:staticcheck // intentionally checking deprecated fields
|
||||
if cfg.Provider.Enabled != config.Default || !cfg.Provider.Strategy.IsDefault() || !cfg.Provider.WorkerCount.IsDefault() {
|
||||
log.Fatal("Deprecated configuration detected. Manually migrate 'Provider' fields to 'Provide' and remove 'Provider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
|
||||
}
|
||||
//nolint:staticcheck // intentionally checking deprecated fields
|
||||
if !cfg.Reprovider.Interval.IsDefault() || !cfg.Reprovider.Strategy.IsDefault() {
|
||||
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.DHT.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
|
||||
}
|
||||
// Check for deprecated "flat" strategy (should have been migrated to "all")
|
||||
if cfg.Provide.Strategy.WithDefault("") == "flat" {
|
||||
log.Fatal("Provide.Strategy='flat' is no longer supported. Use 'all' instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy")
|
||||
}
|
||||
if cfg.Experimental.StrategicProviding {
|
||||
log.Fatal("Experimental.StrategicProviding was removed. Remove it from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
|
||||
}
|
||||
// Check for invalid MaxWorkers=0 with SweepEnabled
|
||||
if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) &&
|
||||
cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers) == 0 {
|
||||
log.Fatal("Invalid configuration: Provide.DHT.MaxWorkers cannot be 0 when Provide.DHT.SweepEnabled=true. Set Provide.DHT.MaxWorkers to a positive value (e.g., 16) to control resource usage. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxworkers")
|
||||
}
|
||||
if routingOption == routingOptionDelegatedKwd {
|
||||
// Delegated routing is read-only mode - content providing must be disabled
|
||||
if cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
|
||||
log.Fatal("Routing.Type=delegated does not support content providing. Set Provide.Enabled=false in your config.")
|
||||
}
|
||||
}
|
||||
|
||||
printLibp2pPorts(node)
|
||||
|
||||
@ -502,6 +566,9 @@ take effect.
|
||||
}
|
||||
}()
|
||||
|
||||
// Clear any cached offline node and set the online daemon node
|
||||
// This ensures HTTP RPC server uses the online node, not any cached offline node
|
||||
cctx.ClearCachedNode()
|
||||
cctx.ConstructNode = func() (*core.IpfsNode, error) {
|
||||
return node, nil
|
||||
}
|
||||
@ -512,7 +579,20 @@ take effect.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node.Process.AddChild(goprocess.WithTeardown(cctx.Plugins.Close))
|
||||
|
||||
pluginErrc := make(chan error, 1)
|
||||
select {
|
||||
case <-node.Context().Done():
|
||||
close(pluginErrc)
|
||||
default:
|
||||
context.AfterFunc(node.Context(), func() {
|
||||
err := cctx.Plugins.Close()
|
||||
if err != nil {
|
||||
pluginErrc <- fmt.Errorf("closing plugins: %w", err)
|
||||
}
|
||||
close(pluginErrc)
|
||||
})
|
||||
}
|
||||
|
||||
// construct api endpoint - every time
|
||||
apiErrc, err := serveHTTPApi(req, cctx)
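Plugin teardown now hangs off the node context via context.AfterFunc (Go 1.21+) instead of goprocess. The pattern in isolation, standard library only; the function and names are hypothetical, not part of this diff.

// Sketch: run a Close when ctx ends and expose its error on a channel
// that a fan-in loop (like merge further down) can drain.
func closeOnDone(ctx context.Context, closer io.Closer) <-chan error {
	errc := make(chan error, 1)
	context.AfterFunc(ctx, func() {
		if err := closer.Close(); err != nil {
			errc <- fmt.Errorf("closing: %w", err)
		}
		close(errc)
	})
	return errc
}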
|
||||
@ -529,6 +609,11 @@ take effect.
|
||||
if err := mountFuse(req, cctx); err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if _err != nil {
|
||||
nodeMount.Unmount(node)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// repo blockstore GC - if --enable-gc flag is present
|
||||
@ -537,9 +622,9 @@ take effect.
|
||||
return err
|
||||
}
|
||||
|
||||
// Add any files downloaded by migration.
|
||||
if cacheMigrations || pinMigrations {
|
||||
err = addMigrations(cctx.Context(), node, fetcher, pinMigrations)
|
||||
// Add any files downloaded by external migrations (embedded migrations don't download files)
|
||||
if externalMigrationFetcher != nil && (cacheMigrations || pinMigrations) {
|
||||
err = addMigrations(cctx.Context(), node, externalMigrationFetcher, pinMigrations)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Could not add migration to IPFS:", err)
|
||||
}
|
||||
@ -548,10 +633,10 @@ take effect.
|
||||
os.RemoveAll(migrations.DownloadDirectory)
|
||||
migrations.DownloadDirectory = ""
|
||||
}
|
||||
if fetcher != nil {
|
||||
if externalMigrationFetcher != nil {
|
||||
// If there is an error closing the IpfsFetcher, then print error, but
|
||||
// do not fail because of it.
|
||||
err = fetcher.Close()
|
||||
err = externalMigrationFetcher.Close()
|
||||
if err != nil {
|
||||
log.Errorf("error closing IPFS fetcher: %s", err)
|
||||
}
|
||||
@ -601,19 +686,32 @@ take effect.
|
||||
}()
|
||||
|
||||
if !offline {
|
||||
// Warn users who were victims of 'lowprofile' footgun (https://github.com/ipfs/kubo/pull/10524)
|
||||
if cfg.Experimental.StrategicProviding {
|
||||
// Warn users when provide systems are disabled
|
||||
if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
|
||||
fmt.Print(`
|
||||
⚠️ Reprovide system is disabled due to 'Experimental.StrategicProviding=true'
|
||||
|
||||
⚠️ Provide and Reprovide systems are disabled due to 'Provide.Enabled=false'
|
||||
⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
|
||||
⚠️ If this is not intentional, call 'ipfs config profile apply announce-on'
|
||||
⚠️ If this is not intentional, call 'ipfs config profile apply announce-on' or set Provide.Enabled=true'
|
||||
|
||||
`)
|
||||
} else if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
|
||||
} else if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
|
||||
fmt.Print(`
|
||||
⚠️ Reprovider system is disabled due to 'Reprovider.Interval=0'
|
||||
⚠️ Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
|
||||
⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h'
|
||||
|
||||
⚠️ Providing to the DHT is disabled due to 'Provide.DHT.Interval=0'
|
||||
⚠️ Local CIDs will not be provided to Amino DHT, making them impossible to retrieve without manual peering
|
||||
⚠️ If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Provide.DHT.Interval=22h'
|
||||
|
||||
`)
|
||||
}
|
||||
|
||||
// Inform user about Routing.AcceleratedDHTClient when enabled
|
||||
if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) {
|
||||
fmt.Print(`
|
||||
|
||||
ℹ️ Routing.AcceleratedDHTClient is enabled for faster content discovery
|
||||
ℹ️ and DHT provides. Routing table is initializing. IPFS is ready to use,
|
||||
ℹ️ but performance will improve over time as more peers are discovered
|
||||
|
||||
`)
|
||||
}
|
||||
@ -661,16 +759,26 @@ take effect.
|
||||
log.Fatal("Support for IPFS_REUSEPORT was removed. Use LIBP2P_TCP_REUSEPORT instead.")
|
||||
}
|
||||
|
||||
unmountErrc := make(chan error)
|
||||
context.AfterFunc(node.Context(), func() {
|
||||
<-node.Context().Done()
|
||||
nodeMount.Unmount(node)
|
||||
close(unmountErrc)
|
||||
})
|
||||
|
||||
// collect long-running errors and block for shutdown
|
||||
// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
|
||||
var errs error
|
||||
for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc) {
|
||||
var errs []error
|
||||
for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc, pluginErrc, unmountErrc) {
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
return errs
|
||||
return nil
|
||||
}
|
||||
|
||||
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests.
|
||||
@ -723,10 +831,18 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
|
||||
for _, listener := range listeners {
|
||||
// we might have listened to /tcp/0 - let's see what we are listing on
|
||||
fmt.Printf("RPC API server listening on %s\n", listener.Multiaddr())
|
||||
// Browsers require TCP.
|
||||
// Browsers require TCP with explicit host.
|
||||
switch listener.Addr().Network() {
|
||||
case "tcp", "tcp4", "tcp6":
|
||||
fmt.Printf("WebUI: http://%s/webui\n", listener.Addr())
|
||||
rpc := listener.Addr().String()
|
||||
// replace catch-all with explicit localhost URL that works in browsers
|
||||
// https://github.com/ipfs/kubo/issues/10515
|
||||
if strings.Contains(rpc, "0.0.0.0:") {
|
||||
rpc = strings.Replace(rpc, "0.0.0.0:", "127.0.0.1:", 1)
|
||||
} else if strings.Contains(rpc, "[::]:") {
|
||||
rpc = strings.Replace(rpc, "[::]:", "[::1]:", 1)
|
||||
}
|
||||
fmt.Printf("WebUI: http://%s/webui\n", rpc)
|
||||
}
|
||||
}
|
||||
|
||||
@ -767,23 +883,38 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
|
||||
return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err)
|
||||
}
|
||||
|
||||
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
|
||||
errc := make(chan error, len(listeners))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Start all servers and wait for them to be ready before writing api file.
|
||||
// This prevents race conditions where external tools (like systemd path units)
|
||||
// see the file and try to connect before servers can accept connections.
|
||||
if len(listeners) > 0 {
|
||||
// Only add an api file if the API is running.
|
||||
readyChannels := make([]chan struct{}, len(listeners))
|
||||
for i, lis := range listeners {
|
||||
readyChannels[i] = make(chan struct{})
|
||||
ready := readyChannels[i]
|
||||
wg.Go(func() {
|
||||
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for all listeners to be ready or any to fail
|
||||
for _, ready := range readyChannels {
|
||||
select {
|
||||
case <-ready:
|
||||
// This listener is ready
|
||||
case err := <-errc:
|
||||
return nil, fmt.Errorf("serveHTTPApi: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil {
|
||||
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
var wg sync.WaitGroup
|
||||
for _, apiLis := range listeners {
|
||||
wg.Add(1)
|
||||
go func(lis manet.Listener) {
|
||||
defer wg.Done()
|
||||
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
|
||||
}(apiLis)
|
||||
}
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
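corehttp.ServeWithReady itself is not part of this diff; the sketch below is only an assumed shape for such a helper. The listener is already bound by the caller, so readiness can be signalled before blocking in Serve, and callers wait on that signal before writing the api file, as the hunk above does.

// Sketch (assumed shape, not kubo's implementation): signal readiness,
// then block serving on the already-bound listener.
func serveWithReady(l net.Listener, h http.Handler, ready chan<- struct{}) error {
	srv := &http.Server{Handler: h}
	close(ready) // l is bound; new connections queue until Serve runs
	return srv.Serve(l)
}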
|
||||
@ -796,9 +927,9 @@ func rewriteMaddrToUseLocalhostIfItsAny(maddr ma.Multiaddr) ma.Multiaddr {
|
||||
first, rest := ma.SplitFirst(maddr)
|
||||
|
||||
switch {
|
||||
case first.Equal(manet.IP4Unspecified):
|
||||
case first.Equal(&manet.IP4Unspecified[0]):
|
||||
return manet.IP4Loopback.Encapsulate(rest)
|
||||
case first.Equal(manet.IP6Unspecified):
|
||||
case first.Equal(&manet.IP6Unspecified[0]):
|
||||
return manet.IP6Loopback.Encapsulate(rest)
|
||||
default:
|
||||
return maddr // not ip
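Expected behaviour of the helper above, as a quick illustration (addresses are examples):

// /ip4/0.0.0.0/tcp/5001  -> /ip4/127.0.0.1/tcp/5001
// /ip6/::/tcp/5001       -> /ip6/::1/tcp/5001
// /ip4/10.0.0.7/tcp/5001 -> unchanged (not an unspecified address)
m := ma.StringCast("/ip4/0.0.0.0/tcp/5001")
fmt.Println(rewriteMaddrToUseLocalhostIfItsAny(m)) // /ip4/127.0.0.1/tcp/5001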
|
||||
@ -812,6 +943,12 @@ func printLibp2pPorts(node *core.IpfsNode) {
|
||||
return
|
||||
}
|
||||
|
||||
if node.PeerHost == nil {
|
||||
log.Error("PeerHost is nil - this should not happen and likely indicates an FX dependency injection issue or race condition")
|
||||
fmt.Println("Swarm not properly initialized - node PeerHost is nil.")
|
||||
return
|
||||
}
|
||||
|
||||
ifaceAddrs, err := node.PeerHost.Network().InterfaceListenAddresses()
|
||||
if err != nil {
|
||||
log.Errorf("failed to read listening addresses: %s", err)
|
||||
@ -936,26 +1073,42 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
|
||||
return nil, fmt.Errorf("serveHTTPGateway: ConstructNode() failed: %s", err)
|
||||
}
|
||||
|
||||
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
|
||||
errc := make(chan error, len(listeners))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Start all servers and wait for them to be ready before writing gateway file.
|
||||
// This prevents race conditions where external tools (like systemd path units)
|
||||
// see the file and try to connect before servers can accept connections.
|
||||
if len(listeners) > 0 {
|
||||
readyChannels := make([]chan struct{}, len(listeners))
|
||||
for i, lis := range listeners {
|
||||
readyChannels[i] = make(chan struct{})
|
||||
ready := readyChannels[i]
|
||||
wg.Go(func() {
|
||||
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for all listeners to be ready or any to fail
|
||||
for _, ready := range readyChannels {
|
||||
select {
|
||||
case <-ready:
|
||||
// This listener is ready
|
||||
case err := <-errc:
|
||||
return nil, fmt.Errorf("serveHTTPGateway: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
addr, err := manet.ToNetAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("serveHTTPGateway: manet.ToIP() failed: %w", err)
|
||||
return nil, fmt.Errorf("serveHTTPGateway: manet.ToNetAddr() failed: %w", err)
|
||||
}
|
||||
if err := node.Repo.SetGatewayAddr(addr); err != nil {
|
||||
return nil, fmt.Errorf("serveHTTPGateway: SetGatewayAddr() failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
var wg sync.WaitGroup
|
||||
for _, lis := range listeners {
|
||||
wg.Add(1)
|
||||
go func(lis manet.Listener) {
|
||||
defer wg.Done()
|
||||
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
|
||||
}(lis)
|
||||
}
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
|
||||
@ -993,6 +1146,10 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if node.PeerHost == nil {
|
||||
return nil, fmt.Errorf("cannot create libp2p gateway: node PeerHost is nil (this should not happen and likely indicates an FX dependency injection issue or race condition)")
|
||||
}
|
||||
|
||||
h := p2phttp.Host{
|
||||
StreamHost: node.PeerHost,
|
||||
}
|
||||
@ -1003,14 +1160,13 @@ func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error
|
||||
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
defer close(errc)
|
||||
errc <- h.Serve()
|
||||
close(errc)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
<-node.Process.Closing()
|
||||
context.AfterFunc(node.Context(), func() {
|
||||
h.Close()
|
||||
}()
|
||||
})
|
||||
|
||||
return errc, nil
|
||||
}
|
||||
@ -1026,23 +1182,58 @@ func mountFuse(req *cmds.Request, cctx *oldcmds.Context) error {
|
||||
if !found {
|
||||
fsdir = cfg.Mounts.IPFS
|
||||
}
|
||||
if err := checkFusePath("Mounts.IPFS", fsdir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nsdir, found := req.Options[ipnsMountKwd].(string)
|
||||
if !found {
|
||||
nsdir = cfg.Mounts.IPNS
|
||||
}
|
||||
if err := checkFusePath("Mounts.IPNS", nsdir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mfsdir, found := req.Options[mfsMountKwd].(string)
|
||||
if !found {
|
||||
mfsdir = cfg.Mounts.MFS
|
||||
}
|
||||
if err := checkFusePath("Mounts.MFS", mfsdir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
node, err := cctx.ConstructNode()
|
||||
if err != nil {
|
||||
return fmt.Errorf("mountFuse: ConstructNode() failed: %s", err)
|
||||
}
|
||||
|
||||
err = nodeMount.Mount(node, fsdir, nsdir)
|
||||
err = nodeMount.Mount(node, fsdir, nsdir, mfsdir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("IPFS mounted at: %s\n", fsdir)
|
||||
fmt.Printf("IPNS mounted at: %s\n", nsdir)
|
||||
fmt.Printf("MFS mounted at: %s\n", mfsdir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkFusePath(name, path string) error {
|
||||
if path == "" {
|
||||
return fmt.Errorf("%s path cannot be empty", name)
|
||||
}
|
||||
|
||||
fileInfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return fmt.Errorf("%s path (%q) does not exist: %w", name, path, err)
|
||||
}
|
||||
return fmt.Errorf("error while inspecting %s path (%q): %w", name, path, err)
|
||||
}
|
||||
|
||||
if !fileInfo.IsDir() {
|
||||
return fmt.Errorf("%s path (%q) is not a directory", name, path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1060,14 +1251,14 @@ func maybeRunGC(req *cmds.Request, node *core.IpfsNode) (<-chan error, error) {
|
||||
return errc, nil
|
||||
}
|
||||
|
||||
// merge does fan-in of multiple read-only error channels
|
||||
// taken from http://blog.golang.org/pipelines
|
||||
// merge does fan-in of multiple read-only error channels.
|
||||
func merge(cs ...<-chan error) <-chan error {
|
||||
var wg sync.WaitGroup
|
||||
out := make(chan error)
|
||||
|
||||
// Start an output goroutine for each input channel in cs. output
|
||||
// copies values from c to out until c is closed, then calls wg.Done.
|
||||
// Start a goroutine for each input channel in cs, that copies values from
|
||||
// the input channel to the output channel until the input channel is
|
||||
// closed.
|
||||
output := func(c <-chan error) {
|
||||
for n := range c {
|
||||
out <- n
|
||||
@ -1081,8 +1272,8 @@ func merge(cs ...<-chan error) <-chan error {
|
||||
}
|
||||
}
|
||||
|
||||
// Start a goroutine to close out once all the output goroutines are
|
||||
// done. This must start after the wg.Add call.
|
||||
// Start a goroutine to close out once all the output goroutines, and other
|
||||
// things to wait on, are done.
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(out)
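A tiny usage sketch for the fan-in helper (channel contents are illustrative): the output channel closes only after every input channel has closed, which is what lets the daemon's shutdown loop block until all subsystems have reported.

// Sketch: out closes once both inputs are closed and drained.
a := make(chan error, 1)
b := make(chan error, 1)
a <- errors.New("api stopped")
close(a)
close(b)
for err := range merge(a, b) {
	fmt.Println(err) // prints "api stopped", then the loop ends
}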
|
||||
@ -1153,8 +1344,6 @@ Visit https://github.com/ipfs/kubo/releases or https://dist.ipfs.tech/#kubo and
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-nd.Process.Closing():
|
||||
return
|
||||
case <-ticker.C:
|
||||
continue
|
||||
}
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package kubo
|
||||
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package kubo
|
||||
|
||||
|
||||
@ -88,11 +88,11 @@ environment variable:
|
||||
if it.Err() != nil {
|
||||
return it.Err()
|
||||
}
|
||||
return fmt.Errorf("file argument was nil")
|
||||
return errors.New("file argument was nil")
|
||||
}
|
||||
file := files.FileFromEntry(it)
|
||||
if file == nil {
|
||||
return fmt.Errorf("expected a regular file")
|
||||
return errors.New("expected a regular file")
|
||||
}
|
||||
|
||||
conf = &config.Config{}
|
||||
|
||||
@ -6,16 +6,14 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
pinclient "github.com/ipfs/boxo/pinning/remote/client"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
|
||||
config "github.com/ipfs/kubo/config"
|
||||
"github.com/ipfs/kubo/core"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// mfslog is the logger for remote mfs pinning.
|
||||
@ -90,34 +88,46 @@ func pinMFSOnChange(cctx pinMFSContext, configPollInterval time.Duration, node p
|
||||
case <-cctx.Context().Done():
|
||||
return
|
||||
case <-tmo.C:
|
||||
tmo.Reset(configPollInterval)
|
||||
}
|
||||
// reread the config, which may have changed in the meantime
|
||||
cfg, err := cctx.GetConfig()
|
||||
if err != nil {
|
||||
mfslog.Errorf("pinning reading config (%v)", err)
|
||||
continue
|
||||
}
|
||||
mfslog.Debugf("pinning loop is awake, %d remote services", len(cfg.Pinning.RemoteServices))
|
||||
|
||||
// reread the config, which may have changed in the meantime
|
||||
cfg, err := cctx.GetConfig()
|
||||
if err != nil {
|
||||
mfslog.Errorf("pinning reading config (%v)", err)
|
||||
continue
|
||||
// pin to all remote services in parallel
|
||||
pinAllMFS(cctx.Context(), node, cfg, lastPins)
|
||||
}
|
||||
mfslog.Debugf("pinning loop is awake, %d remote services", len(cfg.Pinning.RemoteServices))
|
||||
|
||||
// get the most recent MFS root cid
|
||||
rootNode, err := node.RootNode()
|
||||
if err != nil {
|
||||
mfslog.Errorf("pinning reading MFS root (%v)", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// pin to all remote services in parallel
|
||||
pinAllMFS(cctx.Context(), node, cfg, rootNode.Cid(), lastPins)
|
||||
// pinAllMFS may take long. Reset interval only when we are done doing it
|
||||
// so that we are not pinning constantly.
|
||||
tmo.Reset(configPollInterval)
|
||||
}
|
||||
}
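Resetting the timer only after pinAllMFS returns turns the loop into fixed-delay polling, so a slow pinning round cannot trigger an immediate second one. The same idea in isolation; doWork and interval are placeholders, not names from this diff.

// Sketch: fixed-delay polling, where the next wait starts after the work ends.
tmo := time.NewTimer(interval)
defer tmo.Stop()
for {
	select {
	case <-ctx.Done():
		return
	case <-tmo.C:
	}
	doWork(ctx)         // may take longer than interval
	tmo.Reset(interval) // safe: the timer already fired and was drained
}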
|
||||
|
||||
// pinAllMFS pins on all remote services in parallel to overcome DoS attacks.
|
||||
func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, rootCid cid.Cid, lastPins map[string]lastPin) {
|
||||
func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, lastPins map[string]lastPin) {
|
||||
ch := make(chan lastPin)
|
||||
var started int
|
||||
|
||||
// Bail out to mitigate issue below when not needing to do anything.
|
||||
if len(cfg.Pinning.RemoteServices) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// get the most recent MFS root cid.
|
||||
// Warning! This can be super expensive.
|
||||
// See https://github.com/ipfs/boxo/pull/751
|
||||
// and https://github.com/ipfs/kubo/issues/8694
|
||||
// Reading an MFS-directory nodes can take minutes due to
|
||||
// ever growing cache being synced to unixfs.
|
||||
rootNode, err := node.RootNode()
|
||||
if err != nil {
|
||||
mfslog.Errorf("pinning reading MFS root (%v)", err)
|
||||
return
|
||||
}
|
||||
rootCid := rootNode.Cid()
|
||||
|
||||
for svcName, svcConfig := range cfg.Pinning.RemoteServices {
|
||||
if ctx.Err() != nil {
|
||||
break
|
||||
@ -183,7 +193,7 @@ func pinMFS(ctx context.Context, node pinMFSNode, cid cid.Cid, svcName string, s
|
||||
|
||||
// check if MFS pin exists (across all possible states) and inspect its CID
|
||||
pinStatuses := []pinclient.Status{pinclient.StatusQueued, pinclient.StatusPinning, pinclient.StatusPinned, pinclient.StatusFailed}
|
||||
lsPinCh, lsErrCh := c.Ls(ctx, pinclient.PinOpts.FilterName(pinName), pinclient.PinOpts.FilterStatus(pinStatuses...))
|
||||
lsPinCh, lsErrCh := c.GoLs(ctx, pinclient.PinOpts.FilterName(pinName), pinclient.PinOpts.FilterStatus(pinStatuses...))
|
||||
existingRequestID := "" // is there any pre-existing MFS pin with pinName (for any CID)?
|
||||
pinning := false // is CID for current MFS already being pinned?
|
||||
pinTime := time.Now().UTC()
|
||||
|
||||
@ -94,11 +94,24 @@ func TestPinMFSRootNodeError(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*testConfigPollInterval)
|
||||
defer cancel()
|
||||
|
||||
// need at least one config to trigger
|
||||
cfg := &config.Config{
|
||||
Pinning: config.Pinning{
|
||||
RemoteServices: map[string]config.RemotePinningService{
|
||||
"A": {
|
||||
Policies: config.RemotePinningServicePolicies{
|
||||
MFS: config.RemotePinningServiceMFSPolicy{
|
||||
Enable: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cctx := &testPinMFSContext{
|
||||
ctx: ctx,
|
||||
cfg: &config.Config{
|
||||
Pinning: config.Pinning{},
|
||||
},
|
||||
cfg: cfg,
|
||||
err: nil,
|
||||
}
|
||||
node := &testPinMFSNode{
|
||||
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
@ -16,12 +17,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/google/uuid"
|
||||
u "github.com/ipfs/boxo/util"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
"github.com/ipfs/go-ipfs-cmds/cli"
|
||||
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
|
||||
logging "github.com/ipfs/go-log"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
ipfs "github.com/ipfs/kubo"
|
||||
"github.com/ipfs/kubo/client/rpc/auth"
|
||||
"github.com/ipfs/kubo/cmd/ipfs/util"
|
||||
@ -34,6 +34,7 @@ import (
|
||||
"github.com/ipfs/kubo/repo"
|
||||
"github.com/ipfs/kubo/repo/fsrepo"
|
||||
"github.com/ipfs/kubo/tracing"
|
||||
"github.com/libp2p/go-libp2p/gologshim"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
madns "github.com/multiformats/go-multiaddr-dns"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
@ -51,6 +52,17 @@ var (
|
||||
tracer trace.Tracer
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Set go-log's slog handler as the application-wide default.
|
||||
// This ensures all slog-based logging uses go-log's formatting.
|
||||
slog.SetDefault(slog.New(logging.SlogHandler()))
|
||||
|
||||
// Wire go-log's slog bridge to go-libp2p's gologshim.
|
||||
// This provides go-libp2p loggers with the "logger" attribute
|
||||
// for per-subsystem level control (e.g., `ipfs log level libp2p-swarm debug`).
|
||||
gologshim.SetDefaultHandler(logging.SlogHandler())
|
||||
}
|
||||
|
||||
// declared as a var for testing purposes.
|
||||
var dnsResolver = madns.DefaultResolver
|
||||
|
||||
@ -89,16 +101,6 @@ func printErr(err error) int {
|
||||
return 1
|
||||
}
|
||||
|
||||
func newUUID(key string) logging.Metadata {
|
||||
ids := "#UUID-ERROR#"
|
||||
if id, err := uuid.NewRandom(); err == nil {
|
||||
ids = id.String()
|
||||
}
|
||||
return logging.Metadata{
|
||||
key: ids,
|
||||
}
|
||||
}
|
||||
|
||||
func BuildDefaultEnv(ctx context.Context, req *cmds.Request) (cmds.Environment, error) {
|
||||
return BuildEnv(nil)(ctx, req)
|
||||
}
|
||||
@ -157,8 +159,7 @@ func BuildEnv(pl PluginPreloader) func(ctx context.Context, req *cmds.Request) (
|
||||
// - output the response
|
||||
// - if anything fails, print error, maybe with help.
|
||||
func Start(buildEnv func(ctx context.Context, req *cmds.Request) (cmds.Environment, error)) (exitCode int) {
|
||||
ctx := logging.ContextWithLoggable(context.Background(), newUUID("session"))
|
||||
|
||||
ctx := context.Background()
|
||||
tp, err := tracing.NewTracerProvider(ctx)
|
||||
if err != nil {
|
||||
return printErr(err)
|
||||
@ -226,7 +227,10 @@ func insideGUI() bool {
|
||||
func checkDebug(req *cmds.Request) {
|
||||
// check if user wants to debug. option OR env var.
|
||||
debug, _ := req.Options["debug"].(bool)
|
||||
if debug || os.Getenv("IPFS_LOGGING") == "debug" {
|
||||
ipfsLogLevel, _ := logging.Parse(os.Getenv("IPFS_LOGGING")) // IPFS_LOGGING is deprecated
|
||||
goLogLevel, _ := logging.Parse(os.Getenv("GOLOG_LOG_LEVEL"))
|
||||
|
||||
if debug || goLogLevel == logging.LevelDebug || ipfsLogLevel == logging.LevelDebug {
|
||||
u.Debug = true
|
||||
logging.SetDebugLogging()
|
||||
}
|
||||
@ -330,6 +334,11 @@ func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp4", "tcp6":
|
||||
tpt = http.DefaultTransport
|
||||
// RPC over HTTPS requires an explicit scheme in the address passed to cmdhttp.NewClient
|
||||
httpAddr := apiAddr.String()
|
||||
if !strings.HasPrefix(host, "http:") && !strings.HasPrefix(host, "https:") && (strings.Contains(httpAddr, "/https") || strings.Contains(httpAddr, "/tls/http")) {
|
||||
host = "https://" + host
|
||||
}
|
||||
case "unix":
|
||||
path := host
|
||||
host = "unix"
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build testrunmain
|
||||
// +build testrunmain
|
||||
|
||||
package main_test
|
||||
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build !wasm
|
||||
// +build !wasm
|
||||
|
||||
package util
|
||||
|
||||
@ -64,13 +63,7 @@ func SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {
|
||||
switch count {
|
||||
case 1:
|
||||
fmt.Println() // Prevent un-terminated ^C character in terminal
|
||||
|
||||
ih.wg.Add(1)
|
||||
go func() {
|
||||
defer ih.wg.Done()
|
||||
cancelFunc()
|
||||
}()
|
||||
|
||||
cancelFunc()
|
||||
default:
|
||||
fmt.Println("Received another interrupt before graceful shutdown, terminating...")
|
||||
os.Exit(-1)
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package util
|
||||
|
||||
|
||||
@ -6,7 +6,7 @@ import (
|
||||
"strconv"
|
||||
"syscall"
|
||||
|
||||
logging "github.com/ipfs/go-log"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var log = logging.Logger("ulimit")
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build freebsd
|
||||
// +build freebsd
|
||||
|
||||
package util
|
||||
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build !windows && !plan9
|
||||
// +build !windows,!plan9
|
||||
|
||||
package util
|
||||
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build darwin || linux || netbsd || openbsd
|
||||
// +build darwin linux netbsd openbsd
|
||||
|
||||
package util
|
||||
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package util
|
||||
|
||||
|
||||
@ -1,16 +1,15 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/kubo/thirdparty/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIsHidden(t *testing.T) {
|
||||
assert.True(IsHidden("bar/.git"), t, "dirs beginning with . should be recognized as hidden")
|
||||
assert.False(IsHidden("."), t, ". for current dir should not be considered hidden")
|
||||
assert.False(IsHidden("bar/baz"), t, "normal dirs should not be hidden")
|
||||
require.True(t, IsHidden("bar/.git"), "dirs beginning with . should be recognized as hidden")
|
||||
require.False(t, IsHidden("."), ". for current dir should not be considered hidden")
|
||||
require.False(t, IsHidden("bar/baz"), "normal dirs should not be hidden")
|
||||
}
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package main
|
||||
|
||||
@ -10,26 +9,40 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"syscall"
|
||||
|
||||
commands "github.com/ipfs/kubo/commands"
|
||||
"github.com/ipfs/kubo/config"
|
||||
core "github.com/ipfs/kubo/core"
|
||||
coreapi "github.com/ipfs/kubo/core/coreapi"
|
||||
corehttp "github.com/ipfs/kubo/core/corehttp"
|
||||
"github.com/ipfs/kubo/misc/fsutil"
|
||||
"github.com/ipfs/kubo/plugin"
|
||||
pluginbadgerds "github.com/ipfs/kubo/plugin/plugins/badgerds"
|
||||
pluginflatfs "github.com/ipfs/kubo/plugin/plugins/flatfs"
|
||||
pluginlevelds "github.com/ipfs/kubo/plugin/plugins/levelds"
|
||||
pluginpebbleds "github.com/ipfs/kubo/plugin/plugins/pebbleds"
|
||||
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
|
||||
|
||||
fsnotify "github.com/fsnotify/fsnotify"
|
||||
"github.com/ipfs/boxo/files"
|
||||
process "github.com/jbenet/goprocess"
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
var (
|
||||
http = flag.Bool("http", false, "expose IPFS HTTP API")
|
||||
repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
|
||||
repoPath *string
|
||||
watchPath = flag.String("path", ".", "the path to watch")
|
||||
)
|
||||
|
||||
func init() {
|
||||
ipfsPath, err := config.PathRoot()
|
||||
if err != nil {
|
||||
ipfsPath = os.Getenv(config.EnvDir)
|
||||
}
|
||||
repoPath = flag.String("repo", ipfsPath, "repo path to use")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
@ -53,11 +66,22 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func loadDatastorePlugins(plugins []plugin.Plugin) error {
|
||||
for _, pl := range plugins {
|
||||
if pl, ok := pl.(plugin.PluginDatastore); ok {
|
||||
err := fsrepo.AddDatastoreConfigHandler(pl.DatastoreTypeName(), pl.DatastoreConfigParser())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func run(ipfsPath, watchPath string) error {
|
||||
proc := process.WithParent(process.Background())
|
||||
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
|
||||
|
||||
ipfsPath, err := homedir.Expand(ipfsPath)
|
||||
ipfsPath, err := fsutil.ExpandHome(ipfsPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -71,6 +95,15 @@ func run(ipfsPath, watchPath string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = loadDatastorePlugins(slices.Concat(
|
||||
pluginbadgerds.Plugins,
|
||||
pluginflatfs.Plugins,
|
||||
pluginlevelds.Plugins,
|
||||
pluginpebbleds.Plugins,
|
||||
)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := fsrepo.Open(ipfsPath)
|
||||
if err != nil {
|
||||
// TODO handle case: daemon running
|
||||
@ -99,11 +132,11 @@ func run(ipfsPath, watchPath string) error {
|
||||
corehttp.WebUIOption,
|
||||
corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
|
||||
}
|
||||
proc.Go(func(p process.Process) {
|
||||
go func() {
|
||||
if err := corehttp.ListenAndServe(node, addr, opts...); err != nil {
|
||||
return
|
||||
}
|
||||
})
|
||||
}()
|
||||
}
|
||||
|
||||
interrupts := make(chan os.Signal, 1)
|
||||
@ -137,7 +170,7 @@ func run(ipfsPath, watchPath string) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
proc.Go(func(p process.Process) {
|
||||
go func() {
|
||||
file, err := os.Open(e.Name)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
@ -162,7 +195,7 @@ func run(ipfsPath, watchPath string) error {
|
||||
log.Println(err)
|
||||
}
|
||||
log.Printf("added %s... key: %s", e.Name, k)
|
||||
})
|
||||
}()
|
||||
}
|
||||
case err := <-watcher.Errors:
|
||||
log.Println(err)
|
||||
|
||||
@ -11,7 +11,7 @@ import (
|
||||
loader "github.com/ipfs/kubo/plugin/loader"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
logging "github.com/ipfs/go-log"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
config "github.com/ipfs/kubo/config"
|
||||
coreiface "github.com/ipfs/kubo/core/coreiface"
|
||||
options "github.com/ipfs/kubo/core/coreiface/options"
|
||||
@ -53,6 +53,23 @@ func (c *Context) GetNode() (*core.IpfsNode, error) {
|
||||
return c.node, err
|
||||
}
|
||||
|
||||
// ClearCachedNode clears any cached node, forcing GetNode to construct a new one.
|
||||
//
|
||||
// This method is critical for mitigating racy FX dependency injection behavior
|
||||
// that can occur during daemon startup. The daemon may create multiple IpfsNode
|
||||
// instances during initialization - first an offline node during early init, then
|
||||
// the proper online daemon node. Without clearing the cache, HTTP RPC handlers may
|
||||
// end up using the first (offline) cached node instead of the intended online daemon node.
|
||||
//
|
||||
// This behavior was likely present forever in go-ipfs, but recent changes made it more
|
||||
// prominent and forced us to proactively mitigate FX shortcomings. The daemon calls
|
||||
// this method immediately before setting its ConstructNode function to ensure that
|
||||
// subsequent GetNode() calls use the correct online daemon node rather than any
|
||||
// stale cached offline node from initialization.
|
||||
func (c *Context) ClearCachedNode() {
|
||||
c.node = nil
|
||||
}
|
||||
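A minimal sketch of the call order described above, roughly as the daemon performs it (variable names are hypothetical):

    // Once the real online node has been constructed:
    cctx.ClearCachedNode() // drop any offline node cached during early init
    cctx.ConstructNode = func() (*core.IpfsNode, error) {
        return onlineNode, nil // later GetNode()/GetAPI() calls now see the daemon node
    }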
|
||||
// GetAPI returns CoreAPI instance backed by ipfs node.
|
||||
// It may construct the node with the provided function.
|
||||
func (c *Context) GetAPI() (coreiface.CoreAPI, error) {
|
||||
|
||||
319
config/autoconf.go
Normal file
319
config/autoconf.go
Normal file
@ -0,0 +1,319 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"math/rand"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/boxo/autoconf"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
var log = logging.Logger("config")
|
||||
|
||||
// AutoConf contains the configuration for the autoconf subsystem
|
||||
type AutoConf struct {
|
||||
// URL is the HTTP(S) URL to fetch the autoconf.json from
|
||||
// Default: see boxo/autoconf.MainnetAutoConfURL
|
||||
URL *OptionalString `json:",omitempty"`
|
||||
|
||||
// Enabled determines whether to use autoconf
|
||||
// Default: true
|
||||
Enabled Flag `json:",omitempty"`
|
||||
|
||||
// RefreshInterval is how often to refresh autoconf data
|
||||
// Default: 24h
|
||||
RefreshInterval *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// TLSInsecureSkipVerify allows skipping TLS verification (for testing only)
|
||||
// Default: false
|
||||
TLSInsecureSkipVerify Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
// AutoPlaceholder is the string used as a placeholder for autoconf values
|
||||
AutoPlaceholder = "auto"
|
||||
|
||||
// DefaultAutoConfEnabled is the default value for AutoConf.Enabled
|
||||
DefaultAutoConfEnabled = true
|
||||
|
||||
// DefaultAutoConfURL is the default URL for fetching autoconf
|
||||
DefaultAutoConfURL = autoconf.MainnetAutoConfURL
|
||||
|
||||
// DefaultAutoConfRefreshInterval is the default interval for refreshing autoconf data
|
||||
DefaultAutoConfRefreshInterval = autoconf.DefaultRefreshInterval
|
||||
|
||||
// AutoConf client configuration constants
|
||||
DefaultAutoConfCacheSize = autoconf.DefaultCacheSize
|
||||
DefaultAutoConfTimeout = autoconf.DefaultTimeout
|
||||
)
|
||||
|
||||
// getNativeSystems returns the list of systems that should be used natively based on routing type
|
||||
func getNativeSystems(routingType string) []string {
|
||||
switch routingType {
|
||||
case "dht", "dhtclient", "dhtserver":
|
||||
return []string{autoconf.SystemAminoDHT} // Only native DHT
|
||||
case "auto", "autoclient":
|
||||
return []string{autoconf.SystemAminoDHT} // Native DHT, delegated others
|
||||
case "delegated":
|
||||
return []string{} // Everything delegated
|
||||
case "none":
|
||||
return []string{} // No native systems
|
||||
default:
|
||||
return []string{} // Custom mode
|
||||
}
|
||||
}
|
||||
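A short sketch of how these modes feed the rest of this file (caller and variable names hypothetical): with Routing.Type set to "auto", only the Amino DHT stays native, so autoconf supplies bootstrap peers for it and delegated endpoints for everything else.

    native := getNativeSystems("auto")                 // []string{autoconf.SystemAminoDHT}
    peers := autoConfData.GetBootstrapPeers(native...) // autoConfData is a *autoconf.Config
    _ = peers                                          // consumed below by BootstrapWithAutoConf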
|
||||
// selectRandomResolver picks a random resolver from a list for load balancing
|
||||
func selectRandomResolver(resolvers []string) string {
|
||||
if len(resolvers) == 0 {
|
||||
return ""
|
||||
}
|
||||
return resolvers[rand.Intn(len(resolvers))]
|
||||
}
|
||||
|
||||
// DNSResolversWithAutoConf returns DNS resolvers with "auto" values replaced by autoconf values
|
||||
func (c *Config) DNSResolversWithAutoConf() map[string]string {
|
||||
if c.DNS.Resolvers == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
resolved := make(map[string]string)
|
||||
autoConf := c.getAutoConf()
|
||||
autoExpanded := 0
|
||||
|
||||
// Process each configured resolver
|
||||
for domain, resolver := range c.DNS.Resolvers {
|
||||
if resolver == AutoPlaceholder {
|
||||
// Try to resolve from autoconf
|
||||
if autoConf != nil && autoConf.DNSResolvers != nil {
|
||||
if resolvers, exists := autoConf.DNSResolvers[domain]; exists && len(resolvers) > 0 {
|
||||
resolved[domain] = selectRandomResolver(resolvers)
|
||||
autoExpanded++
|
||||
}
|
||||
}
|
||||
// If autoConf is disabled or domain not found, skip this "auto" resolver
|
||||
} else {
|
||||
// Keep custom resolver as-is
|
||||
resolved[domain] = resolver
|
||||
}
|
||||
}
|
||||
|
||||
// Add default resolvers from autoconf that aren't already configured
|
||||
if autoConf != nil && autoConf.DNSResolvers != nil {
|
||||
for domain, resolvers := range autoConf.DNSResolvers {
|
||||
if _, exists := resolved[domain]; !exists && len(resolvers) > 0 {
|
||||
resolved[domain] = selectRandomResolver(resolvers)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Log expansion statistics
|
||||
if autoExpanded > 0 {
|
||||
log.Debugf("expanded %d 'auto' DNS.Resolvers from autoconf", autoExpanded)
|
||||
}
|
||||
|
||||
return resolved
|
||||
}
|
||||
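For example (all values hypothetical): with DNS.Resolvers = {"eth.": "auto"} and an autoconf payload listing two DoH endpoints for "eth." plus a default for ".", the result keeps user-defined entries, expands "auto" to one randomly chosen endpoint, and adds the autoconf-only default:

    resolvers := cfg.DNSResolversWithAutoConf()
    // resolvers["eth."] -> one of the two autoconf endpoints (picked at random for load balancing)
    // resolvers["."]    -> the autoconf default, added because it was not configured explicitly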
|
||||
// expandAutoConfSlice is a generic helper for expanding "auto" placeholders in string slices
|
||||
// It handles the common pattern of: iterate through slice, expand "auto" once, keep custom values
|
||||
func expandAutoConfSlice(sourceSlice []string, autoConfData []string) []string {
|
||||
var resolved []string
|
||||
autoExpanded := false
|
||||
|
||||
for _, item := range sourceSlice {
|
||||
if item == AutoPlaceholder {
|
||||
// Replace with autoconf data (only once)
|
||||
if autoConfData != nil && !autoExpanded {
|
||||
resolved = append(resolved, autoConfData...)
|
||||
autoExpanded = true
|
||||
}
|
||||
// If autoConfData is nil or already expanded, skip redundant "auto" entries silently
|
||||
} else {
|
||||
// Keep custom item
|
||||
resolved = append(resolved, item)
|
||||
}
|
||||
}
|
||||
|
||||
return resolved
|
||||
}
|
||||
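A worked example of the expansion rules above (all addresses hypothetical):

    merged := expandAutoConfSlice(
        []string{"auto", "/dnsaddr/my-node.example/p2p/12D3KooWExample"},                       // user config
        []string{"/dnsaddr/a.example/p2p/12D3KooWAaaa", "/dnsaddr/b.example/p2p/12D3KooWBbbb"}, // autoconf data
    )
    // merged == [a.example..., b.example..., my-node.example...]; a second "auto"
    // entry in the user config would be skipped silently.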
|
||||
// BootstrapWithAutoConf returns bootstrap config with "auto" values replaced by autoconf values
|
||||
func (c *Config) BootstrapWithAutoConf() []string {
|
||||
autoConf := c.getAutoConf()
|
||||
var autoConfData []string
|
||||
|
||||
if autoConf != nil {
|
||||
routingType := c.Routing.Type.WithDefault(DefaultRoutingType)
|
||||
nativeSystems := getNativeSystems(routingType)
|
||||
autoConfData = autoConf.GetBootstrapPeers(nativeSystems...)
|
||||
log.Debugf("BootstrapWithAutoConf: processing with routing type: %s", routingType)
|
||||
} else {
|
||||
log.Debugf("BootstrapWithAutoConf: autoConf disabled, using original config")
|
||||
}
|
||||
|
||||
result := expandAutoConfSlice(c.Bootstrap, autoConfData)
|
||||
log.Debugf("BootstrapWithAutoConf: final result contains %d peers", len(result))
|
||||
return result
|
||||
}
|
||||
|
||||
// getAutoConf is a helper to get autoconf data with fallbacks
|
||||
func (c *Config) getAutoConf() *autoconf.Config {
|
||||
if !c.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) {
|
||||
log.Debugf("getAutoConf: AutoConf disabled, returning nil")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create or get cached client with config
|
||||
client, err := GetAutoConfClient(c)
|
||||
if err != nil {
|
||||
log.Debugf("getAutoConf: client creation failed - %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Use GetCached to avoid network I/O during config operations
|
||||
// This ensures config retrieval doesn't block on network operations
|
||||
result := client.GetCached()
|
||||
|
||||
log.Debugf("getAutoConf: returning autoconf data")
|
||||
return result
|
||||
}
|
||||
|
||||
// BootstrapPeersWithAutoConf returns bootstrap peers with "auto" values replaced by autoconf values
|
||||
// and parsed into peer.AddrInfo structures
|
||||
func (c *Config) BootstrapPeersWithAutoConf() ([]peer.AddrInfo, error) {
|
||||
bootstrapStrings := c.BootstrapWithAutoConf()
|
||||
return ParseBootstrapPeers(bootstrapStrings)
|
||||
}
|
||||
|
||||
// DelegatedRoutersWithAutoConf returns delegated router URLs without trailing slashes
|
||||
func (c *Config) DelegatedRoutersWithAutoConf() []string {
|
||||
autoConf := c.getAutoConf()
|
||||
|
||||
// Use autoconf to expand the endpoints with supported paths for read operations
|
||||
routingType := c.Routing.Type.WithDefault(DefaultRoutingType)
|
||||
nativeSystems := getNativeSystems(routingType)
|
||||
return autoconf.ExpandDelegatedEndpoints(
|
||||
c.Routing.DelegatedRouters,
|
||||
autoConf,
|
||||
nativeSystems,
|
||||
// Kubo supports all read paths
|
||||
autoconf.RoutingV1ProvidersPath,
|
||||
autoconf.RoutingV1PeersPath,
|
||||
autoconf.RoutingV1IPNSPath,
|
||||
)
|
||||
}
|
||||
|
||||
// DelegatedPublishersWithAutoConf returns delegated publisher URLs without trailing slashes
|
||||
func (c *Config) DelegatedPublishersWithAutoConf() []string {
|
||||
autoConf := c.getAutoConf()
|
||||
|
||||
// Use autoconf to expand the endpoints with IPNS write path
|
||||
routingType := c.Routing.Type.WithDefault(DefaultRoutingType)
|
||||
nativeSystems := getNativeSystems(routingType)
|
||||
return autoconf.ExpandDelegatedEndpoints(
|
||||
c.Ipns.DelegatedPublishers,
|
||||
autoConf,
|
||||
nativeSystems,
|
||||
autoconf.RoutingV1IPNSPath, // Only IPNS operations (for write)
|
||||
)
|
||||
}
|
||||
|
||||
// expandConfigField expands a specific config field with autoconf values
|
||||
// Handles both top-level fields ("Bootstrap") and nested fields ("DNS.Resolvers")
|
||||
func (c *Config) expandConfigField(expandedCfg map[string]any, fieldPath string) {
|
||||
// Check if this field supports autoconf expansion
|
||||
expandFunc, supported := supportedAutoConfFields[fieldPath]
|
||||
if !supported {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle top-level fields (no dot in path)
|
||||
if !strings.Contains(fieldPath, ".") {
|
||||
if _, exists := expandedCfg[fieldPath]; exists {
|
||||
expandedCfg[fieldPath] = expandFunc(c)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Handle nested fields (section.field format)
|
||||
parts := strings.SplitN(fieldPath, ".", 2)
|
||||
if len(parts) != 2 {
|
||||
return
|
||||
}
|
||||
|
||||
sectionName, fieldName := parts[0], parts[1]
|
||||
if section, exists := expandedCfg[sectionName]; exists {
|
||||
if sectionMap, ok := section.(map[string]any); ok {
|
||||
if _, exists := sectionMap[fieldName]; exists {
|
||||
sectionMap[fieldName] = expandFunc(c)
|
||||
expandedCfg[sectionName] = sectionMap
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExpandAutoConfValues expands "auto" placeholders in config with their actual values using the same methods as the daemon
|
||||
func (c *Config) ExpandAutoConfValues(cfg map[string]any) (map[string]any, error) {
|
||||
// Create a deep copy of the config map to avoid modifying the original
|
||||
expandedCfg := maps.Clone(cfg)
|
||||
|
||||
// Use the same expansion methods that the daemon uses - ensures runtime consistency
|
||||
// Unified expansion for all supported autoconf fields
|
||||
c.expandConfigField(expandedCfg, "Bootstrap")
|
||||
c.expandConfigField(expandedCfg, "DNS.Resolvers")
|
||||
c.expandConfigField(expandedCfg, "Routing.DelegatedRouters")
|
||||
c.expandConfigField(expandedCfg, "Ipns.DelegatedPublishers")
|
||||
|
||||
return expandedCfg, nil
|
||||
}
|
||||
|
||||
// supportedAutoConfFields maps field keys to their expansion functions
|
||||
var supportedAutoConfFields = map[string]func(*Config) any{
|
||||
"Bootstrap": func(c *Config) any {
|
||||
expanded := c.BootstrapWithAutoConf()
|
||||
return stringSliceToInterfaceSlice(expanded)
|
||||
},
|
||||
"DNS.Resolvers": func(c *Config) any {
|
||||
expanded := c.DNSResolversWithAutoConf()
|
||||
return stringMapToInterfaceMap(expanded)
|
||||
},
|
||||
"Routing.DelegatedRouters": func(c *Config) any {
|
||||
expanded := c.DelegatedRoutersWithAutoConf()
|
||||
return stringSliceToInterfaceSlice(expanded)
|
||||
},
|
||||
"Ipns.DelegatedPublishers": func(c *Config) any {
|
||||
expanded := c.DelegatedPublishersWithAutoConf()
|
||||
return stringSliceToInterfaceSlice(expanded)
|
||||
},
|
||||
}
|
||||
|
||||
// ExpandConfigField expands auto values for a specific config field using the same methods as the daemon
|
||||
func (c *Config) ExpandConfigField(key string, value any) any {
|
||||
if expandFunc, supported := supportedAutoConfFields[key]; supported {
|
||||
return expandFunc(c)
|
||||
}
|
||||
|
||||
// Return original value if no expansion needed (not a field that supports auto values)
|
||||
return value
|
||||
}
|
||||
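As a sketch of how a config command might use these hooks (callers and variables hypothetical):

    // Expand a whole config map, e.g. for `ipfs config show`:
    shown, err := cfg.ExpandAutoConfValues(rawMap) // rawMap is the config file parsed into map[string]any
    if err != nil {
        return err
    }
    _ = shown

    // Or expand a single dotted key, e.g. for `ipfs config <key>`:
    val := cfg.ExpandConfigField("Bootstrap", rawVal) // unsupported keys are returned unchanged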
|
||||
// Helper functions for type conversion between string types and any types for JSON compatibility
|
||||
|
||||
func stringSliceToInterfaceSlice(slice []string) []any {
|
||||
result := make([]any, len(slice))
|
||||
for i, v := range slice {
|
||||
result[i] = v
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func stringMapToInterfaceMap(m map[string]string) map[string]any {
|
||||
result := make(map[string]any)
|
||||
for k, v := range m {
|
||||
result[k] = v
|
||||
}
|
||||
return result
|
||||
}
|
||||
136
config/autoconf_client.go
Normal file
136
config/autoconf_client.go
Normal file
@ -0,0 +1,136 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/ipfs/boxo/autoconf"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
version "github.com/ipfs/kubo"
|
||||
)
|
||||
|
||||
var autoconfLog = logging.Logger("autoconf")
|
||||
|
||||
// Singleton state for autoconf client
|
||||
var (
|
||||
clientOnce sync.Once
|
||||
clientCache *autoconf.Client
|
||||
clientErr error
|
||||
)
|
||||
|
||||
// GetAutoConfClient returns a cached autoconf client or creates a new one.
|
||||
// This is thread-safe and uses a singleton pattern.
|
||||
func GetAutoConfClient(cfg *Config) (*autoconf.Client, error) {
|
||||
clientOnce.Do(func() {
|
||||
clientCache, clientErr = newAutoConfClient(cfg)
|
||||
})
|
||||
return clientCache, clientErr
|
||||
}
|
||||
|
||||
// newAutoConfClient creates a new autoconf client with the given config
|
||||
func newAutoConfClient(cfg *Config) (*autoconf.Client, error) {
|
||||
// Get repo path for cache directory
|
||||
repoPath, err := PathRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get repo path: %w", err)
|
||||
}
|
||||
|
||||
// Prepare refresh interval with nil check
|
||||
refreshInterval := cfg.AutoConf.RefreshInterval
|
||||
if refreshInterval == nil {
|
||||
refreshInterval = &OptionalDuration{}
|
||||
}
|
||||
|
||||
// Use default URL if not specified
|
||||
url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)
|
||||
|
||||
// Build client options
|
||||
options := []autoconf.Option{
|
||||
autoconf.WithCacheDir(filepath.Join(repoPath, "autoconf")),
|
||||
autoconf.WithUserAgent(version.GetUserAgentVersion()),
|
||||
autoconf.WithCacheSize(DefaultAutoConfCacheSize),
|
||||
autoconf.WithTimeout(DefaultAutoConfTimeout),
|
||||
autoconf.WithRefreshInterval(refreshInterval.WithDefault(DefaultAutoConfRefreshInterval)),
|
||||
autoconf.WithFallback(autoconf.GetMainnetFallbackConfig),
|
||||
autoconf.WithURL(url),
|
||||
}
|
||||
|
||||
return autoconf.NewClient(options...)
|
||||
}
|
||||
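Putting the two pieces together, a typical read path looks like this (hypothetical caller outside this package; GetCached is the same call getAutoConf uses in autoconf.go):

    client, err := config.GetAutoConfClient(cfg) // singleton; the options above are applied once
    if err != nil {
        return err
    }
    data := client.GetCached() // cached autoconf payload only; never blocks on the network
    _ = data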
|
||||
// ValidateAutoConfWithRepo validates that autoconf setup is correct at daemon startup with repo access
|
||||
func ValidateAutoConfWithRepo(cfg *Config, swarmKeyExists bool) error {
|
||||
if !cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled) {
|
||||
// AutoConf is disabled, check for "auto" values and warn
|
||||
return validateAutoConfDisabled(cfg)
|
||||
}
|
||||
|
||||
// Check for private network with default mainnet URL
|
||||
url := cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL)
|
||||
if swarmKeyExists && url == DefaultAutoConfURL {
|
||||
return fmt.Errorf("AutoConf cannot use the default mainnet URL (%s) on a private network (swarm.key or LIBP2P_FORCE_PNET detected). Either disable AutoConf by setting AutoConf.Enabled=false, or configure AutoConf.URL to point to a configuration service specific to your private swarm", DefaultAutoConfURL)
|
||||
}
|
||||
|
||||
// Further validation will happen lazily when config is accessed
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateAutoConfDisabled checks for "auto" values when AutoConf is disabled and logs errors
|
||||
func validateAutoConfDisabled(cfg *Config) error {
|
||||
hasAutoValues := false
|
||||
var errors []string
|
||||
|
||||
// Check Bootstrap
|
||||
for _, peer := range cfg.Bootstrap {
|
||||
if peer == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Check DNS.Resolvers
|
||||
if cfg.DNS.Resolvers != nil {
|
||||
for _, resolver := range cfg.DNS.Resolvers {
|
||||
if resolver == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "DNS.Resolvers contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check Routing.DelegatedRouters
|
||||
for _, router := range cfg.Routing.DelegatedRouters {
|
||||
if router == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Check Ipns.DelegatedPublishers
|
||||
for _, publisher := range cfg.Ipns.DelegatedPublishers {
|
||||
if publisher == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Log all errors
|
||||
for _, errMsg := range errors {
|
||||
autoconfLog.Error(errMsg)
|
||||
}
|
||||
|
||||
// If only auto values exist and no static ones, fail to start
|
||||
if hasAutoValues {
|
||||
if len(cfg.Bootstrap) == 1 && cfg.Bootstrap[0] == AutoPlaceholder {
|
||||
autoconfLog.Error("Kubo cannot start with only 'auto' Bootstrap values when AutoConf.Enabled=false")
|
||||
return fmt.Errorf("no usable bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false) but 'auto' placeholder is used in Bootstrap config. Either set AutoConf.Enabled=true to enable automatic configuration, or replace 'auto' with specific Bootstrap peer addresses")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
92
config/autoconf_test.go
Normal file
92
config/autoconf_test.go
Normal file
@ -0,0 +1,92 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAutoConfDefaults(t *testing.T) {
|
||||
// Test that AutoConf has the correct default values
|
||||
cfg := &Config{
|
||||
AutoConf: AutoConf{
|
||||
URL: NewOptionalString(DefaultAutoConfURL),
|
||||
Enabled: True,
|
||||
},
|
||||
}
|
||||
|
||||
assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL))
|
||||
assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled))
|
||||
|
||||
// Test default refresh interval
|
||||
if cfg.AutoConf.RefreshInterval == nil {
|
||||
// This is expected - nil means use default
|
||||
duration := (*OptionalDuration)(nil).WithDefault(DefaultAutoConfRefreshInterval)
|
||||
assert.Equal(t, DefaultAutoConfRefreshInterval, duration)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoConfProfile(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Bootstrap: []string{"some", "existing", "peers"},
|
||||
DNS: DNS{
|
||||
Resolvers: map[string]string{
|
||||
"eth.": "https://example.com",
|
||||
},
|
||||
},
|
||||
Routing: Routing{
|
||||
DelegatedRouters: []string{"https://existing.router"},
|
||||
},
|
||||
Ipns: Ipns{
|
||||
DelegatedPublishers: []string{"https://existing.publisher"},
|
||||
},
|
||||
AutoConf: AutoConf{
|
||||
Enabled: False,
|
||||
},
|
||||
}
|
||||
|
||||
// Apply autoconf profile
|
||||
profile, ok := Profiles["autoconf-on"]
|
||||
require.True(t, ok, "autoconf-on profile not found")
|
||||
|
||||
err := profile.Transform(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that values were set to "auto"
|
||||
assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap)
|
||||
assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."])
|
||||
assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters)
|
||||
assert.Equal(t, []string{AutoPlaceholder}, cfg.Ipns.DelegatedPublishers)
|
||||
|
||||
// Check that AutoConf was enabled
|
||||
assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled))
|
||||
|
||||
// Check that URL was set
|
||||
assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL))
|
||||
}
|
||||
|
||||
func TestInitWithAutoValues(t *testing.T) {
|
||||
identity := Identity{
|
||||
PeerID: "QmTest",
|
||||
}
|
||||
|
||||
cfg, err := InitWithIdentity(identity)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that Bootstrap is set to "auto"
|
||||
assert.Equal(t, []string{AutoPlaceholder}, cfg.Bootstrap)
|
||||
|
||||
// Check that DNS resolver is set to "auto"
|
||||
assert.Equal(t, AutoPlaceholder, cfg.DNS.Resolvers["."])
|
||||
|
||||
// Check that DelegatedRouters is set to "auto"
|
||||
assert.Equal(t, []string{AutoPlaceholder}, cfg.Routing.DelegatedRouters)
|
||||
|
||||
// Check that DelegatedPublishers is set to "auto"
|
||||
assert.Equal(t, []string{AutoPlaceholder}, cfg.Ipns.DelegatedPublishers)
|
||||
|
||||
// Check that AutoConf is enabled with correct URL
|
||||
assert.True(t, cfg.AutoConf.Enabled.WithDefault(DefaultAutoConfEnabled))
|
||||
assert.Equal(t, DefaultAutoConfURL, cfg.AutoConf.URL.WithDefault(DefaultAutoConfURL))
|
||||
}
|
||||
46
config/autotls.go
Normal file
46
config/autotls.go
Normal file
@ -0,0 +1,46 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
p2pforge "github.com/ipshipyard/p2p-forge/client"
|
||||
)
|
||||
|
||||
// AutoTLS includes optional configuration of the p2p-forge client of a service
|
||||
// for obtaining a domain and TLS certificate to improve connectivity for web
|
||||
// browser clients. More: https://github.com/ipshipyard/p2p-forge#readme
|
||||
type AutoTLS struct {
|
||||
// Enables the p2p-forge feature and all related features.
|
||||
Enabled Flag `json:",omitempty"`
|
||||
|
||||
// Optional, controls if Kubo should add /tls/sni/.../ws listener to every /tcp port if no explicit /ws is defined in Addresses.Swarm
|
||||
AutoWSS Flag `json:",omitempty"`
|
||||
|
||||
// Optional override of the parent domain that will be used
|
||||
DomainSuffix *OptionalString `json:",omitempty"`
|
||||
|
||||
// Optional override of HTTP API that acts as ACME DNS-01 Challenge broker
|
||||
RegistrationEndpoint *OptionalString `json:",omitempty"`
|
||||
|
||||
// Optional Authorization token, used with private/test instances of p2p-forge
|
||||
RegistrationToken *OptionalString `json:",omitempty"`
|
||||
|
||||
// Optional registration delay used when AutoTLS.Enabled is not explicitly set to true in config
|
||||
RegistrationDelay *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// Optional override of CA ACME API used by p2p-forge system
|
||||
CAEndpoint *OptionalString `json:",omitempty"`
|
||||
|
||||
// Optional, controls if features like AutoWSS should generate shorter /dnsX instead of /ipX/../sni/..
|
||||
ShortAddrs Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultAutoTLSEnabled = true // with DefaultAutoTLSRegistrationDelay, unless explicitly enabled in config
|
||||
DefaultDomainSuffix = p2pforge.DefaultForgeDomain
|
||||
DefaultRegistrationEndpoint = p2pforge.DefaultForgeEndpoint
|
||||
DefaultCAEndpoint = p2pforge.DefaultCAEndpoint
|
||||
DefaultAutoWSS = true // requires AutoTLS.Enabled
|
||||
DefaultAutoTLSShortAddrs = true // requires AutoTLS.Enabled
|
||||
DefaultAutoTLSRegistrationDelay = 1 * time.Hour
|
||||
)
|
||||
15
config/bitswap.go
Normal file
15
config/bitswap.go
Normal file
@ -0,0 +1,15 @@
|
||||
package config
|
||||
|
||||
// Bitswap holds Bitswap configuration options
|
||||
type Bitswap struct {
|
||||
// Libp2pEnabled controls if the node initializes bitswap over libp2p (enabled by default)
|
||||
// (This can be disabled if HTTPRetrieval.Enabled is set to true)
|
||||
Libp2pEnabled Flag `json:",omitempty"`
|
||||
// ServerEnabled controls if the node responds to WANTs (depends on Libp2pEnabled, enabled by default)
|
||||
ServerEnabled Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultBitswapLibp2pEnabled = true
|
||||
DefaultBitswapServerEnabled = true
|
||||
)
|
||||
@ -2,27 +2,11 @@ package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// DefaultBootstrapAddresses are the hardcoded bootstrap addresses
|
||||
// for IPFS. they are nodes run by the IPFS team. docs on these later.
|
||||
// As with all p2p networks, bootstrap is an important security concern.
|
||||
//
|
||||
// NOTE: This is here -- and not inside cmd/ipfs/init.go -- because of an
|
||||
// import dependency issue. TODO: move this into a config/default/ package.
|
||||
var DefaultBootstrapAddresses = []string{
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
|
||||
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
|
||||
"/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
|
||||
}
|
||||
|
||||
// ErrInvalidPeerAddr signals an address is not a valid peer address.
|
||||
var ErrInvalidPeerAddr = errors.New("invalid peer address")
|
||||
|
||||
@ -30,23 +14,11 @@ func (c *Config) BootstrapPeers() ([]peer.AddrInfo, error) {
|
||||
return ParseBootstrapPeers(c.Bootstrap)
|
||||
}
|
||||
|
||||
// DefaultBootstrapPeers returns the (parsed) set of default bootstrap peers.
|
||||
// if it fails, it returns a meaningful error for the user.
|
||||
// This is here (and not inside cmd/ipfs/init) because of module dependency problems.
|
||||
func DefaultBootstrapPeers() ([]peer.AddrInfo, error) {
|
||||
ps, err := ParseBootstrapPeers(DefaultBootstrapAddresses)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`failed to parse hardcoded bootstrap peers: %w
|
||||
This is a problem with the ipfs codebase. Please report it to the dev team`, err)
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
func (c *Config) SetBootstrapPeers(bps []peer.AddrInfo) {
|
||||
c.Bootstrap = BootstrapPeerStrings(bps)
|
||||
}
|
||||
|
||||
// ParseBootstrapPeer parses a bootstrap list into a list of AddrInfos.
|
||||
// ParseBootstrapPeers parses a bootstrap list into a list of AddrInfos.
|
||||
func ParseBootstrapPeers(addrs []string) ([]peer.AddrInfo, error) {
|
||||
maddrs := make([]ma.Multiaddr, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
|
||||
@ -1,24 +1,28 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/boxo/autoconf"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBoostrapPeerStrings(t *testing.T) {
|
||||
parsed, err := ParseBootstrapPeers(DefaultBootstrapAddresses)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
func TestBootstrapPeerStrings(t *testing.T) {
|
||||
// Test round-trip: string -> parse -> format -> string
|
||||
// This ensures that parsing and formatting are inverse operations
|
||||
|
||||
formatted := BootstrapPeerStrings(parsed)
|
||||
sort.Strings(formatted)
|
||||
expected := append([]string{}, DefaultBootstrapAddresses...)
|
||||
sort.Strings(expected)
|
||||
// Start with the default bootstrap peer multiaddr strings
|
||||
originalStrings := autoconf.FallbackBootstrapPeers
|
||||
|
||||
for i, s := range formatted {
|
||||
if expected[i] != s {
|
||||
t.Fatalf("expected %s, %s", expected[i], s)
|
||||
}
|
||||
}
|
||||
// Parse multiaddr strings into structured peer data
|
||||
parsed, err := ParseBootstrapPeers(originalStrings)
|
||||
require.NoError(t, err, "parsing bootstrap peers should succeed")
|
||||
|
||||
// Format the parsed data back into multiaddr strings
|
||||
formattedStrings := BootstrapPeerStrings(parsed)
|
||||
|
||||
// Verify round-trip: we should get back exactly what we started with
|
||||
assert.ElementsMatch(t, originalStrings, formattedStrings,
|
||||
"round-trip through parse/format should preserve all bootstrap peers")
|
||||
}
|
||||
|
||||
128
config/config.go
128
config/config.go
@ -7,9 +7,10 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/ipfs/kubo/misc/fsutil"
|
||||
)
|
||||
|
||||
// Config is used to load ipfs config files.
|
||||
@ -26,20 +27,27 @@ type Config struct {
|
||||
API API // local node's API settings
|
||||
Swarm SwarmConfig
|
||||
AutoNAT AutoNATConfig
|
||||
AutoTLS AutoTLS
|
||||
Pubsub PubsubConfig
|
||||
Peering Peering
|
||||
DNS DNS
|
||||
Migration Migration
|
||||
|
||||
Provider Provider
|
||||
Reprovider Reprovider
|
||||
Experimental Experiments
|
||||
Plugins Plugins
|
||||
Pinning Pinning
|
||||
Import Import
|
||||
Version Version
|
||||
Migration Migration
|
||||
AutoConf AutoConf
|
||||
|
||||
Provide Provide // Merged Provider and Reprovider configuration
|
||||
Provider Provider // Deprecated: use Provide. Will be removed in a future release.
|
||||
Reprovider Reprovider // Deprecated: use Provide. Will be removed in a future release.
|
||||
HTTPRetrieval HTTPRetrieval
|
||||
Experimental Experiments
|
||||
Plugins Plugins
|
||||
Pinning Pinning
|
||||
Import Import
|
||||
Version Version
|
||||
|
||||
Internal Internal // experimental/unstable options
|
||||
|
||||
Bitswap Bitswap `json:",omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
@ -58,7 +66,7 @@ func PathRoot() (string, error) {
|
||||
dir := os.Getenv(EnvDir)
|
||||
var err error
|
||||
if len(dir) == 0 {
|
||||
dir, err = homedir.Expand(DefaultPathRoot)
|
||||
dir, err = fsutil.ExpandHome(DefaultPathRoot)
|
||||
}
|
||||
return dir, err
|
||||
}
|
||||
@ -136,6 +144,71 @@ func ToMap(conf *Config) (map[string]interface{}, error) {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Convert config to a map, without using encoding/json, since
|
||||
// zero/empty/'omitempty' fields are excluded by encoding/json during
|
||||
// marshaling.
|
||||
func ReflectToMap(conf interface{}) interface{} {
|
||||
v := reflect.ValueOf(conf)
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle pointer type
|
||||
if v.Kind() == reflect.Ptr {
|
||||
if v.IsNil() {
|
||||
// Create a zero value of the pointer's element type
|
||||
elemType := v.Type().Elem()
|
||||
zero := reflect.Zero(elemType)
|
||||
return ReflectToMap(zero.Interface())
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
result := make(map[string]interface{})
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
// Only include exported fields
|
||||
if field.CanInterface() {
|
||||
result[t.Field(i).Name] = ReflectToMap(field.Interface())
|
||||
}
|
||||
}
|
||||
return result
|
||||
|
||||
case reflect.Map:
|
||||
result := make(map[string]interface{})
|
||||
iter := v.MapRange()
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
// Convert map keys to strings for consistency
|
||||
keyStr := fmt.Sprint(ReflectToMap(key.Interface()))
|
||||
result[keyStr] = ReflectToMap(iter.Value().Interface())
|
||||
}
|
||||
// Add a sample to differentiate between a map and a struct on validation.
|
||||
sample := reflect.Zero(v.Type().Elem())
|
||||
if sample.CanInterface() {
|
||||
result["*"] = ReflectToMap(sample.Interface())
|
||||
}
|
||||
return result
|
||||
|
||||
case reflect.Slice, reflect.Array:
|
||||
result := make([]interface{}, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
result[i] = ReflectToMap(v.Index(i).Interface())
|
||||
}
|
||||
return result
|
||||
|
||||
default:
|
||||
// For basic types (int, string, etc.), just return the value
|
||||
if v.CanInterface() {
|
||||
return v.Interface()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
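The practical difference from ToMap, which round-trips through encoding/json, is that empty fields tagged `omitempty` survive here, which is what CheckKey below relies on. A small sketch:

    m := ReflectToMap(new(Config)).(map[string]interface{})
    identity := m["Identity"].(map[string]interface{})
    _, present := identity["PrivKey"] // true, even though PrivKey is empty and tagged omitempty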
|
||||
// Clone copies the config. Use when updating.
|
||||
func (c *Config) Clone() (*Config, error) {
|
||||
var newConfig Config
|
||||
@ -151,3 +224,38 @@ func (c *Config) Clone() (*Config, error) {
|
||||
|
||||
return &newConfig, nil
|
||||
}
|
||||
|
||||
// Check if the provided key is present in the structure.
|
||||
func CheckKey(key string) error {
|
||||
conf := Config{}
|
||||
|
||||
// Convert an empty config to a map without JSON.
|
||||
cursor := ReflectToMap(&conf)
|
||||
|
||||
// Parse the key and verify its presence in the map.
|
||||
var ok bool
|
||||
var mapCursor map[string]interface{}
|
||||
|
||||
parts := strings.Split(key, ".")
|
||||
for i, part := range parts {
|
||||
mapCursor, ok = cursor.(map[string]interface{})
|
||||
if !ok {
|
||||
if cursor == nil {
|
||||
return nil
|
||||
}
|
||||
path := strings.Join(parts[:i], ".")
|
||||
return fmt.Errorf("%s key is not a map", path)
|
||||
}
|
||||
|
||||
cursor, ok = mapCursor[part]
|
||||
if !ok {
|
||||
// If the config section is a map, validate against the default entry.
|
||||
if cursor, ok = mapCursor["*"]; ok {
|
||||
continue
|
||||
}
|
||||
path := strings.Join(parts[:i+1], ".")
|
||||
return fmt.Errorf("%s not found", path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
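Usage sketch (hypothetical caller, e.g. validating a dotted key before `ipfs config` writes it):

    if err := CheckKey("Datastore.StorageGCWatermark"); err != nil {
        return err // an unknown key such as "Datastore.Foo" reports "Datastore.Foo not found"
    }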
|
||||
@ -27,3 +27,145 @@ func TestClone(t *testing.T) {
|
||||
t.Fatal("HTTP headers not preserved")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReflectToMap(t *testing.T) {
|
||||
// Convert an empty config into a nested map via reflection
|
||||
reflectedConfig := ReflectToMap(new(Config))
|
||||
|
||||
mapConfig, ok := reflectedConfig.(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("Config didn't convert to map")
|
||||
}
|
||||
|
||||
reflectedIdentity, ok := mapConfig["Identity"]
|
||||
if !ok {
|
||||
t.Fatal("Identity field not found")
|
||||
}
|
||||
|
||||
mapIdentity, ok := reflectedIdentity.(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("Identity field didn't convert to map")
|
||||
}
|
||||
|
||||
// Test string field reflection
|
||||
reflectedPeerID, ok := mapIdentity["PeerID"]
|
||||
if !ok {
|
||||
t.Fatal("PeerID field not found in Identity")
|
||||
}
|
||||
if _, ok := reflectedPeerID.(string); !ok {
|
||||
t.Fatal("PeerID field didn't convert to string")
|
||||
}
|
||||
|
||||
// Test omitempty json string field
|
||||
reflectedPrivKey, ok := mapIdentity["PrivKey"]
|
||||
if !ok {
|
||||
t.Fatal("PrivKey omitempty field not found in Identity")
|
||||
}
|
||||
if _, ok := reflectedPrivKey.(string); !ok {
|
||||
t.Fatal("PrivKey omitempty field didn't convert to string")
|
||||
}
|
||||
|
||||
// Test slices field
|
||||
reflectedBootstrap, ok := mapConfig["Bootstrap"]
|
||||
if !ok {
|
||||
t.Fatal("Bootstrap field not found in config")
|
||||
}
|
||||
bootstrap, ok := reflectedBootstrap.([]interface{})
|
||||
if !ok {
|
||||
t.Fatal("Bootstrap field didn't convert to []string")
|
||||
}
|
||||
if len(bootstrap) != 0 {
|
||||
t.Fatal("Bootstrap len is incorrect")
|
||||
}
|
||||
|
||||
reflectedDatastore, ok := mapConfig["Datastore"]
|
||||
if !ok {
|
||||
t.Fatal("Datastore field not found in config")
|
||||
}
|
||||
datastore, ok := reflectedDatastore.(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("Datastore field didn't convert to map")
|
||||
}
|
||||
storageGCWatermark, ok := datastore["StorageGCWatermark"]
|
||||
if !ok {
|
||||
t.Fatal("StorageGCWatermark field not found in Datastore")
|
||||
}
|
||||
// Test int field
|
||||
if _, ok := storageGCWatermark.(int64); !ok {
|
||||
t.Fatal("StorageGCWatermark field didn't convert to int64")
|
||||
}
|
||||
noSync, ok := datastore["NoSync"]
|
||||
if !ok {
|
||||
t.Fatal("NoSync field not found in Datastore")
|
||||
}
|
||||
// Test bool field
|
||||
if _, ok := noSync.(bool); !ok {
|
||||
t.Fatal("NoSync field didn't convert to bool")
|
||||
}
|
||||
|
||||
reflectedDNS, ok := mapConfig["DNS"]
|
||||
if !ok {
|
||||
t.Fatal("DNS field not found in config")
|
||||
}
|
||||
DNS, ok := reflectedDNS.(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatal("DNS field didn't convert to map")
|
||||
}
|
||||
reflectedResolvers, ok := DNS["Resolvers"]
|
||||
if !ok {
|
||||
t.Fatal("Resolvers field not found in DNS")
|
||||
}
|
||||
// Test map field
|
||||
if _, ok := reflectedResolvers.(map[string]interface{}); !ok {
|
||||
t.Fatal("Resolvers field didn't convert to map")
|
||||
}
|
||||
|
||||
// Test pointer field
|
||||
if _, ok := DNS["MaxCacheTTL"].(map[string]interface{}); !ok {
|
||||
// Since OptionalDuration only field is private, we cannot test it
|
||||
t.Fatal("MaxCacheTTL field didn't convert to map")
|
||||
}
|
||||
}
|
||||
|
||||
// Test validation of options set through "ipfs config"
|
||||
func TestCheckKey(t *testing.T) {
|
||||
err := CheckKey("Foo.Bar")
|
||||
if err == nil {
|
||||
t.Fatal("Foo.Bar isn't a valid key in the config")
|
||||
}
|
||||
|
||||
err = CheckKey("Provide.Strategy")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", err, "Provide.Strategy is a valid key in the config")
|
||||
}
|
||||
|
||||
err = CheckKey("Provide.DHT.MaxWorkers")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", err, "Provide.DHT.MaxWorkers is a valid key in the config")
|
||||
}
|
||||
|
||||
err = CheckKey("Provide.DHT.Interval")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", err, "Provide.DHT.Interval is a valid key in the config")
|
||||
}
|
||||
|
||||
err = CheckKey("Provide.Foo")
|
||||
if err == nil {
|
||||
t.Fatal("Provide.Foo isn't a valid key in the config")
|
||||
}
|
||||
|
||||
err = CheckKey("Gateway.PublicGateways.Foo.Paths")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", err, "Gateway.PublicGateways.Foo.Paths is a valid key in the config")
|
||||
}
|
||||
|
||||
err = CheckKey("Gateway.PublicGateways.Foo.Bar")
|
||||
if err == nil {
|
||||
t.Fatal("Gateway.PublicGateways.Foo.Bar isn't a valid key in the config")
|
||||
}
|
||||
|
||||
err = CheckKey("Plugins.Plugins.peerlog.Config.Enabled")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", err, "Plugins.Plugins.peerlog.Config.Enabled is a valid key in the config")
|
||||
}
|
||||
}
|
||||
|
||||
@ -4,8 +4,21 @@ import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// DefaultDataStoreDirectory is the directory to store all the local IPFS data.
|
||||
const DefaultDataStoreDirectory = "datastore"
|
||||
const (
|
||||
// DefaultDataStoreDirectory is the directory to store all the local IPFS data.
|
||||
DefaultDataStoreDirectory = "datastore"
|
||||
|
||||
// DefaultBlockKeyCacheSize is the size for the blockstore two-queue
|
||||
// cache which caches block keys and sizes.
|
||||
DefaultBlockKeyCacheSize = 64 << 10
|
||||
|
||||
// DefaultWriteThrough specifies whether to use a "write-through"
|
||||
// Blockstore and Blockservice. This means that they will write
|
||||
// without performing any reads to check if the incoming blocks are
|
||||
// already present in the datastore. Enable for datastores with fast
|
||||
// writes and slower reads.
|
||||
DefaultWriteThrough bool = true
|
||||
)
|
||||
|
||||
// Datastore tracks the configuration of the datastore.
|
||||
type Datastore struct {
|
||||
@ -21,8 +34,10 @@ type Datastore struct {
|
||||
|
||||
Spec map[string]interface{}
|
||||
|
||||
HashOnRead bool
|
||||
BloomFilterSize int
|
||||
HashOnRead bool
|
||||
BloomFilterSize int
|
||||
BlockKeyCacheSize OptionalInteger `json:",omitempty"`
|
||||
WriteThrough Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DataStorePath returns the default data store path given a configuration root
|
||||
|
||||
@ -10,7 +10,7 @@ type DNS struct {
|
||||
//
|
||||
// Example:
|
||||
// - Custom resolver for ENS: `eth.` → `https://dns.eth.limo/dns-query`
|
||||
// - Override the default OS resolver: `.` → `https://doh.applied-privacy.net/query`
|
||||
// - Override the default OS resolver: `.` → `https://1.1.1.1/dns-query`
|
||||
Resolvers map[string]string
|
||||
// MaxCacheTTL is the maximum duration DNS entries are valid in the cache.
|
||||
MaxCacheTTL *OptionalDuration `json:",omitempty"`
|
||||
|
||||
@ -6,7 +6,7 @@ type Experiments struct {
|
||||
ShardingEnabled bool `json:",omitempty"` // deprecated by autosharding: https://github.com/ipfs/kubo/pull/8527
|
||||
Libp2pStreamMounting bool
|
||||
P2pHttpProxy bool //nolint
|
||||
StrategicProviding bool
|
||||
StrategicProviding bool `json:",omitempty"` // removed, use Provider.Enabled instead
|
||||
OptimisticProvide bool
|
||||
OptimisticProvideJobsPoolSize int
|
||||
GatewayOverLibp2p bool `json:",omitempty"`
|
||||
|
||||
@ -1,10 +1,20 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/ipfs/boxo/gateway"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultInlineDNSLink = false
|
||||
DefaultDeserializedResponses = true
|
||||
DefaultDisableHTMLErrors = false
|
||||
DefaultExposeRoutingAPI = false
|
||||
DefaultExposeRoutingAPI = true
|
||||
DefaultDiagnosticServiceURL = "https://check.ipfs.network"
|
||||
|
||||
// Gateway limit defaults from boxo
|
||||
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
|
||||
DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
|
||||
DefaultMaxRangeRequestFileSize = 0 // 0 means no limit
|
||||
)
|
||||
|
||||
type GatewaySpec struct {
|
||||
@ -73,4 +83,33 @@ type Gateway struct {
|
||||
// ExposeRoutingAPI configures the gateway port to expose
|
||||
// routing system as HTTP API at /routing/v1 (https://specs.ipfs.tech/routing/http-routing-v1/).
|
||||
ExposeRoutingAPI Flag
|
||||
|
||||
// RetrievalTimeout enforces a maximum duration for content retrieval:
|
||||
// - Time to first byte: If the gateway cannot start writing the response within
|
||||
// this duration (e.g., stuck searching for providers), a 504 Gateway Timeout
|
||||
// is returned.
|
||||
// - Time between writes: After the first byte, the timeout resets each time new
|
||||
// bytes are written to the client. If the gateway cannot write additional data
|
||||
// within this duration after the last successful write, the response is terminated.
|
||||
// This helps free resources when the gateway gets stuck looking for providers
|
||||
// or cannot retrieve the requested content.
|
||||
// A value of 0 disables this timeout.
|
||||
RetrievalTimeout *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// MaxConcurrentRequests limits concurrent HTTP requests handled by the gateway.
|
||||
// Requests beyond this limit receive 429 Too Many Requests with Retry-After header.
|
||||
// A value of 0 disables the limit.
|
||||
MaxConcurrentRequests *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// MaxRangeRequestFileSize limits the maximum file size for HTTP range requests.
|
||||
// Range requests for files larger than this limit return 501 Not Implemented.
|
||||
// This protects against CDN issues with large file range requests and prevents
|
||||
// excessive bandwidth consumption. A value of 0 disables the limit.
|
||||
MaxRangeRequestFileSize *OptionalBytes `json:",omitempty"`
|
||||
|
||||
// DiagnosticServiceURL is the URL for a service to diagnose CID retrievability issues.
|
||||
// When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID"
|
||||
// button will be shown that links to this service with the CID appended as ?cid=<CID-to-diagnose>.
|
||||
// Set to empty string to disable the button.
|
||||
DiagnosticServiceURL *OptionalString `json:",omitempty"`
|
||||
}
|
||||
|
||||
19
config/http_retrieval.go
Normal file
19
config/http_retrieval.go
Normal file
@ -0,0 +1,19 @@
|
||||
package config
|
||||
|
||||
// HTTPRetrieval is the configuration object for HTTP Retrieval settings.
|
||||
// Implicit defaults can be found in core/node/bitswap.go
|
||||
type HTTPRetrieval struct {
|
||||
Enabled Flag `json:",omitempty"`
|
||||
Allowlist []string `json:",omitempty"`
|
||||
Denylist []string `json:",omitempty"`
|
||||
NumWorkers *OptionalInteger `json:",omitempty"`
|
||||
MaxBlockSize *OptionalString `json:",omitempty"`
|
||||
TLSInsecureSkipVerify Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultHTTPRetrievalEnabled = true
|
||||
DefaultHTTPRetrievalNumWorkers = 16
|
||||
DefaultHTTPRetrievalTLSInsecureSkipVerify = false // only for testing with self-signed HTTPS certs
|
||||
DefaultHTTPRetrievalMaxBlockSize = "2MiB" // matching bitswap: https://specs.ipfs.tech/bitswap-protocol/#block-sizes
|
||||
)
|
||||
175
config/import.go
175
config/import.go
@ -1,17 +1,184 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
|
||||
"github.com/ipfs/boxo/ipld/unixfs/io"
|
||||
"github.com/ipfs/boxo/verifcid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultCidVersion = 0
|
||||
DefaultUnixFSRawLeaves = false
|
||||
DefaultUnixFSChunker = "size-262144"
|
||||
DefaultHashFunction = "sha2-256"
|
||||
DefaultFastProvideRoot = true
|
||||
DefaultFastProvideWait = false
|
||||
|
||||
DefaultUnixFSHAMTDirectorySizeThreshold = 262144 // 256KiB - https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26
|
||||
|
||||
// DefaultBatchMaxNodes controls the maximum number of nodes in a
|
||||
// write-batch. The total size of the batch is limited by
|
||||
// BatchMaxNodes and BatchMaxSize.
|
||||
DefaultBatchMaxNodes = 128
|
||||
// DefaultBatchMaxSize controls the maximum size of a single
|
||||
// write-batch. The total size of the batch is limited by
|
||||
// BatchMaxNodes and BatchMaxSize.
|
||||
DefaultBatchMaxSize = 100 << 20 // 100MiB
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultUnixFSFileMaxLinks = int64(helpers.DefaultLinksPerBlock)
|
||||
DefaultUnixFSDirectoryMaxLinks = int64(0)
|
||||
DefaultUnixFSHAMTDirectoryMaxFanout = int64(io.DefaultShardWidth)
|
||||
)
|
||||
|
||||
// Import configures the default options for ingesting data. This affects commands
|
||||
// that ingest data, such as 'ipfs add', 'ipfs dag put', 'ipfs block put', 'ipfs files write'.
|
||||
type Import struct {
|
||||
CidVersion OptionalInteger
|
||||
UnixFSRawLeaves Flag
|
||||
UnixFSChunker OptionalString
|
||||
HashFunction OptionalString
|
||||
CidVersion OptionalInteger
|
||||
UnixFSRawLeaves Flag
|
||||
UnixFSChunker OptionalString
|
||||
HashFunction OptionalString
|
||||
UnixFSFileMaxLinks OptionalInteger
|
||||
UnixFSDirectoryMaxLinks OptionalInteger
|
||||
UnixFSHAMTDirectoryMaxFanout OptionalInteger
|
||||
UnixFSHAMTDirectorySizeThreshold OptionalBytes
|
||||
BatchMaxNodes OptionalInteger
|
||||
BatchMaxSize OptionalInteger
|
||||
FastProvideRoot Flag
|
||||
FastProvideWait Flag
|
||||
}
|
||||
|
||||
// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.
|
||||
// See: https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters
|
||||
func ValidateImportConfig(cfg *Import) error {
|
||||
// Validate CidVersion
|
||||
if !cfg.CidVersion.IsDefault() {
|
||||
cidVer := cfg.CidVersion.WithDefault(DefaultCidVersion)
|
||||
if cidVer != 0 && cidVer != 1 {
|
||||
return fmt.Errorf("Import.CidVersion must be 0 or 1, got %d", cidVer)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate UnixFSFileMaxLinks
|
||||
if !cfg.UnixFSFileMaxLinks.IsDefault() {
|
||||
maxLinks := cfg.UnixFSFileMaxLinks.WithDefault(DefaultUnixFSFileMaxLinks)
|
||||
if maxLinks <= 0 {
|
||||
return fmt.Errorf("Import.UnixFSFileMaxLinks must be positive, got %d", maxLinks)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate UnixFSDirectoryMaxLinks
|
||||
if !cfg.UnixFSDirectoryMaxLinks.IsDefault() {
|
||||
maxLinks := cfg.UnixFSDirectoryMaxLinks.WithDefault(DefaultUnixFSDirectoryMaxLinks)
|
||||
if maxLinks < 0 {
|
||||
return fmt.Errorf("Import.UnixFSDirectoryMaxLinks must be non-negative, got %d", maxLinks)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate UnixFSHAMTDirectoryMaxFanout if set
|
||||
if !cfg.UnixFSHAMTDirectoryMaxFanout.IsDefault() {
|
||||
fanout := cfg.UnixFSHAMTDirectoryMaxFanout.WithDefault(DefaultUnixFSHAMTDirectoryMaxFanout)
|
||||
|
||||
// Check all requirements: fanout < 8 rejects non-positive values and the powers of two (1, 2, 4) that are not multiples of 8
|
||||
// Combined with the power-of-2 check and the 1024 cap, this leaves exactly the valid values: 8, 16, 32, 64, 128, 256, 512, 1024
|
||||
if fanout < 8 || !isPowerOfTwo(fanout) || fanout > 1024 {
|
||||
return fmt.Errorf("Import.UnixFSHAMTDirectoryMaxFanout must be a positive power of 2, multiple of 8, and not exceed 1024 (got %d)", fanout)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate BatchMaxNodes
|
||||
if !cfg.BatchMaxNodes.IsDefault() {
|
||||
maxNodes := cfg.BatchMaxNodes.WithDefault(DefaultBatchMaxNodes)
|
||||
if maxNodes <= 0 {
|
||||
return fmt.Errorf("Import.BatchMaxNodes must be positive, got %d", maxNodes)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate BatchMaxSize
|
||||
if !cfg.BatchMaxSize.IsDefault() {
|
||||
maxSize := cfg.BatchMaxSize.WithDefault(DefaultBatchMaxSize)
|
||||
if maxSize <= 0 {
|
||||
return fmt.Errorf("Import.BatchMaxSize must be positive, got %d", maxSize)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate UnixFSChunker format
|
||||
if !cfg.UnixFSChunker.IsDefault() {
|
||||
chunker := cfg.UnixFSChunker.WithDefault(DefaultUnixFSChunker)
|
||||
if !isValidChunker(chunker) {
|
||||
return fmt.Errorf("Import.UnixFSChunker invalid format: %q (expected \"size-<bytes>\", \"rabin-<min>-<avg>-<max>\", or \"buzhash\")", chunker)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate HashFunction
|
||||
if !cfg.HashFunction.IsDefault() {
|
||||
hashFunc := cfg.HashFunction.WithDefault(DefaultHashFunction)
|
||||
hashCode, ok := mh.Names[strings.ToLower(hashFunc)]
|
||||
if !ok {
|
||||
return fmt.Errorf("Import.HashFunction unrecognized: %q", hashFunc)
|
||||
}
|
||||
// Check if the hash is allowed by verifcid
|
||||
if !verifcid.DefaultAllowlist.IsAllowed(hashCode) {
|
||||
return fmt.Errorf("Import.HashFunction %q is not allowed for use in IPFS", hashFunc)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
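// Editor's illustrative sketch (not part of this change): the fanout rules
// above accept exactly 8, 16, 32, 64, 128, 256, 512 and 1024, so 256 passes
// validation while 100 (not a power of two) is rejected.
func exampleValidateFanout() (okErr, badErr error) {
	ok := &Import{UnixFSHAMTDirectoryMaxFanout: *NewOptionalInteger(256)}
	bad := &Import{UnixFSHAMTDirectoryMaxFanout: *NewOptionalInteger(100)}
	return ValidateImportConfig(ok), ValidateImportConfig(bad) // nil, non-nil
}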
// isPowerOfTwo checks if a number is a power of 2
|
||||
func isPowerOfTwo(n int64) bool {
|
||||
return n > 0 && (n&(n-1)) == 0
|
||||
}
|
||||
|
||||
// isValidChunker validates chunker format
|
||||
func isValidChunker(chunker string) bool {
|
||||
if chunker == "buzhash" {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for size-<bytes> format
|
||||
if strings.HasPrefix(chunker, "size-") {
|
||||
sizeStr := strings.TrimPrefix(chunker, "size-")
|
||||
if sizeStr == "" {
|
||||
return false
|
||||
}
|
||||
// Check if it's a valid positive integer (no negative sign allowed)
|
||||
if sizeStr[0] == '-' {
|
||||
return false
|
||||
}
|
||||
size, err := strconv.Atoi(sizeStr)
|
||||
// Size must be positive (not zero)
|
||||
return err == nil && size > 0
|
||||
}
|
||||
|
||||
// Check for rabin-<min>-<avg>-<max> format
|
||||
if strings.HasPrefix(chunker, "rabin-") {
|
||||
parts := strings.Split(chunker, "-")
|
||||
if len(parts) != 4 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Parse and validate min, avg, max values
|
||||
values := make([]int, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
val, err := strconv.Atoi(parts[i+1])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
values[i] = val
|
||||
}
|
||||
|
||||
// Validate ordering: min <= avg <= max
|
||||
min, avg, max := values[0], values[1], values[2]
|
||||
return min <= avg && avg <= max
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
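// Editor's illustrative sketch (not part of this change): the accepted chunker
// spellings are "buzhash", "size-<bytes>" with a positive size, and
// "rabin-<min>-<avg>-<max>" with min <= avg <= max.
func exampleChunkerFormats() []bool {
	return []bool{
		isValidChunker("size-262144"),       // true
		isValidChunker("rabin-128-256-512"), // true
		isValidChunker("buzhash"),           // true
		isValidChunker("size-0"),            // false: size must be positive
		isValidChunker("rabin-512-256-128"), // false: min > avg > max
	}
}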
|
||||
408
config/import_test.go
Normal file
408
config/import_test.go
Normal file
@ -0,0 +1,408 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
func TestValidateImportConfig_HAMTFanout(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fanout int64
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
// Valid values - powers of 2, multiples of 8, and <= 1024
|
||||
{name: "valid 8", fanout: 8, wantErr: false},
|
||||
{name: "valid 16", fanout: 16, wantErr: false},
|
||||
{name: "valid 32", fanout: 32, wantErr: false},
|
||||
{name: "valid 64", fanout: 64, wantErr: false},
|
||||
{name: "valid 128", fanout: 128, wantErr: false},
|
||||
{name: "valid 256", fanout: 256, wantErr: false},
|
||||
{name: "valid 512", fanout: 512, wantErr: false},
|
||||
{name: "valid 1024", fanout: 1024, wantErr: false},
|
||||
|
||||
// Invalid values - not powers of 2
|
||||
{name: "invalid 7", fanout: 7, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid 15", fanout: 15, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid 100", fanout: 100, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid 257", fanout: 257, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid 1000", fanout: 1000, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
|
||||
// Invalid values - powers of 2 but not multiples of 8
|
||||
{name: "invalid 1", fanout: 1, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid 2", fanout: 2, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid 4", fanout: 4, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
|
||||
// Invalid values - exceeds 1024
|
||||
{name: "invalid 2048", fanout: 2048, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid 4096", fanout: 4096, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
|
||||
// Invalid values - negative or zero
|
||||
{name: "invalid 0", fanout: 0, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid -8", fanout: -8, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
{name: "invalid -256", fanout: -256, wantErr: true, errMsg: "must be a positive power of 2, multiple of 8, and not exceed 1024"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
UnixFSHAMTDirectoryMaxFanout: *NewOptionalInteger(tt.fanout),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("ValidateImportConfig() expected error for fanout=%d, got nil", tt.fanout)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error for fanout=%d: %v", tt.fanout, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_CidVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cidVer int64
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid 0", cidVer: 0, wantErr: false},
|
||||
{name: "valid 1", cidVer: 1, wantErr: false},
|
||||
{name: "invalid 2", cidVer: 2, wantErr: true, errMsg: "must be 0 or 1"},
|
||||
{name: "invalid -1", cidVer: -1, wantErr: true, errMsg: "must be 0 or 1"},
|
||||
{name: "invalid 100", cidVer: 100, wantErr: true, errMsg: "must be 0 or 1"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
CidVersion: *NewOptionalInteger(tt.cidVer),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("ValidateImportConfig() expected error for cidVer=%d, got nil", tt.cidVer)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error for cidVer=%d: %v", tt.cidVer, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_UnixFSFileMaxLinks(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxLinks int64
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid 1", maxLinks: 1, wantErr: false},
|
||||
{name: "valid 174", maxLinks: 174, wantErr: false},
|
||||
{name: "valid 1000", maxLinks: 1000, wantErr: false},
|
||||
{name: "invalid 0", maxLinks: 0, wantErr: true, errMsg: "must be positive"},
|
||||
{name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be positive"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
UnixFSFileMaxLinks: *NewOptionalInteger(tt.maxLinks),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_UnixFSDirectoryMaxLinks(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxLinks int64
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid 0", maxLinks: 0, wantErr: false}, // 0 means no limit
|
||||
{name: "valid 1", maxLinks: 1, wantErr: false},
|
||||
{name: "valid 1000", maxLinks: 1000, wantErr: false},
|
||||
{name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be non-negative"},
|
||||
{name: "invalid -100", maxLinks: -100, wantErr: true, errMsg: "must be non-negative"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
UnixFSDirectoryMaxLinks: *NewOptionalInteger(tt.maxLinks),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_BatchMax(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxNodes int64
|
||||
maxSize int64
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid nodes 1", maxNodes: 1, maxSize: -999, wantErr: false},
|
||||
{name: "valid nodes 128", maxNodes: 128, maxSize: -999, wantErr: false},
|
||||
{name: "valid size 1", maxNodes: -999, maxSize: 1, wantErr: false},
|
||||
{name: "valid size 20MB", maxNodes: -999, maxSize: 20 << 20, wantErr: false},
|
||||
{name: "invalid nodes 0", maxNodes: 0, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"},
|
||||
{name: "invalid nodes -1", maxNodes: -1, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"},
|
||||
{name: "invalid size 0", maxNodes: -999, maxSize: 0, wantErr: true, errMsg: "BatchMaxSize must be positive"},
|
||||
{name: "invalid size -1", maxNodes: -999, maxSize: -1, wantErr: true, errMsg: "BatchMaxSize must be positive"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{}
|
||||
if tt.maxNodes != -999 {
|
||||
cfg.BatchMaxNodes = *NewOptionalInteger(tt.maxNodes)
|
||||
}
|
||||
if tt.maxSize != -999 {
|
||||
cfg.BatchMaxSize = *NewOptionalInteger(tt.maxSize)
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("ValidateImportConfig() expected error, got nil")
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_UnixFSChunker(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
chunker string
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid size-262144", chunker: "size-262144", wantErr: false},
|
||||
{name: "valid size-1", chunker: "size-1", wantErr: false},
|
||||
{name: "valid size-1048576", chunker: "size-1048576", wantErr: false},
|
||||
{name: "valid rabin", chunker: "rabin-128-256-512", wantErr: false},
|
||||
{name: "valid rabin min", chunker: "rabin-16-32-64", wantErr: false},
|
||||
{name: "valid buzhash", chunker: "buzhash", wantErr: false},
|
||||
{name: "invalid size-", chunker: "size-", wantErr: true, errMsg: "invalid format"},
|
||||
{name: "invalid size-abc", chunker: "size-abc", wantErr: true, errMsg: "invalid format"},
|
||||
{name: "invalid rabin-", chunker: "rabin-", wantErr: true, errMsg: "invalid format"},
|
||||
{name: "invalid rabin-128", chunker: "rabin-128", wantErr: true, errMsg: "invalid format"},
|
||||
{name: "invalid rabin-128-256", chunker: "rabin-128-256", wantErr: true, errMsg: "invalid format"},
|
||||
{name: "invalid rabin-a-b-c", chunker: "rabin-a-b-c", wantErr: true, errMsg: "invalid format"},
|
||||
{name: "invalid unknown", chunker: "unknown", wantErr: true, errMsg: "invalid format"},
|
||||
{name: "invalid empty", chunker: "", wantErr: true, errMsg: "invalid format"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
UnixFSChunker: *NewOptionalString(tt.chunker),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("ValidateImportConfig() expected error for chunker=%s, got nil", tt.chunker)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error for chunker=%s: %v", tt.chunker, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_HashFunction(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hashFunc string
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid sha2-256", hashFunc: "sha2-256", wantErr: false},
|
||||
{name: "valid sha2-512", hashFunc: "sha2-512", wantErr: false},
|
||||
{name: "valid sha3-256", hashFunc: "sha3-256", wantErr: false},
|
||||
{name: "valid blake2b-256", hashFunc: "blake2b-256", wantErr: false},
|
||||
{name: "valid blake3", hashFunc: "blake3", wantErr: false},
|
||||
{name: "invalid unknown", hashFunc: "unknown-hash", wantErr: true, errMsg: "unrecognized"},
|
||||
{name: "invalid empty", hashFunc: "", wantErr: true, errMsg: "unrecognized"},
|
||||
}
|
||||
|
||||
// Check for hashes that exist but are not allowed
|
||||
// MD5 should exist but not be allowed
|
||||
if code, ok := mh.Names["md5"]; ok {
|
||||
tests = append(tests, struct {
|
||||
name string
|
||||
hashFunc string
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{name: "md5 not allowed", hashFunc: "md5", wantErr: true, errMsg: "not allowed"})
|
||||
_ = code // use the variable
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
HashFunction: *NewOptionalString(tt.hashFunc),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("ValidateImportConfig() expected error for hashFunc=%s, got nil", tt.hashFunc)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error for hashFunc=%s: %v", tt.hashFunc, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_DefaultValue(t *testing.T) {
|
||||
// Test that default (unset) value doesn't trigger validation
|
||||
cfg := &Import{}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
if err != nil {
|
||||
t.Errorf("ValidateImportConfig() unexpected error for default config: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsValidChunker(t *testing.T) {
|
||||
tests := []struct {
|
||||
chunker string
|
||||
want bool
|
||||
}{
|
||||
{"buzhash", true},
|
||||
{"size-262144", true},
|
||||
{"size-1", true},
|
||||
{"size-0", false}, // 0 is not valid - must be positive
|
||||
{"size-9999999", true},
|
||||
{"rabin-128-256-512", true},
|
||||
{"rabin-16-32-64", true},
|
||||
{"rabin-1-2-3", true},
|
||||
{"rabin-512-256-128", false}, // Invalid ordering: min > avg > max
|
||||
{"rabin-256-128-512", false}, // Invalid ordering: min > avg
|
||||
{"rabin-128-512-256", false}, // Invalid ordering: avg > max
|
||||
|
||||
{"", false},
|
||||
{"size-", false},
|
||||
{"size-abc", false},
|
||||
{"size--1", false},
|
||||
{"rabin-", false},
|
||||
{"rabin-128", false},
|
||||
{"rabin-128-256", false},
|
||||
{"rabin-128-256-512-1024", false},
|
||||
{"rabin-a-b-c", false},
|
||||
{"unknown", false},
|
||||
{"buzzhash", false}, // typo
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.chunker, func(t *testing.T) {
|
||||
if got := isValidChunker(tt.chunker); got != tt.want {
|
||||
t.Errorf("isValidChunker(%q) = %v, want %v", tt.chunker, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsPowerOfTwo(t *testing.T) {
|
||||
tests := []struct {
|
||||
n int64
|
||||
want bool
|
||||
}{
|
||||
{0, false},
|
||||
{1, true},
|
||||
{2, true},
|
||||
{3, false},
|
||||
{4, true},
|
||||
{5, false},
|
||||
{6, false},
|
||||
{7, false},
|
||||
{8, true},
|
||||
{16, true},
|
||||
{32, true},
|
||||
{64, true},
|
||||
{100, false},
|
||||
{128, true},
|
||||
{256, true},
|
||||
{512, true},
|
||||
{1024, true},
|
||||
{2048, true},
|
||||
{-1, false},
|
||||
{-8, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
if got := isPowerOfTwo(tt.n); got != tt.want {
|
||||
t.Errorf("isPowerOfTwo(%d) = %v, want %v", tt.n, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -7,6 +7,7 @@ import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/pebble/v2"
|
||||
"github.com/ipfs/kubo/core/coreiface/options"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@ -22,11 +23,6 @@ func Init(out io.Writer, nBitsForKeypair int) (*Config, error) {
|
||||
}
|
||||
|
||||
func InitWithIdentity(identity Identity) (*Config, error) {
|
||||
bootstrapPeers, err := DefaultBootstrapPeers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
datastore := DefaultDatastoreConfig()
|
||||
|
||||
conf := &Config{
|
||||
@ -39,7 +35,7 @@ func InitWithIdentity(identity Identity) (*Config, error) {
|
||||
Addresses: addressesConfig(),
|
||||
|
||||
Datastore: datastore,
|
||||
Bootstrap: BootstrapPeerStrings(bootstrapPeers),
|
||||
Bootstrap: []string{AutoPlaceholder},
|
||||
Identity: identity,
|
||||
Discovery: Discovery{
|
||||
MDNS: MDNS{
|
||||
@ -47,20 +43,16 @@ func InitWithIdentity(identity Identity) (*Config, error) {
|
||||
},
|
||||
},
|
||||
|
||||
Routing: Routing{
|
||||
Type: nil,
|
||||
Methods: nil,
|
||||
Routers: nil,
|
||||
},
|
||||
|
||||
// setup the node mount points.
|
||||
Mounts: Mounts{
|
||||
IPFS: "/ipfs",
|
||||
IPNS: "/ipns",
|
||||
MFS: "/mfs",
|
||||
},
|
||||
|
||||
Ipns: Ipns{
|
||||
ResolveCacheSize: 128,
|
||||
ResolveCacheSize: 128,
|
||||
DelegatedPublishers: []string{AutoPlaceholder},
|
||||
},
|
||||
|
||||
Gateway: Gateway{
|
||||
@ -68,19 +60,16 @@ func InitWithIdentity(identity Identity) (*Config, error) {
|
||||
NoFetch: false,
|
||||
HTTPHeaders: map[string][]string{},
|
||||
},
|
||||
Reprovider: Reprovider{
|
||||
Interval: nil,
|
||||
Strategy: nil,
|
||||
},
|
||||
Pinning: Pinning{
|
||||
RemoteServices: map[string]RemotePinningService{},
|
||||
},
|
||||
DNS: DNS{
|
||||
Resolvers: map[string]string{},
|
||||
Resolvers: map[string]string{
|
||||
".": AutoPlaceholder,
|
||||
},
|
||||
},
|
||||
Migration: Migration{
|
||||
DownloadSources: []string{},
|
||||
Keep: "",
|
||||
Routing: Routing{
|
||||
DelegatedRouters: []string{AutoPlaceholder},
|
||||
},
|
||||
}
|
||||
|
||||
@ -99,6 +88,9 @@ const DefaultConnMgrLowWater = 32
|
||||
// grace period.
|
||||
const DefaultConnMgrGracePeriod = time.Second * 20
|
||||
|
||||
// DefaultConnMgrSilencePeriod controls how often the connection manager enforces the limits.
|
||||
const DefaultConnMgrSilencePeriod = time.Second * 10
|
||||
|
||||
// DefaultConnMgrType is the default value for the connection managers
|
||||
// type.
|
||||
const DefaultConnMgrType = "basic"
|
||||
@ -138,7 +130,38 @@ func DefaultDatastoreConfig() Datastore {
|
||||
}
|
||||
}
|
||||
|
||||
func pebbleSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "pebbleds",
|
||||
"prefix": "pebble.datastore",
|
||||
"path": "pebbleds",
|
||||
"formatMajorVersion": int(pebble.FormatNewest),
|
||||
}
|
||||
}
|
||||
|
||||
func pebbleSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "measure",
|
||||
"prefix": "pebble.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"formatMajorVersion": int(pebble.FormatNewest),
|
||||
"type": "pebbleds",
|
||||
"path": "pebbleds",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func badgerSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "badgerds",
|
||||
"prefix": "badger.datastore",
|
||||
"path": "badgerds",
|
||||
"syncWrites": false,
|
||||
"truncate": true,
|
||||
}
|
||||
}
|
||||
|
||||
func badgerSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "measure",
|
||||
"prefix": "badger.datastore",
|
||||
@ -152,6 +175,29 @@ func badgerSpec() map[string]interface{} {
|
||||
}
|
||||
|
||||
func flatfsSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "mount",
|
||||
"mounts": []interface{}{
|
||||
map[string]interface{}{
|
||||
"mountpoint": "/blocks",
|
||||
"type": "flatfs",
|
||||
"prefix": "flatfs.datastore",
|
||||
"path": "blocks",
|
||||
"sync": false,
|
||||
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"mountpoint": "/",
|
||||
"type": "levelds",
|
||||
"prefix": "leveldb.datastore",
|
||||
"path": "datastore",
|
||||
"compression": "none",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func flatfsSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "mount",
|
||||
"mounts": []interface{}{
|
||||
@ -162,7 +208,7 @@ func flatfsSpec() map[string]interface{} {
|
||||
"child": map[string]interface{}{
|
||||
"type": "flatfs",
|
||||
"path": "blocks",
|
||||
"sync": true,
|
||||
"sync": false,
|
||||
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
|
||||
},
|
||||
},
|
||||
|
||||
@ -1,11 +1,23 @@
|
||||
package config
|
||||
|
||||
const (
|
||||
// DefaultMFSNoFlushLimit is the default limit for consecutive unflushed MFS operations
|
||||
DefaultMFSNoFlushLimit = 256
|
||||
)
|
||||
|
||||
type Internal struct {
|
||||
// All marked as omitempty since we are expecting to make changes to all subcomponents of Internal
|
||||
Bitswap *InternalBitswap `json:",omitempty"`
|
||||
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"`
|
||||
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"` // moved to Import.UnixFSHAMTDirectorySizeThreshold
|
||||
Libp2pForceReachability *OptionalString `json:",omitempty"`
|
||||
BackupBootstrapInterval *OptionalDuration `json:",omitempty"`
|
||||
// MFSNoFlushLimit controls the maximum number of consecutive
|
||||
// MFS operations allowed with --flush=false before requiring a manual flush.
|
||||
// This prevents unbounded memory growth and ensures data consistency.
|
||||
// Set to 0 to disable limiting (old behavior, may cause high memory usage)
|
||||
// This is an EXPERIMENTAL feature and may change or be removed in future releases.
|
||||
// See https://github.com/ipfs/kubo/issues/10842
|
||||
MFSNoFlushLimit *OptionalInteger `json:",omitempty"`
|
||||
}
|
||||
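// Editor's illustrative sketch (not part of this change): resolving the
// effective limit for consecutive --flush=false MFS operations. An unset
// field falls back to DefaultMFSNoFlushLimit (256), assuming the usual
// OptionalInteger.WithDefault accessor that treats nil as "use the default";
// a configured value of 0 disables limiting entirely.
func effectiveMFSNoFlushLimit(internal *Internal) int64 {
	return internal.MFSNoFlushLimit.WithDefault(DefaultMFSNoFlushLimit)
}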
|
||||
type InternalBitswap struct {
|
||||
@ -14,5 +26,53 @@ type InternalBitswap struct {
|
||||
EngineTaskWorkerCount OptionalInteger
|
||||
MaxOutstandingBytesPerPeer OptionalInteger
|
||||
ProviderSearchDelay OptionalDuration
|
||||
ProviderSearchMaxResults OptionalInteger
|
||||
WantHaveReplaceSize OptionalInteger
|
||||
BroadcastControl *BitswapBroadcastControl
|
||||
}
|
||||
|
||||
type BitswapBroadcastControl struct {
|
||||
// Enable enables or disables broadcast control functionality. Setting this
|
||||
// to false disables broadcast control functionality and restores the
|
||||
// previous broadcast behavior of sending broadcasts to all peers. When
|
||||
// disabled, all other BroadcastControl configuration items are ignored.
|
||||
// Default is [DefaultBroadcastControlEnable].
|
||||
Enable Flag `json:",omitempty"`
|
||||
// MaxPeers sets a hard limit on the number of peers to send broadcasts to.
|
||||
// A value of 0 means no broadcasts are sent. A value of -1 means there is
|
||||
// no limit. Default is [DefaultBroadcastControlMaxPeers].
|
||||
MaxPeers OptionalInteger `json:",omitempty"`
|
||||
// LocalPeers enables or disables broadcast control for peers on the local
|
||||
// network. If false, then always broadcast to peers on the local network.
|
||||
// If true, apply broadcast control to local peers. Default is
|
||||
// [DefaultBroadcastControlLocalPeers].
|
||||
LocalPeers Flag `json:",omitempty"`
|
||||
// PeeredPeers enables or disables broadcast reduction for peers configured
|
||||
// for peering. If false, then always broadcast to peers configured for
|
||||
// peering. If true, apply broadcast reduction to peered peers. Default is
|
||||
// [DefaultBroadcastControlPeeredPeers].
|
||||
PeeredPeers Flag `json:",omitempty"`
|
||||
// MaxRandomPeers is the number of peers to broadcast to anyway, even
|
||||
// though broadcast reduction logic has determined that they are not
|
||||
// broadcast targets. Setting this to a non-zero value ensures at least
|
||||
// this number of random peers receives a broadcast. This may be helpful in
|
||||
// cases where peers that are not receiving broadcasts may have wanted
|
||||
// blocks. Default is [DefaultBroadcastControlMaxRandomPeers].
|
||||
MaxRandomPeers OptionalInteger `json:",omitempty"`
|
||||
// SendToPendingPeers enables or disables sending broadcasts to any peers
|
||||
// to which there is a pending message to send. When enabled, this sends
|
||||
// broadcasts to many more peers, but does so in a way that does not
|
||||
// increase the number of separate broadcast messages. There is still the
|
||||
// increased cost of the recipients having to process and respond to the
|
||||
// broadcasts. Default is [DefaultBroadcastControlSendToPendingPeers].
|
||||
SendToPendingPeers Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultBroadcastControlEnable = true // Enabled
|
||||
DefaultBroadcastControlMaxPeers = -1 // Unlimited
|
||||
DefaultBroadcastControlLocalPeers = false // No control of local
|
||||
DefaultBroadcastControlPeeredPeers = false // No control of peered
|
||||
DefaultBroadcastControlMaxRandomPeers = 0 // No randoms
|
||||
DefaultBroadcastControlSendToPendingPeers = false // Disabled
|
||||
)
|
||||
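// Editor's illustrative sketch (not part of this change): turning broadcast
// control off restores the previous behavior of broadcasting to all peers;
// every other BroadcastControl field is then ignored.
func exampleBroadcastControlOff() InternalBitswap {
	return InternalBitswap{
		BroadcastControl: &BitswapBroadcastControl{Enable: False},
	}
}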
|
||||
@ -20,4 +20,7 @@ type Ipns struct {
|
||||
|
||||
// Enable namesys pubsub (--enable-namesys-pubsub)
|
||||
UsePubsub Flag `json:",omitempty"`
|
||||
|
||||
// Simplified configuration for delegated IPNS publishers
|
||||
DelegatedPublishers []string
|
||||
}
|
||||
|
||||
@ -2,16 +2,18 @@ package config
|
||||
|
||||
const DefaultMigrationKeep = "cache"
|
||||
|
||||
var DefaultMigrationDownloadSources = []string{"HTTPS", "IPFS"}
|
||||
// DefaultMigrationDownloadSources defines the default download sources for legacy migrations (repo versions <16).
|
||||
// Only HTTPS is supported for legacy migrations. IPFS downloads are not supported.
|
||||
var DefaultMigrationDownloadSources = []string{"HTTPS"}
|
||||
|
||||
// Migration configures how migrations are downloaded and if the downloads are
|
||||
// added to IPFS locally.
|
||||
// Migration configures how legacy migrations are downloaded (repo versions <16).
|
||||
//
|
||||
// DEPRECATED: This configuration only applies to legacy external migrations for repository
|
||||
// versions below 16. Modern repositories (v16+) use embedded migrations that do not require
|
||||
// external downloads. These settings will be ignored for modern repository versions.
|
||||
type Migration struct {
|
||||
// Sources in order of preference, where "IPFS" means use IPFS and "HTTPS"
|
||||
// means use default gateways. Any other values are interpreted as
|
||||
// hostnames for custom gateways. Empty list means "use default sources".
|
||||
DownloadSources []string
|
||||
// Whether or not to keep the migration after downloading it.
|
||||
// Options are "discard", "cache", "pin". Empty string for default.
|
||||
Keep string
|
||||
// DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16).
|
||||
DownloadSources []string `json:",omitempty"`
|
||||
// DEPRECATED: This field is deprecated and ignored for modern repositories (repo versions ≥16).
|
||||
Keep string `json:",omitempty"`
|
||||
}
|
||||
|
||||
@ -4,5 +4,6 @@ package config
|
||||
type Mounts struct {
|
||||
IPFS string
|
||||
IPNS string
|
||||
MFS string
|
||||
FuseAllowOther bool
|
||||
}
|
||||
|
||||
@ -7,5 +7,5 @@ type Plugins struct {
|
||||
|
||||
type Plugin struct {
|
||||
Disabled bool
|
||||
Config interface{}
|
||||
Config interface{} `json:",omitempty"`
|
||||
}
|
||||
|
||||
@ -86,6 +86,13 @@ is useful when using the daemon in test environments.`,
|
||||
|
||||
c.Bootstrap = []string{}
|
||||
c.Discovery.MDNS.Enabled = false
|
||||
c.AutoTLS.Enabled = False
|
||||
c.AutoConf.Enabled = False
|
||||
|
||||
// Explicitly set autoconf-controlled fields to empty when autoconf is disabled
|
||||
c.DNS.Resolvers = map[string]string{}
|
||||
c.Routing.DelegatedRouters = []string{}
|
||||
c.Ipns.DelegatedPublishers = []string{}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
@ -96,14 +103,14 @@ Inverse profile of the test profile.`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Addresses = addressesConfig()
|
||||
|
||||
bootstrapPeers, err := DefaultBootstrapPeers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Bootstrap = appendSingle(c.Bootstrap, BootstrapPeerStrings(bootstrapPeers))
|
||||
// Use AutoConf system for bootstrap peers
|
||||
c.Bootstrap = []string{AutoPlaceholder}
|
||||
c.AutoConf.Enabled = Default
|
||||
c.AutoConf.URL = nil // Clear URL to use implicit default
|
||||
|
||||
c.Swarm.DisableNatPortMap = false
|
||||
c.Discovery.MDNS.Enabled = true
|
||||
c.AutoTLS.Enabled = Default
|
||||
return nil
|
||||
},
|
||||
},
|
||||
@ -135,7 +142,11 @@ You should use this datastore if:
|
||||
* You want to minimize memory usage.
|
||||
* You are ok with the default speed of data import, or prefer to use --nocopy.
|
||||
|
||||
This profile may only be applied when first initializing the node.
|
||||
See configuration documentation at:
|
||||
https://github.com/ipfs/kubo/blob/master/docs/datastores.md#flatfs
|
||||
|
||||
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
via 'ipfs init --profile flatfs'
|
||||
`,
|
||||
|
||||
InitOnly: true,
|
||||
@ -144,6 +155,60 @@ This profile may only be applied when first initializing the node.
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"flatfs-measure": {
|
||||
Description: `Configures the node to use the flatfs datastore with metrics tracking wrapper.
|
||||
Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus
|
||||
|
||||
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
via 'ipfs init --profile flatfs-measure'
|
||||
`,
|
||||
|
||||
InitOnly: true,
|
||||
Transform: func(c *Config) error {
|
||||
c.Datastore.Spec = flatfsSpecMeasure()
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"pebbleds": {
|
||||
Description: `Configures the node to use the pebble high-performance datastore.
|
||||
|
||||
Pebble is a LevelDB/RocksDB inspired key-value store focused on performance
|
||||
and internal usage by CockroachDB.
|
||||
You should use this datastore if:
|
||||
|
||||
- You need a datastore that is focused on performance.
|
||||
- You need reliability by default, but may choose to disable WAL for maximum performance when reliability is not critical.
|
||||
- This datastore is good for multi-terabyte data sets.
|
||||
- May benefit from tuning depending on read/write patterns and throughput.
|
||||
- Performance is helped significantly by running on a system with plenty of memory.
|
||||
|
||||
See configuration documentation at:
|
||||
https://github.com/ipfs/kubo/blob/master/docs/datastores.md#pebbleds
|
||||
|
||||
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
via 'ipfs init --profile pebbleds'
|
||||
`,
|
||||
|
||||
InitOnly: true,
|
||||
Transform: func(c *Config) error {
|
||||
c.Datastore.Spec = pebbleSpec()
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"pebbleds-measure": {
|
||||
Description: `Configures the node to use the pebble datastore with metrics tracking wrapper.
|
||||
Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus
|
||||
|
||||
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
via 'ipfs init --profile pebbleds-measure'
|
||||
`,
|
||||
|
||||
InitOnly: true,
|
||||
Transform: func(c *Config) error {
|
||||
c.Datastore.Spec = pebbleSpecMeasure()
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"badgerds": {
|
||||
Description: `Configures the node to use the legacy badgerv1 datastore.
|
||||
|
||||
@ -160,7 +225,12 @@ Other caveats:
|
||||
* Good for medium-size datastores, but may run into performance issues
|
||||
if your dataset is bigger than a terabyte.
|
||||
|
||||
This profile may only be applied when first initializing the node.`,
|
||||
See configuration documentation at:
|
||||
https://github.com/ipfs/kubo/blob/master/docs/datastores.md#badgerds
|
||||
|
||||
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
via 'ipfs init --profile badgerds'
|
||||
`,
|
||||
|
||||
InitOnly: true,
|
||||
Transform: func(c *Config) error {
|
||||
@ -168,6 +238,20 @@ This profile may only be applied when first initializing the node.`,
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"badgerds-measure": {
|
||||
Description: `Configures the node to use the legacy badgerv1 datastore with metrics wrapper.
|
||||
Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus
|
||||
|
||||
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
via 'ipfs init --profile badgerds-measure'
|
||||
`,
|
||||
|
||||
InitOnly: true,
|
||||
Transform: func(c *Config) error {
|
||||
c.Datastore.Spec = badgerSpecMeasure()
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"lowpower": {
|
||||
Description: `Reduces daemon overhead on the system. May affect node
|
||||
functionality - performance of content discovery and data
|
||||
@ -191,25 +275,25 @@ fetching may be degraded.
|
||||
},
|
||||
},
|
||||
"announce-off": {
|
||||
Description: `Disables Reprovide system (and announcing to Amino DHT).
|
||||
Description: `Disables Provide system (announcing to Amino DHT).
|
||||
|
||||
USE WITH CAUTION:
|
||||
The main use case for this is setups with manual Peering.Peers config.
|
||||
Data from this node will not be announced on the DHT. This will make
|
||||
DHT-based routing an data retrieval impossible if this node is the only
|
||||
DHT-based routing and data retrieval impossible if this node is the only
|
||||
one hosting it, and other peers are not already connected to it.
|
||||
`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Reprovider.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
|
||||
c.Experimental.StrategicProviding = true // this is not a typo (the name is counter-intuitive)
|
||||
c.Provide.Enabled = False
|
||||
c.Provide.DHT.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"announce-on": {
|
||||
Description: `Re-enables Reprovide system (reverts announce-off profile).`,
|
||||
Description: `Re-enables Provide system (reverts announce-off profile).`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Reprovider.Interval = NewOptionalDuration(DefaultReproviderInterval) // have to apply explicit default because nil would be ignored
|
||||
c.Experimental.StrategicProviding = false // this is not a typo (the name is counter-intuitive)
|
||||
c.Provide.Enabled = True
|
||||
c.Provide.DHT.Interval = NewOptionalDuration(DefaultProvideDHTInterval) // have to apply explicit default because nil would be ignored
|
||||
return nil
|
||||
},
|
||||
},
|
||||
@ -229,24 +313,77 @@ fetching may be degraded.
|
||||
},
|
||||
},
|
||||
"legacy-cid-v0": {
|
||||
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks.`,
|
||||
|
||||
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks. This is likely the least optimal preset; use it only if legacy behavior is required.`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Import.CidVersion = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSRawLeaves = False
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-262144")
|
||||
c.Import.HashFunction = *NewOptionalString("sha2-256")
|
||||
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"test-cid-v1": {
|
||||
Description: `Makes UnixFS import produce modern CIDv1 with raw leaves, sha2-256 and 1 MiB chunks.`,
|
||||
|
||||
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT above 256KiB).`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Import.CidVersion = *NewOptionalInteger(1)
|
||||
c.Import.UnixFSRawLeaves = True
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-1048576")
|
||||
c.Import.HashFunction = *NewOptionalString("sha2-256")
|
||||
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"test-cid-v1-wide": {
|
||||
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1MiB chunks and wider file DAGs (max 1024 links per node type, switch dir to HAMT above 1MiB).`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Import.CidVersion = *NewOptionalInteger(1)
|
||||
c.Import.UnixFSRawLeaves = True
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1MiB
|
||||
c.Import.HashFunction = *NewOptionalString("sha2-256")
|
||||
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based Import.UnixFSHAMTDirectorySizeThreshold instead
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("1MiB") // 1MiB
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"autoconf-on": {
|
||||
Description: `Sets configuration to use implicit defaults from remote autoconf service.
|
||||
Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to "auto".
|
||||
This profile requires AutoConf to be enabled and configured.`,
|
||||
|
||||
Transform: func(c *Config) error {
|
||||
c.Bootstrap = []string{AutoPlaceholder}
|
||||
c.DNS.Resolvers = map[string]string{
|
||||
".": AutoPlaceholder,
|
||||
}
|
||||
c.Routing.DelegatedRouters = []string{AutoPlaceholder}
|
||||
c.Ipns.DelegatedPublishers = []string{AutoPlaceholder}
|
||||
c.AutoConf.Enabled = True
|
||||
if c.AutoConf.URL == nil {
|
||||
c.AutoConf.URL = NewOptionalString(DefaultAutoConfURL)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"autoconf-off": {
|
||||
Description: `Disables AutoConf and sets networking fields to empty for manual configuration.
|
||||
Bootstrap peers, DNS resolvers, delegated routers, and IPNS delegated publishers are set to empty.
|
||||
Use this when you want normal networking but prefer manual control over all endpoints.`,
|
||||
|
||||
Transform: func(c *Config) error {
|
||||
c.Bootstrap = nil
|
||||
c.DNS.Resolvers = nil
|
||||
c.Routing.DelegatedRouters = nil
|
||||
c.Ipns.DelegatedPublishers = nil
|
||||
c.AutoConf.Enabled = False
|
||||
return nil
|
||||
},
|
||||
},
|
||||
|
||||
204
config/provide.go
Normal file
204
config/provide.go
Normal file
@ -0,0 +1,204 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/amino"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultProvideEnabled = true
|
||||
DefaultProvideStrategy = "all"
|
||||
|
||||
// DHT provider defaults
|
||||
DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
|
||||
DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers
|
||||
DefaultProvideDHTSweepEnabled = true
|
||||
DefaultProvideDHTResumeEnabled = true
|
||||
DefaultProvideDHTDedicatedPeriodicWorkers = 2
|
||||
DefaultProvideDHTDedicatedBurstWorkers = 1
|
||||
DefaultProvideDHTMaxProvideConnsPerWorker = 20
|
||||
DefaultProvideDHTKeystoreBatchSize = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes)
|
||||
DefaultProvideDHTOfflineDelay = 2 * time.Hour
|
||||
|
||||
// DefaultFastProvideTimeout is the maximum time allowed for fast-provide operations.
|
||||
// Prevents hanging on network issues when providing root CID.
|
||||
// 10 seconds is sufficient for DHT operations with sweep provider or accelerated client.
|
||||
DefaultFastProvideTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
type ProvideStrategy int
|
||||
|
||||
const (
|
||||
ProvideStrategyAll ProvideStrategy = 1 << iota
|
||||
ProvideStrategyPinned
|
||||
ProvideStrategyRoots
|
||||
ProvideStrategyMFS
|
||||
)
|
||||
|
||||
// Provide configures both immediate CID announcements (provide operations) for new content
|
||||
// and periodic re-announcements of existing CIDs (reprovide operations).
|
||||
// This section combines the functionality previously split between Provider and Reprovider.
|
||||
type Provide struct {
|
||||
// Enabled controls whether both provide and reprovide systems are enabled.
|
||||
// When disabled, the node will not announce any content to the routing system.
|
||||
Enabled Flag `json:",omitempty"`
|
||||
|
||||
// Strategy determines which CIDs are announced to the routing system.
|
||||
// Default: DefaultProvideStrategy
|
||||
Strategy *OptionalString `json:",omitempty"`
|
||||
|
||||
// DHT configures DHT-specific provide and reprovide settings.
|
||||
DHT ProvideDHT
|
||||
}
|
||||
|
||||
// ProvideDHT configures DHT provider settings for both immediate announcements
|
||||
// and periodic reprovides.
|
||||
type ProvideDHT struct {
|
||||
// Interval sets the time between rounds of reproviding local content
|
||||
// to the routing system. Set to "0" to disable content reproviding.
|
||||
// Default: DefaultProvideDHTInterval
|
||||
Interval *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// MaxWorkers sets the maximum number of concurrent workers for provide operations.
|
||||
// When SweepEnabled is false: controls NEW CID announcements only.
|
||||
// When SweepEnabled is true: controls total worker pool for all operations.
|
||||
// Default: DefaultProvideDHTMaxWorkers
|
||||
MaxWorkers *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// SweepEnabled activates the sweeping reprovider system which spreads
|
||||
// reprovide operations over time.
|
||||
// Default: DefaultProvideDHTSweepEnabled
|
||||
SweepEnabled Flag `json:",omitempty"`
|
||||
|
||||
// DedicatedPeriodicWorkers sets workers dedicated to periodic reprovides (sweep mode only).
|
||||
// Default: DefaultProvideDHTDedicatedPeriodicWorkers
|
||||
DedicatedPeriodicWorkers *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// DedicatedBurstWorkers sets workers dedicated to burst provides (sweep mode only).
|
||||
// Default: DefaultProvideDHTDedicatedBurstWorkers
|
||||
DedicatedBurstWorkers *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// MaxProvideConnsPerWorker sets concurrent connections per worker for sending provider records (sweep mode only).
|
||||
// Default: DefaultProvideDHTMaxProvideConnsPerWorker
|
||||
MaxProvideConnsPerWorker *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// KeystoreBatchSize sets the batch size for keystore operations during reprovide refresh (sweep mode only).
|
||||
// Default: DefaultProvideDHTKeystoreBatchSize
|
||||
KeystoreBatchSize *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only).
|
||||
// Default: DefaultProvideDHTOfflineDelay
|
||||
OfflineDelay *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// ResumeEnabled controls whether the provider resumes from its previous state on restart.
|
||||
// When enabled, the provider persists its reprovide cycle state and provide queue to the datastore,
|
||||
// and restores them on restart. When disabled, the provider starts fresh on each restart.
|
||||
// Default: true
|
||||
ResumeEnabled Flag `json:",omitempty"`
|
||||
}
|
||||
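// Editor's illustrative sketch (not part of this change): a Provide section
// that keeps the sweep provider enabled but shrinks the worker pool and only
// announces pinned content and pin roots; fields left unset keep the
// Default* values above.
func exampleProvideSection() Provide {
	return Provide{
		Enabled:  True,
		Strategy: NewOptionalString("pinned+roots"),
		DHT: ProvideDHT{
			MaxWorkers:   NewOptionalInteger(4),
			SweepEnabled: True,
		},
	}
}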
|
||||
func ParseProvideStrategy(s string) ProvideStrategy {
|
||||
var strategy ProvideStrategy
|
||||
for _, part := range strings.Split(s, "+") {
|
||||
switch part {
|
||||
case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
|
||||
return ProvideStrategyAll
|
||||
case "pinned":
|
||||
strategy |= ProvideStrategyPinned
|
||||
case "roots":
|
||||
strategy |= ProvideStrategyRoots
|
||||
case "mfs":
|
||||
strategy |= ProvideStrategyMFS
|
||||
}
|
||||
}
|
||||
return strategy
|
||||
}
|
||||
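// Editor's illustrative sketch (not part of this change): strategies combine
// with "+" into a bitmask, while "all", the deprecated "flat", and the empty
// string all short-circuit to ProvideStrategyAll.
func exampleParseProvideStrategy() bool {
	combined := ParseProvideStrategy("pinned+mfs")
	return combined == ProvideStrategyPinned|ProvideStrategyMFS // true
}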
|
||||
// ValidateProvideConfig validates the Provide configuration according to DHT requirements.
|
||||
func ValidateProvideConfig(cfg *Provide) error {
|
||||
// Validate Provide.DHT.Interval
|
||||
if !cfg.DHT.Interval.IsDefault() {
|
||||
interval := cfg.DHT.Interval.WithDefault(DefaultProvideDHTInterval)
|
||||
if interval > amino.DefaultProvideValidity {
|
||||
return fmt.Errorf("Provide.DHT.Interval (%v) must be less than or equal to DHT provider record validity (%v)", interval, amino.DefaultProvideValidity)
|
||||
}
|
||||
if interval < 0 {
|
||||
return fmt.Errorf("Provide.DHT.Interval must be non-negative, got %v", interval)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate MaxWorkers
|
||||
if !cfg.DHT.MaxWorkers.IsDefault() {
|
||||
maxWorkers := cfg.DHT.MaxWorkers.WithDefault(DefaultProvideDHTMaxWorkers)
|
||||
if maxWorkers <= 0 {
|
||||
return fmt.Errorf("Provide.DHT.MaxWorkers must be positive, got %d", maxWorkers)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate DedicatedPeriodicWorkers
|
||||
if !cfg.DHT.DedicatedPeriodicWorkers.IsDefault() {
|
||||
workers := cfg.DHT.DedicatedPeriodicWorkers.WithDefault(DefaultProvideDHTDedicatedPeriodicWorkers)
|
||||
if workers < 0 {
|
||||
return fmt.Errorf("Provide.DHT.DedicatedPeriodicWorkers must be non-negative, got %d", workers)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate DedicatedBurstWorkers
|
||||
if !cfg.DHT.DedicatedBurstWorkers.IsDefault() {
|
||||
workers := cfg.DHT.DedicatedBurstWorkers.WithDefault(DefaultProvideDHTDedicatedBurstWorkers)
|
||||
if workers < 0 {
|
||||
return fmt.Errorf("Provide.DHT.DedicatedBurstWorkers must be non-negative, got %d", workers)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate MaxProvideConnsPerWorker
|
||||
if !cfg.DHT.MaxProvideConnsPerWorker.IsDefault() {
|
||||
conns := cfg.DHT.MaxProvideConnsPerWorker.WithDefault(DefaultProvideDHTMaxProvideConnsPerWorker)
|
||||
if conns <= 0 {
|
||||
return fmt.Errorf("Provide.DHT.MaxProvideConnsPerWorker must be positive, got %d", conns)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate KeystoreBatchSize
|
||||
if !cfg.DHT.KeystoreBatchSize.IsDefault() {
|
||||
batchSize := cfg.DHT.KeystoreBatchSize.WithDefault(DefaultProvideDHTKeystoreBatchSize)
|
||||
if batchSize <= 0 {
|
||||
return fmt.Errorf("Provide.DHT.KeystoreBatchSize must be positive, got %d", batchSize)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate OfflineDelay
|
||||
if !cfg.DHT.OfflineDelay.IsDefault() {
|
||||
delay := cfg.DHT.OfflineDelay.WithDefault(DefaultProvideDHTOfflineDelay)
|
||||
if delay < 0 {
|
||||
return fmt.Errorf("Provide.DHT.OfflineDelay must be non-negative, got %v", delay)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
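// Editor's illustrative sketch (not part of this change): the reprovide
// interval must stay within the Amino DHT provider-record validity window
// (48h), so the 22h default passes while 72h is rejected by the check above.
func exampleProvideIntervalValidation() (okErr, badErr error) {
	ok := &Provide{DHT: ProvideDHT{Interval: NewOptionalDuration(22 * time.Hour)}}
	bad := &Provide{DHT: ProvideDHT{Interval: NewOptionalDuration(72 * time.Hour)}}
	return ValidateProvideConfig(ok), ValidateProvideConfig(bad) // nil, non-nil
}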
|
||||
// ShouldProvideForStrategy determines if content should be provided based on the provide strategy
|
||||
// and content characteristics (pinned status, root status, MFS status).
|
||||
func ShouldProvideForStrategy(strategy ProvideStrategy, isPinned bool, isPinnedRoot bool, isMFS bool) bool {
|
||||
if strategy == ProvideStrategyAll {
|
||||
// 'all' strategy: always provide
|
||||
return true
|
||||
}
|
||||
|
||||
// For combined strategies, check each component
|
||||
if strategy&ProvideStrategyPinned != 0 && isPinned {
|
||||
return true
|
||||
}
|
||||
if strategy&ProvideStrategyRoots != 0 && isPinnedRoot {
|
||||
return true
|
||||
}
|
||||
if strategy&ProvideStrategyMFS != 0 && isMFS {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
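// Editor's illustrative sketch (not part of this change): with a combined
// strategy each component is checked independently (plain ifs, not else-if),
// so content matching any single component is provided.
func exampleShouldProvideCombined() bool {
	strategy := ProvideStrategyPinned | ProvideStrategyMFS
	// not pinned, not a pin root, but in MFS -> still provided via the MFS bit
	return ShouldProvideForStrategy(strategy, false, false, true) // true
}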
191
config/provide_test.go
Normal file
191
config/provide_test.go
Normal file
@ -0,0 +1,191 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseProvideStrategy(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expect ProvideStrategy
|
||||
}{
|
||||
{"all", ProvideStrategyAll},
|
||||
{"pinned", ProvideStrategyPinned},
|
||||
{"mfs", ProvideStrategyMFS},
|
||||
{"pinned+mfs", ProvideStrategyPinned | ProvideStrategyMFS},
|
||||
{"invalid", 0},
|
||||
{"all+invalid", ProvideStrategyAll},
|
||||
{"", ProvideStrategyAll},
|
||||
{"flat", ProvideStrategyAll}, // deprecated, maps to "all"
|
||||
{"flat+all", ProvideStrategyAll},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
result := ParseProvideStrategy(tt.input)
|
||||
if result != tt.expect {
|
||||
t.Errorf("ParseProvideStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateProvideConfig_Interval(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
interval time.Duration
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{"valid default (22h)", 22 * time.Hour, false, ""},
|
||||
{"valid max (48h)", 48 * time.Hour, false, ""},
|
||||
{"valid small (1h)", 1 * time.Hour, false, ""},
|
||||
{"valid zero (disabled)", 0, false, ""},
|
||||
{"invalid over limit (49h)", 49 * time.Hour, true, "must be less than or equal to DHT provider record validity"},
|
||||
{"invalid over limit (72h)", 72 * time.Hour, true, "must be less than or equal to DHT provider record validity"},
|
||||
{"invalid negative", -1 * time.Hour, true, "must be non-negative"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Provide{
|
||||
DHT: ProvideDHT{
|
||||
Interval: NewOptionalDuration(tt.interval),
|
||||
},
|
||||
}
|
||||
|
||||
err := ValidateProvideConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
require.Error(t, err, "expected error for interval=%v", tt.interval)
|
||||
if tt.errMsg != "" {
|
||||
assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch")
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err, "unexpected error for interval=%v", tt.interval)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateProvideConfig_MaxWorkers(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxWorkers int64
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{"valid default", 16, false, ""},
|
||||
{"valid high", 100, false, ""},
|
||||
{"valid low", 1, false, ""},
|
||||
{"invalid zero", 0, true, "must be positive"},
|
||||
{"invalid negative", -1, true, "must be positive"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Provide{
|
||||
DHT: ProvideDHT{
|
||||
MaxWorkers: NewOptionalInteger(tt.maxWorkers),
|
||||
},
|
||||
}
|
||||
|
||||
err := ValidateProvideConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
require.Error(t, err, "expected error for maxWorkers=%d", tt.maxWorkers)
|
||||
if tt.errMsg != "" {
|
||||
assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch")
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err, "unexpected error for maxWorkers=%d", tt.maxWorkers)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldProvideForStrategy(t *testing.T) {
|
||||
t.Run("all strategy always provides", func(t *testing.T) {
|
||||
// ProvideStrategyAll should return true regardless of flags
|
||||
testCases := []struct{ pinned, pinnedRoot, mfs bool }{
|
||||
{false, false, false},
|
||||
{true, true, true},
|
||||
{true, false, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
assert.True(t, ShouldProvideForStrategy(
|
||||
ProvideStrategyAll, tc.pinned, tc.pinnedRoot, tc.mfs))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("single strategies match only their flag", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
strategy ProvideStrategy
|
||||
pinned, pinnedRoot, mfs bool
|
||||
want bool
|
||||
}{
|
||||
{"pinned: matches when pinned=true", ProvideStrategyPinned, true, false, false, true},
|
||||
{"pinned: ignores other flags", ProvideStrategyPinned, false, true, true, false},
|
||||
|
||||
{"roots: matches when pinnedRoot=true", ProvideStrategyRoots, false, true, false, true},
|
||||
{"roots: ignores other flags", ProvideStrategyRoots, true, false, true, false},
|
||||
|
||||
{"mfs: matches when mfs=true", ProvideStrategyMFS, false, false, true, true},
|
||||
{"mfs: ignores other flags", ProvideStrategyMFS, true, true, false, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("combined strategies use OR logic (else-if bug fix)", func(t *testing.T) {
|
||||
// CRITICAL: Tests the fix where bitflag combinations (pinned+mfs) didn't work
|
||||
// because of else-if instead of separate if statements
|
||||
tests := []struct {
|
||||
name string
|
||||
strategy ProvideStrategy
|
||||
pinned, pinnedRoot, mfs bool
|
||||
want bool
|
||||
}{
|
||||
// pinned|mfs: provide if EITHER matches
|
||||
{"pinned|mfs when pinned", ProvideStrategyPinned | ProvideStrategyMFS, true, false, false, true},
|
||||
{"pinned|mfs when mfs", ProvideStrategyPinned | ProvideStrategyMFS, false, false, true, true},
|
||||
{"pinned|mfs when both", ProvideStrategyPinned | ProvideStrategyMFS, true, false, true, true},
|
||||
{"pinned|mfs when neither", ProvideStrategyPinned | ProvideStrategyMFS, false, false, false, false},
|
||||
|
||||
// roots|mfs
|
||||
{"roots|mfs when root", ProvideStrategyRoots | ProvideStrategyMFS, false, true, false, true},
|
||||
{"roots|mfs when mfs", ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true},
|
||||
{"roots|mfs when neither", ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false},
|
||||
|
||||
// pinned|roots
|
||||
{"pinned|roots when pinned", ProvideStrategyPinned | ProvideStrategyRoots, true, false, false, true},
|
||||
{"pinned|roots when root", ProvideStrategyPinned | ProvideStrategyRoots, false, true, false, true},
|
||||
{"pinned|roots when neither", ProvideStrategyPinned | ProvideStrategyRoots, false, false, false, false},
|
||||
|
||||
// triple combination
|
||||
{"all-three when any matches", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true},
|
||||
{"all-three when none match", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("zero strategy never provides", func(t *testing.T) {
|
||||
assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), false, false, false))
|
||||
assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), true, true, true))
|
||||
})
}
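// Illustrative sketch only (not part of this change): the "else-if bug fix" subtest above
// requires combined strategy bitflags to use OR logic. An implementation consistent with
// these tests checks each flag independently instead of chaining else-if, roughly as below.
// It assumes ProvideStrategyAll is a distinct flag value (otherwise the "all-three when
// none match" case could not be false); the sketch is deliberately not named
// ShouldProvideForStrategy to avoid implying it is the exact body used in this change.
func shouldProvideForStrategySketch(strategy ProvideStrategy, pinned, pinnedRoot, mfs bool) bool {
	if strategy&ProvideStrategyAll != 0 {
		return true
	}
	if strategy&ProvideStrategyPinned != 0 && pinned {
		return true
	}
	if strategy&ProvideStrategyRoots != 0 && pinnedRoot {
		return true
	}
	if strategy&ProvideStrategyMFS != 0 && mfs {
		return true
	}
	return false
}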
@ -1,5 +1,16 @@
|
||||
package config
// Provider configuration describes how NEW CIDs are announced the moment they are created.
// For periodic reprovide configuration, see Provide.*
//
// Deprecated: use Provide instead. This will be removed in a future release.
type Provider struct {
|
||||
Strategy string // Which keys to announce
|
||||
// Deprecated: use Provide.Enabled instead. This will be removed in a future release.
|
||||
Enabled Flag `json:",omitempty"`
|
||||
|
||||
// Deprecated: unused, you are likely looking for Provide.Strategy instead. This will be removed in a future release.
|
||||
Strategy *OptionalString `json:",omitempty"`
|
||||
|
||||
// Deprecated: use Provide.DHT.MaxWorkers instead. This will be removed in a future release.
|
||||
WorkerCount *OptionalInteger `json:",omitempty"`
|
||||
}
@ -1,13 +1,13 @@
|
||||
package config
|
||||
|
||||
import "time"
|
||||
|
||||
const (
|
||||
DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326
|
||||
DefaultReproviderStrategy = "all"
|
||||
)
|
||||
|
||||
// Reprovider configuration describes how CIDs from the local datastore are periodically re-announced to routing systems.
// For the provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provide.*
//
// Deprecated: use Provide instead. This will be removed in a future release.
type Reprovider struct {
|
||||
Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network
|
||||
Strategy *OptionalString `json:",omitempty"` // Which keys to announce
|
||||
// Deprecated: use Provide.DHT.Interval instead. This will be removed in a future release.
|
||||
Interval *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// Deprecated: use Provide.Strategy instead. This will be removed in a future release.
|
||||
Strategy *OptionalString `json:",omitempty"`
|
||||
}
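// Illustrative sketch only (not part of this change): per the deprecation notes above,
// Reprovider.Interval maps to Provide.DHT.Interval, Reprovider.Strategy to Provide.Strategy,
// and the old Provider.WorkerCount to Provide.DHT.MaxWorkers. The field names below come
// from the Provide tests earlier in this change; the exact layout of Provide may differ.
func exampleMigratedProvideConfig() *Provide {
	return &Provide{
		DHT: ProvideDHT{
			Interval:   NewOptionalDuration(DefaultReproviderInterval), // replaces Reprovider.Interval
			MaxWorkers: NewOptionalInteger(16),                         // replaces Provider.WorkerCount
		},
	}
}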
@ -3,20 +3,39 @@ package config
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultAcceleratedDHTClient = false
|
||||
DefaultLoopbackAddressesOnLanDHT = false
|
||||
DefaultRoutingType = "auto"
|
||||
CidContactRoutingURL = "https://cid.contact"
|
||||
PublicGoodDelegatedRoutingURL = "https://delegated-ipfs.dev" // cid.contact + amino dht (incl. IPNS PUTs)
|
||||
EnvHTTPRouters = "IPFS_HTTP_ROUTERS"
|
||||
EnvHTTPRoutersFilterProtocols = "IPFS_HTTP_ROUTERS_FILTER_PROTOCOLS"
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultAcceleratedDHTClient = false
|
||||
DefaultLoopbackAddressesOnLanDHT = false
|
||||
// Default filter-protocols to pass along with delegated routing requests (as defined in IPIP-484),
// and also used to filter results locally.
|
||||
DefaultHTTPRoutersFilterProtocols = getEnvOrDefault(EnvHTTPRoutersFilterProtocols, []string{
|
||||
"unknown", // allow results without protocol list, we can do libp2p identify to test them
|
||||
"transport-bitswap",
|
||||
// http is added dynamically in routing/delegated.go.
|
||||
// 'transport-ipfs-gateway-http'
|
||||
})
|
||||
)
|
||||
|
||||
// Routing defines configuration options for libp2p routing.
|
||||
type Routing struct {
|
||||
// Type sets default daemon routing mode.
|
||||
//
|
||||
// Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", or "custom".
|
||||
// Can be one of "auto", "autoclient", "dht", "dhtclient", "dhtserver", "none", "delegated", or "custom".
|
||||
// When unset or set to "auto", DHT and implicit routers are used.
|
||||
// When "delegated" is set, only HTTP delegated routers and IPNS publishers are used (no DHT).
|
||||
// When "custom" is set, user-provided Routing.Routers is used.
|
||||
Type *OptionalString `json:",omitempty"`
|
||||
|
||||
@ -24,9 +43,14 @@ type Routing struct {
|
||||
|
||||
LoopbackAddressesOnLanDHT Flag `json:",omitempty"`
|
||||
|
||||
Routers Routers
|
||||
IgnoreProviders []string `json:",omitempty"`
|
||||
|
||||
Methods Methods
|
||||
// Simplified configuration used by default when Routing.Type=auto|autoclient
|
||||
DelegatedRouters []string
|
||||
|
||||
// Advanced configuration used when Routing.Type=custom
|
||||
Routers Routers `json:",omitempty"`
|
||||
Methods Methods `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Router struct {
|
||||
@ -180,3 +204,67 @@ type ConfigRouter struct {
|
||||
type Method struct {
|
||||
RouterName string
|
||||
}
|
||||
|
||||
// getEnvOrDefault reads a space- or comma-separated list of strings from the
// environment variable named by key, if present, and falls back to defaultValue.
func getEnvOrDefault(key string, defaultValue []string) []string {
	if value, exists := os.LookupEnv(key); exists {
		splitFunc := func(r rune) bool { return r == ',' || r == ' ' }
		return strings.FieldsFunc(value, splitFunc)
	}
	return defaultValue
}
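// Illustrative usage sketch (not part of this change), showing how the env override above
// is parsed. The example values are made up.
func exampleGetEnvOrDefault() {
	// Comma- and/or space-separated overrides are split into individual entries.
	os.Setenv(EnvHTTPRoutersFilterProtocols, "unknown, transport-bitswap")
	fmt.Println(getEnvOrDefault(EnvHTTPRoutersFilterProtocols, nil)) // [unknown transport-bitswap]

	// With the variable unset, the fallback slice is returned unchanged.
	os.Unsetenv(EnvHTTPRoutersFilterProtocols)
	fmt.Println(getEnvOrDefault(EnvHTTPRoutersFilterProtocols, []string{"transport-bitswap"})) // [transport-bitswap]
}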
// HasHTTPProviderConfigured checks if the node is configured to use HTTP routers
|
||||
// for providing content announcements. This is used when determining if the node
|
||||
// can provide content even when not connected to libp2p peers.
|
||||
//
|
||||
// Note: Right now we only support delegated HTTP content providing if Routing.Type=custom
|
||||
// and Routing.Routers are configured according to:
|
||||
// https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md#configuration-file-example
|
||||
//
|
||||
// This uses the `ProvideBitswap` request type that is not documented anywhere,
|
||||
// because we hoped something like IPIP-378 (https://github.com/ipfs/specs/pull/378)
|
||||
// would get finalized and we'd switch to that. It never happened due to politics,
|
||||
// and now we are stuck with ProvideBitswap being the only API that works.
|
||||
// Some people have reverse engineered it (example:
|
||||
// https://discuss.ipfs.tech/t/only-peers-found-from-dht-seem-to-be-getting-used-as-relays-so-cant-use-http-routers/19545/9)
|
||||
// and use it, so what we do here is the bare minimum to ensure their use case works
|
||||
// using this old API until something better is available.
|
||||
func (c *Config) HasHTTPProviderConfigured() bool {
|
||||
if len(c.Routing.Routers) == 0 {
|
||||
// No "custom" routers
|
||||
return false
|
||||
}
|
||||
method, ok := c.Routing.Methods[MethodNameProvide]
|
||||
if !ok {
|
||||
// No provide method configured
|
||||
return false
|
||||
}
|
||||
return c.routerSupportsHTTPProviding(method.RouterName)
|
||||
}
|
||||
|
||||
// routerSupportsHTTPProviding checks if the supplied custom router is or
|
||||
// includes an HTTP-based router.
|
||||
func (c *Config) routerSupportsHTTPProviding(routerName string) bool {
|
||||
rp, ok := c.Routing.Routers[routerName]
|
||||
if !ok {
|
||||
// Router configured for providing doesn't exist
|
||||
return false
|
||||
}
|
||||
|
||||
switch rp.Type {
|
||||
case RouterTypeHTTP:
|
||||
return true
|
||||
case RouterTypeParallel, RouterTypeSequential:
|
||||
// Check if any child router supports HTTP
|
||||
if children, ok := rp.Parameters.(*ComposableRouterParams); ok {
|
||||
for _, childRouter := range children.Routers {
|
||||
if c.routerSupportsHTTPProviding(childRouter.RouterName) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
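// Illustrative sketch (not part of this change) of the minimal Routing.Type=custom
// configuration that HasHTTPProviderConfigured inspects: an HTTP router plus a "provide"
// method pointing at it. The router name and endpoint are made up; a complete config also
// needs the other Routing.Methods entries (see docs/delegated-routing.md).
const exampleHTTPProviderRoutingJSON = `{
  "Routing": {
    "Type": "custom",
    "Routers": {
      "HttpProvider": {
        "Type": "http",
        "Parameters": { "Endpoint": "https://example-router.invalid" }
      }
    },
    "Methods": {
      "provide": { "RouterName": "HttpProvider" }
    }
  }
}`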
@ -65,8 +65,6 @@ type RelayService struct {
|
||||
// BufferSize is the size of the relayed connection buffers.
|
||||
BufferSize *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// MaxReservationsPerPeer is the maximum number of reservations originating from the same peer.
|
||||
MaxReservationsPerPeer *OptionalInteger `json:",omitempty"`
|
||||
// MaxReservationsPerIP is the maximum number of reservations originating from the same IP address.
|
||||
MaxReservationsPerIP *OptionalInteger `json:",omitempty"`
|
||||
// MaxReservationsPerASN is the maximum number of reservations originating from the same ASN.
|
||||
@ -106,10 +104,11 @@ type Transports struct {
|
||||
|
||||
// ConnMgr defines configuration options for the libp2p connection manager.
|
||||
type ConnMgr struct {
|
||||
Type *OptionalString `json:",omitempty"`
|
||||
LowWater *OptionalInteger `json:",omitempty"`
|
||||
HighWater *OptionalInteger `json:",omitempty"`
|
||||
GracePeriod *OptionalDuration `json:",omitempty"`
|
||||
Type *OptionalString `json:",omitempty"`
|
||||
LowWater *OptionalInteger `json:",omitempty"`
|
||||
HighWater *OptionalInteger `json:",omitempty"`
|
||||
GracePeriod *OptionalDuration `json:",omitempty"`
|
||||
SilencePeriod *OptionalDuration `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ResourceMgr defines configuration options for the libp2p Network Resource Manager
|
||||
@ -119,7 +118,7 @@ type ResourceMgr struct {
|
||||
Enabled Flag `json:",omitempty"`
|
||||
Limits swarmLimits `json:",omitempty"`
|
||||
|
||||
MaxMemory *OptionalString `json:",omitempty"`
|
||||
MaxMemory *OptionalBytes `json:",omitempty"`
|
||||
MaxFileDescriptors *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// A list of multiaddrs that can bypass normal system limits (but are still
|
||||
|
||||
@ -7,6 +7,8 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
// Strings is a helper type that (un)marshals a single string to/from a single
|
||||
@ -115,6 +117,16 @@ func (f Flag) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveBoolFromConfig returns the resolved boolean value based on:
// - If userSet is true, returns userValue (user explicitly set the flag)
// - Otherwise, uses configFlag.WithDefault(defaultValue) (respects config or falls back to default)
func ResolveBoolFromConfig(userValue bool, userSet bool, configFlag Flag, defaultValue bool) bool {
	if userSet {
		return userValue
	}
	return configFlag.WithDefault(defaultValue)
}
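// Illustrative usage sketch (not part of this change). This mirrors how 'ipfs add' later
// in this change resolves --fast-provide-root: an explicitly set CLI flag wins, otherwise
// the Import.FastProvideRoot config flag (or its default) is used.
func exampleResolveFastProvideRoot(cliValue, cliSet bool, cfg *Config) bool {
	return ResolveBoolFromConfig(cliValue, cliSet, cfg.Import.FastProvideRoot, DefaultFastProvideRoot)
}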
var (
|
||||
_ json.Unmarshaler = (*Flag)(nil)
|
||||
_ json.Marshaler = (*Flag)(nil)
|
||||
@ -425,8 +437,79 @@ func (p OptionalString) String() string {
|
||||
}
|
||||
|
||||
var (
|
||||
_ json.Unmarshaler = (*OptionalInteger)(nil)
|
||||
_ json.Marshaler = (*OptionalInteger)(nil)
|
||||
_ json.Unmarshaler = (*OptionalString)(nil)
|
||||
_ json.Marshaler = (*OptionalString)(nil)
|
||||
)
|
||||
|
||||
// OptionalBytes represents a byte size that has a default value.
//
// When encoded in JSON, the default is encoded as "null".
// It stores the original string representation and parses it on access.
// It embeds OptionalString to share common functionality.
type OptionalBytes struct {
|
||||
OptionalString
|
||||
}
|
||||
|
||||
// NewOptionalBytes returns an OptionalBytes from a string.
|
||||
func NewOptionalBytes(s string) *OptionalBytes {
|
||||
return &OptionalBytes{OptionalString{value: &s}}
|
||||
}
|
||||
|
||||
// IsDefault returns whether this is a default optional byte value.
|
||||
func (p *OptionalBytes) IsDefault() bool {
|
||||
if p == nil {
|
||||
return true
|
||||
}
|
||||
return p.OptionalString.IsDefault()
|
||||
}
|
||||
|
||||
// WithDefault resolves the byte size with the given default.
|
||||
// Parses the stored string value using humanize.ParseBytes.
|
||||
func (p *OptionalBytes) WithDefault(defaultValue uint64) (value uint64) {
|
||||
if p.IsDefault() {
|
||||
return defaultValue
|
||||
}
|
||||
strValue := p.OptionalString.WithDefault("")
|
||||
bytes, err := humanize.ParseBytes(strValue)
|
||||
if err != nil {
|
||||
// This should never happen as values are validated during UnmarshalJSON.
|
||||
// If it does, it indicates either config corruption or a programming error.
|
||||
panic(fmt.Sprintf("invalid byte size in OptionalBytes: %q - %v", strValue, err))
|
||||
}
|
||||
return bytes
|
||||
}
|
||||
|
||||
// UnmarshalJSON validates the input is a parseable byte size.
|
||||
func (p *OptionalBytes) UnmarshalJSON(input []byte) error {
|
||||
switch string(input) {
|
||||
case "null", "undefined":
|
||||
*p = OptionalBytes{}
|
||||
default:
|
||||
var value interface{}
|
||||
err := json.Unmarshal(input, &value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch v := value.(type) {
|
||||
case float64:
|
||||
str := fmt.Sprintf("%.0f", v)
|
||||
p.value = &str
|
||||
case string:
|
||||
_, err := humanize.ParseBytes(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.value = &v
|
||||
default:
|
||||
return fmt.Errorf("unable to parse byte size, expected a size string (e.g., \"5GiB\") or a number, but got %T", v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
_ json.Unmarshaler = (*OptionalBytes)(nil)
|
||||
_ json.Marshaler = (*OptionalBytes)(nil)
|
||||
)
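// Illustrative usage sketch (not part of this change). OptionalBytes keeps the
// human-readable string (e.g. "256KiB") and parses it lazily, so config round-trips
// preserve the original spelling while WithDefault yields the size in bytes.
func exampleOptionalBytes() (uint64, uint64) {
	var unset OptionalBytes                  // default: falls back to the caller-supplied value
	configured := NewOptionalBytes("256KiB") // parsed via humanize.ParseBytes on access

	return unset.WithDefault(1 << 20), configured.WithDefault(1 << 20) // 1048576, 262144
}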
type swarmLimits doNotUse
|
||||
|
||||
@ -5,6 +5,9 @@ import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestOptionalDuration(t *testing.T) {
|
||||
@ -509,3 +512,125 @@ func TestOptionalString(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOptionalBytes(t *testing.T) {
|
||||
makeStringPointer := func(v string) *string { return &v }
|
||||
|
||||
t.Run("default value", func(t *testing.T) {
|
||||
var b OptionalBytes
|
||||
assert.True(t, b.IsDefault())
|
||||
assert.Equal(t, uint64(0), b.WithDefault(0))
|
||||
assert.Equal(t, uint64(1024), b.WithDefault(1024))
|
||||
assert.Equal(t, "default", b.String())
|
||||
})
|
||||
|
||||
t.Run("non-default value", func(t *testing.T) {
|
||||
b := OptionalBytes{OptionalString{value: makeStringPointer("1MiB")}}
|
||||
assert.False(t, b.IsDefault())
|
||||
assert.Equal(t, uint64(1048576), b.WithDefault(512))
|
||||
assert.Equal(t, "1MiB", b.String())
|
||||
})
|
||||
|
||||
t.Run("JSON roundtrip", func(t *testing.T) {
|
||||
testCases := []struct {
|
||||
jsonInput string
|
||||
jsonOutput string
|
||||
expectedValue string
|
||||
}{
|
||||
{"null", "null", ""},
|
||||
{"\"256KiB\"", "\"256KiB\"", "256KiB"},
|
||||
{"\"1MiB\"", "\"1MiB\"", "1MiB"},
|
||||
{"\"5GiB\"", "\"5GiB\"", "5GiB"},
|
||||
{"\"256KB\"", "\"256KB\"", "256KB"},
|
||||
{"1048576", "\"1048576\"", "1048576"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.jsonInput, func(t *testing.T) {
|
||||
var b OptionalBytes
|
||||
err := json.Unmarshal([]byte(tc.jsonInput), &b)
|
||||
require.NoError(t, err)
|
||||
|
||||
if tc.expectedValue == "" {
|
||||
assert.Nil(t, b.value)
|
||||
} else {
|
||||
require.NotNil(t, b.value)
|
||||
assert.Equal(t, tc.expectedValue, *b.value)
|
||||
}
|
||||
|
||||
out, err := json.Marshal(b)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.jsonOutput, string(out))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("parsing byte sizes", func(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
expected uint64
|
||||
}{
|
||||
{"256KiB", 262144},
|
||||
{"1MiB", 1048576},
|
||||
{"5GiB", 5368709120},
|
||||
{"256KB", 256000},
|
||||
{"1048576", 1048576},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.input, func(t *testing.T) {
|
||||
var b OptionalBytes
|
||||
err := json.Unmarshal([]byte("\""+tc.input+"\""), &b)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expected, b.WithDefault(0))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("omitempty", func(t *testing.T) {
|
||||
type Foo struct {
|
||||
B *OptionalBytes `json:",omitempty"`
|
||||
}
|
||||
|
||||
out, err := json.Marshal(new(Foo))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "{}", string(out))
|
||||
|
||||
var foo2 Foo
|
||||
err = json.Unmarshal(out, &foo2)
|
||||
require.NoError(t, err)
|
||||
|
||||
if foo2.B != nil {
|
||||
assert.Equal(t, uint64(1024), foo2.B.WithDefault(1024))
|
||||
assert.True(t, foo2.B.IsDefault())
|
||||
} else {
|
||||
// When field is omitted, pointer is nil which is also considered default
|
||||
t.Log("B is nil, which is acceptable for omitempty")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("invalid values", func(t *testing.T) {
|
||||
invalidInputs := []string{
|
||||
"\"5XiB\"", "\"invalid\"", "\"\"", "[]", "{}",
|
||||
}
|
||||
|
||||
for _, invalid := range invalidInputs {
|
||||
t.Run(invalid, func(t *testing.T) {
|
||||
var b OptionalBytes
|
||||
err := json.Unmarshal([]byte(invalid), &b)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("panic on invalid stored value", func(t *testing.T) {
|
||||
// This tests that if somehow an invalid value gets stored
|
||||
// (bypassing UnmarshalJSON validation), WithDefault will panic
|
||||
invalidValue := "invalid-size"
|
||||
b := OptionalBytes{OptionalString{value: &invalidValue}}
|
||||
|
||||
assert.Panics(t, func() {
|
||||
b.WithDefault(1024)
|
||||
}, "should panic on invalid stored value")
|
||||
})
|
||||
}
|
||||
|
||||
@ -2,7 +2,7 @@ package config
|
||||
|
||||
const DefaultSwarmCheckPercentThreshold = 5
|
||||
|
||||
// Version allows controling things like custom user agent and update checks.
|
||||
// Version allows controlling things like custom user agent and update checks.
|
||||
type Version struct {
|
||||
// Optional suffix to the AgentVersion presented by `ipfs id` and exposed
|
||||
// via libp2p identify protocol.
|
||||
|
||||
@ -3,7 +3,7 @@ package commands
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"slices"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
@ -60,7 +60,7 @@ Lists running and recently run commands.
|
||||
for k := range req.Options {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
slices.Sort(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
fmt.Fprintf(tw, "%s=%v,", k, req.Options[k])
|
||||
|
||||
@ -8,15 +8,16 @@ import (
|
||||
gopath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/kubo/config"
|
||||
"github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
"github.com/ipfs/kubo/core/commands/cmdutils"
|
||||
|
||||
"github.com/cheggaaa/pb"
|
||||
"github.com/ipfs/boxo/files"
|
||||
mfs "github.com/ipfs/boxo/mfs"
|
||||
"github.com/ipfs/boxo/path"
|
||||
"github.com/ipfs/boxo/verifcid"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
coreiface "github.com/ipfs/kubo/core/coreiface"
|
||||
@ -25,24 +26,7 @@ import (
|
||||
)
|
||||
|
||||
// ErrDepthLimitExceeded indicates that the max depth has been exceeded.
|
||||
var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")
|
||||
|
||||
type TimeParts struct {
|
||||
t *time.Time
|
||||
}
|
||||
|
||||
func (t TimeParts) MarshalJSON() ([]byte, error) {
|
||||
return t.t.MarshalJSON()
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
// The time is expected to be a quoted string in RFC 3339 format.
|
||||
func (t *TimeParts) UnmarshalJSON(data []byte) (err error) {
|
||||
// Fractional seconds are handled implicitly by Parse.
|
||||
tt, err := time.Parse("\"2006-01-02T15:04:05Z\"", string(data))
|
||||
*t = TimeParts{&tt}
|
||||
return
|
||||
}
|
||||
var ErrDepthLimitExceeded = errors.New("depth limit exceeded")
|
||||
|
||||
type AddEvent struct {
|
||||
Name string
|
||||
@ -55,47 +39,78 @@ type AddEvent struct {
|
||||
}
|
||||
|
||||
const (
|
||||
quietOptionName = "quiet"
|
||||
quieterOptionName = "quieter"
|
||||
silentOptionName = "silent"
|
||||
progressOptionName = "progress"
|
||||
trickleOptionName = "trickle"
|
||||
wrapOptionName = "wrap-with-directory"
|
||||
onlyHashOptionName = "only-hash"
|
||||
chunkerOptionName = "chunker"
|
||||
pinOptionName = "pin"
|
||||
rawLeavesOptionName = "raw-leaves"
|
||||
noCopyOptionName = "nocopy"
|
||||
fstoreCacheOptionName = "fscache"
|
||||
cidVersionOptionName = "cid-version"
|
||||
hashOptionName = "hash"
|
||||
inlineOptionName = "inline"
|
||||
inlineLimitOptionName = "inline-limit"
|
||||
toFilesOptionName = "to-files"
|
||||
pinNameOptionName = "pin-name"
|
||||
quietOptionName = "quiet"
|
||||
quieterOptionName = "quieter"
|
||||
silentOptionName = "silent"
|
||||
progressOptionName = "progress"
|
||||
trickleOptionName = "trickle"
|
||||
wrapOptionName = "wrap-with-directory"
|
||||
onlyHashOptionName = "only-hash"
|
||||
chunkerOptionName = "chunker"
|
||||
pinOptionName = "pin"
|
||||
rawLeavesOptionName = "raw-leaves"
|
||||
maxFileLinksOptionName = "max-file-links"
|
||||
maxDirectoryLinksOptionName = "max-directory-links"
|
||||
maxHAMTFanoutOptionName = "max-hamt-fanout"
|
||||
noCopyOptionName = "nocopy"
|
||||
fstoreCacheOptionName = "fscache"
|
||||
cidVersionOptionName = "cid-version"
|
||||
hashOptionName = "hash"
|
||||
inlineOptionName = "inline"
|
||||
inlineLimitOptionName = "inline-limit"
|
||||
toFilesOptionName = "to-files"
|
||||
|
||||
preserveModeOptionName = "preserve-mode"
|
||||
preserveMtimeOptionName = "preserve-mtime"
|
||||
modeOptionName = "mode"
|
||||
mtimeOptionName = "mtime"
|
||||
mtimeNsecsOptionName = "mtime-nsecs"
|
||||
preserveModeOptionName = "preserve-mode"
|
||||
preserveMtimeOptionName = "preserve-mtime"
|
||||
modeOptionName = "mode"
|
||||
mtimeOptionName = "mtime"
|
||||
mtimeNsecsOptionName = "mtime-nsecs"
|
||||
fastProvideRootOptionName = "fast-provide-root"
|
||||
fastProvideWaitOptionName = "fast-provide-wait"
|
||||
)
|
||||
|
||||
const adderOutChanSize = 8
|
||||
const (
|
||||
adderOutChanSize = 8
|
||||
)
|
||||
|
||||
var AddCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Add a file or directory to IPFS.",
|
||||
ShortDescription: `
|
||||
Adds the content of <path> to IPFS. Use -r to add directories (recursively).
|
||||
|
||||
FAST PROVIDE OPTIMIZATION:
|
||||
|
||||
When you add content to IPFS, the sweep provider queues it for efficient
|
||||
DHT provides over time. While this is resource-efficient, other peers won't
|
||||
find your content immediately after 'ipfs add' completes.
|
||||
|
||||
To make sharing faster, 'ipfs add' does an immediate provide of the root CID
|
||||
to the DHT in addition to the regular queue. This complements the sweep provider:
|
||||
fast-provide handles the urgent case (root CIDs that users share and reference),
|
||||
while the sweep provider efficiently provides all blocks according to
|
||||
Provide.Strategy over time.
|
||||
|
||||
By default, this immediate provide runs in the background without blocking
|
||||
the command. If you need certainty that the root CID is discoverable before
|
||||
the command returns (e.g., sharing a link immediately), use --fast-provide-wait
|
||||
to wait for the provide to complete. Use --fast-provide-root=false to skip
|
||||
this optimization.
|
||||
|
||||
This works best with the sweep provider and accelerated DHT client.
|
||||
Automatically skipped when DHT is not available.
|
||||
`,
|
||||
LongDescription: `
|
||||
Adds the content of <path> to IPFS. Use -r to add directories.
|
||||
Note that directories are added recursively, to form the IPFS
|
||||
MerkleDAG.
|
||||
Note that directories are added recursively, and big files are chunked,
|
||||
to form the IPFS MerkleDAG. Learn more: https://docs.ipfs.tech/concepts/merkle-dag/
|
||||
|
||||
If the daemon is not running, it will just add locally.
|
||||
If the daemon is not running, it will just add locally to the repo at $IPFS_PATH.
|
||||
If the daemon is started later, it will be advertised after a few
|
||||
seconds when the reprovider runs.
|
||||
seconds when the provide system runs.
|
||||
|
||||
BASIC EXAMPLES:
|
||||
|
||||
The wrap option, '-w', wraps the file (or files, if using the
|
||||
recursive option) in a directory. This directory contains only
|
||||
@ -115,6 +130,12 @@ You can now refer to the added file in a gateway, like so:
|
||||
Files imported with 'ipfs add' are protected from GC (implicit '--pin=true'),
|
||||
but it is up to you to remember the returned CID to get the data back later.
|
||||
|
||||
If you need to back up or transport content-addressed data using a non-IPFS
|
||||
medium, CID can be preserved with CAR files.
|
||||
See 'dag export' and 'dag import' for more information.
|
||||
|
||||
MFS INTEGRATION:
|
||||
|
||||
Passing '--to-files' creates a reference in Files API (MFS), making it easier
|
||||
to find it in the future:
|
||||
|
||||
@ -126,6 +147,8 @@ to find it in the future:
|
||||
See 'ipfs files --help' to learn more about using MFS
|
||||
for keeping track of added files and directories.
|
||||
|
||||
CHUNKING EXAMPLES:
|
||||
|
||||
The chunker option, '-s', specifies the chunking strategy that dictates
|
||||
how to break files into blocks. Blocks with same content can
|
||||
be deduplicated. Different chunking strategies will produce different
|
||||
@ -146,14 +169,16 @@ want to use a 1024 times larger chunk sizes for most files.
|
||||
|
||||
You can now check what blocks have been created by:
|
||||
|
||||
> ipfs object links QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87
|
||||
> ipfs ls QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87
|
||||
QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
|
||||
Qmf7ZQeSxq2fJVJbCmgTrLLVN9tDR9Wy5k75DxQKuz5Gyt 1195
|
||||
> ipfs object links Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn
|
||||
> ipfs ls Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn
|
||||
QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
|
||||
QmerURi9k4XzKCaaPbsK6BL5pMEjF7PGphjDvkkjDtsVf3 868
|
||||
QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338
|
||||
|
||||
ADVANCED CONFIGURATION:
|
||||
|
||||
Finally, a note on hash (CID) determinism and 'ipfs add' command.
|
||||
|
||||
Almost all the flags provided by this command will change the final CID, and
|
||||
@ -161,9 +186,11 @@ new flags may be added in the future. It is not guaranteed for the implicit
|
||||
defaults of 'ipfs add' to remain the same in future Kubo releases, or for other
|
||||
IPFS software to use the same import parameters as Kubo.
|
||||
|
||||
If you need to back up or transport content-addressed data using a non-IPFS
|
||||
medium, CID can be preserved with CAR files.
|
||||
See 'dag export' and 'dag import' for more information.
|
||||
Note: CIDv1 is automatically used when using non-default options like custom
|
||||
hash functions or when raw-leaves is explicitly enabled.
|
||||
|
||||
Use Import.* configuration options to override global implicit defaults:
|
||||
https://github.com/ipfs/kubo/blob/master/docs/config.md#import
|
||||
`,
|
||||
},
|
||||
|
||||
@ -171,34 +198,48 @@ See 'dag export' and 'dag import' for more information.
|
||||
cmds.FileArg("path", true, true, "The path to a file to be added to IPFS.").EnableRecursive().EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
// Input Processing
|
||||
cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
|
||||
cmds.OptionDerefArgs, // a builtin option that resolves passed in filesystem links (--dereference-args)
|
||||
cmds.OptionStdinName, // a builtin option that optionally allows wrapping stdin into a named file
|
||||
cmds.OptionHidden,
|
||||
cmds.OptionIgnore,
|
||||
cmds.OptionIgnoreRules,
|
||||
// Output Control
|
||||
cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
|
||||
cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),
|
||||
cmds.BoolOption(silentOptionName, "Write no output."),
|
||||
cmds.BoolOption(progressOptionName, "p", "Stream progress data."),
|
||||
cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
|
||||
// Basic Add Behavior
|
||||
cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."),
|
||||
cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."),
|
||||
cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash"),
|
||||
cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes."),
|
||||
cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. (experimental)"),
|
||||
cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
|
||||
cmds.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true."),
|
||||
cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. (experimental)"),
|
||||
cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. (experimental)"),
|
||||
cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. (experimental)").WithDefault(32),
|
||||
cmds.BoolOption(pinOptionName, "Pin locally to protect added files from garbage collection.").WithDefault(true),
|
||||
cmds.StringOption(pinNameOptionName, "Name to use for the pin. Requires explicit value (e.g., --pin-name=myname)."),
|
||||
// MFS Integration
|
||||
cmds.StringOption(toFilesOptionName, "Add reference to Files API (MFS) at the provided path."),
|
||||
cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. Disables raw-leaves. (experimental)"),
|
||||
cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. Disables raw-leaves. (experimental)"),
|
||||
cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. Disables raw-leaves. (experimental)"),
|
||||
cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). Disables raw-leaves. (experimental)"),
|
||||
// CID & Hashing
|
||||
cmds.IntOption(cidVersionOptionName, "CID version (0 or 1). CIDv1 automatically enables raw-leaves and is required for non-sha2-256 hashes. Default: Import.CidVersion"),
|
||||
cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. Default: Import.HashFunction"),
|
||||
cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. Note: CIDv1 automatically enables raw-leaves. Default: false for CIDv0, true for CIDv1 (Import.UnixFSRawLeaves)"),
|
||||
// Chunking & DAG Structure
|
||||
cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Files larger than chunk size are split into multiple blocks. Default: Import.UnixFSChunker"),
|
||||
cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
|
||||
// Advanced UnixFS Limits
|
||||
cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. WARNING: experimental. Default: Import.UnixFSFileMaxLinks"),
|
||||
cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSDirectoryMaxLinks"),
|
||||
cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"),
|
||||
// Experimental Features
|
||||
cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. WARNING: experimental"),
|
||||
cmds.IntOption(inlineLimitOptionName, fmt.Sprintf("Maximum block size to inline. Maximum: %d bytes. WARNING: experimental", verifcid.DefaultMaxIdentityDigestSize)).WithDefault(32),
|
||||
cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. WARNING: experimental"),
|
||||
cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. WARNING: experimental"),
|
||||
cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
|
||||
cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
|
||||
cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
|
||||
cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
|
||||
cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"),
|
||||
cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CID to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"),
|
||||
cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"),
|
||||
},
|
||||
PreRun: func(req *cmds.Request, env cmds.Environment) error {
|
||||
quiet, _ := req.Options[quietOptionName].(bool)
|
||||
@ -239,19 +280,38 @@ See 'dag export' and 'dag import' for more information.
|
||||
silent, _ := req.Options[silentOptionName].(bool)
|
||||
chunker, _ := req.Options[chunkerOptionName].(string)
|
||||
dopin, _ := req.Options[pinOptionName].(bool)
|
||||
pinName, pinNameSet := req.Options[pinNameOptionName].(string)
|
||||
rawblks, rbset := req.Options[rawLeavesOptionName].(bool)
|
||||
maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int)
|
||||
maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int)
|
||||
maxHAMTFanout, maxHAMTFanoutSet := req.Options[maxHAMTFanoutOptionName].(int)
|
||||
nocopy, _ := req.Options[noCopyOptionName].(bool)
|
||||
fscache, _ := req.Options[fstoreCacheOptionName].(bool)
|
||||
cidVer, cidVerSet := req.Options[cidVersionOptionName].(int)
|
||||
hashFunStr, _ := req.Options[hashOptionName].(string)
|
||||
inline, _ := req.Options[inlineOptionName].(bool)
|
||||
inlineLimit, _ := req.Options[inlineLimitOptionName].(int)
|
||||
|
||||
// Validate inline-limit doesn't exceed the maximum identity digest size
|
||||
if inline && inlineLimit > verifcid.DefaultMaxIdentityDigestSize {
|
||||
return fmt.Errorf("inline-limit %d exceeds maximum allowed size of %d bytes", inlineLimit, verifcid.DefaultMaxIdentityDigestSize)
|
||||
}
|
||||
|
||||
// Validate pin name
|
||||
if pinNameSet {
|
||||
if err := cmdutils.ValidatePinName(pinName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
toFilesStr, toFilesSet := req.Options[toFilesOptionName].(string)
|
||||
preserveMode, _ := req.Options[preserveModeOptionName].(bool)
|
||||
preserveMtime, _ := req.Options[preserveMtimeOptionName].(bool)
|
||||
mode, _ := req.Options[modeOptionName].(uint)
|
||||
mtime, _ := req.Options[mtimeOptionName].(int64)
|
||||
mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint)
|
||||
fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
|
||||
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)
|
||||
|
||||
if chunker == "" {
|
||||
chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)
|
||||
@ -266,11 +326,31 @@ See 'dag export' and 'dag import' for more information.
|
||||
cidVer = int(cfg.Import.CidVersion.WithDefault(config.DefaultCidVersion))
|
||||
}
|
||||
|
||||
// Pin names are only used when explicitly provided via --pin-name=value
|
||||
|
||||
if !rbset && cfg.Import.UnixFSRawLeaves != config.Default {
|
||||
rbset = true
|
||||
rawblks = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
|
||||
}
|
||||
|
||||
if !maxFileLinksSet && !cfg.Import.UnixFSFileMaxLinks.IsDefault() {
|
||||
maxFileLinksSet = true
|
||||
maxFileLinks = int(cfg.Import.UnixFSFileMaxLinks.WithDefault(config.DefaultUnixFSFileMaxLinks))
|
||||
}
|
||||
|
||||
if !maxDirectoryLinksSet && !cfg.Import.UnixFSDirectoryMaxLinks.IsDefault() {
|
||||
maxDirectoryLinksSet = true
|
||||
maxDirectoryLinks = int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
|
||||
}
|
||||
|
||||
if !maxHAMTFanoutSet && !cfg.Import.UnixFSHAMTDirectoryMaxFanout.IsDefault() {
|
||||
maxHAMTFanoutSet = true
|
||||
maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
|
||||
}
|
||||
|
||||
fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
|
||||
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)
|
||||
|
||||
// Storing optional mode or mtime (UnixFS 1.5) requires root block
|
||||
// to always be 'dag-pb' and not 'raw'. Below adjusts raw-leaves setting, if possible.
|
||||
if preserveMode || preserveMtime || mode != 0 || mtime != 0 {
|
||||
@ -287,6 +367,12 @@ See 'dag export' and 'dag import' for more information.
|
||||
if onlyHash && toFilesSet {
|
||||
return fmt.Errorf("%s and %s options are not compatible", onlyHashOptionName, toFilesOptionName)
|
||||
}
|
||||
if !dopin && pinNameSet {
|
||||
return fmt.Errorf("%s option requires %s to be set", pinNameOptionName, pinOptionName)
|
||||
}
|
||||
if wrap && toFilesSet {
|
||||
return fmt.Errorf("%s and %s options are not compatible", wrapOptionName, toFilesOptionName)
|
||||
}
|
||||
|
||||
hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)]
|
||||
if !ok {
|
||||
@ -313,7 +399,7 @@ See 'dag export' and 'dag import' for more information.
|
||||
|
||||
options.Unixfs.Chunker(chunker),
|
||||
|
||||
options.Unixfs.Pin(dopin),
|
||||
options.Unixfs.Pin(dopin, pinName),
|
||||
options.Unixfs.HashOnly(onlyHash),
|
||||
options.Unixfs.FsCache(fscache),
|
||||
options.Unixfs.Nocopy(nocopy),
|
||||
@ -343,6 +429,18 @@ See 'dag export' and 'dag import' for more information.
|
||||
opts = append(opts, options.Unixfs.RawLeaves(rawblks))
|
||||
}
|
||||
|
||||
if maxFileLinksSet {
|
||||
opts = append(opts, options.Unixfs.MaxFileLinks(maxFileLinks))
|
||||
}
|
||||
|
||||
if maxDirectoryLinksSet {
|
||||
opts = append(opts, options.Unixfs.MaxDirectoryLinks(maxDirectoryLinks))
|
||||
}
|
||||
|
||||
if maxHAMTFanoutSet {
|
||||
opts = append(opts, options.Unixfs.MaxHAMTFanout(maxHAMTFanout))
|
||||
}
|
||||
|
||||
if trickle {
|
||||
opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
|
||||
}
|
||||
@ -355,11 +453,12 @@ See 'dag export' and 'dag import' for more information.
|
||||
}
|
||||
var added int
|
||||
var fileAddedToMFS bool
|
||||
var lastRootCid path.ImmutablePath // Track the root CID for fast-provide
|
||||
addit := toadd.Entries()
|
||||
for addit.Next() {
|
||||
_, dir := addit.Node().(files.Directory)
|
||||
errCh := make(chan error, 1)
|
||||
events := make(chan interface{}, adderOutChanSize)
|
||||
events := make(chan any, adderOutChanSize)
|
||||
opts[len(opts)-1] = options.Unixfs.Events(events)
|
||||
|
||||
go func() {
|
||||
@ -371,8 +470,16 @@ See 'dag export' and 'dag import' for more information.
|
||||
return
|
||||
}
|
||||
|
||||
// Store the root CID for potential fast-provide operation
|
||||
lastRootCid = pathAdded
|
||||
|
||||
// creating MFS pointers when optional --to-files is set
|
||||
if toFilesSet {
|
||||
if addit.Name() == "" {
|
||||
errCh <- fmt.Errorf("%s: cannot add unnamed files to MFS", toFilesOptionName)
|
||||
return
|
||||
}
|
||||
|
||||
if toFilesStr == "" {
|
||||
toFilesStr = "/"
|
||||
}
|
||||
@ -489,12 +596,29 @@ See 'dag export' and 'dag import' for more information.
|
||||
return fmt.Errorf("expected a file argument")
|
||||
}
|
||||
|
||||
// Apply fast-provide-root if the flag is enabled
|
||||
if fastProvideRoot && (lastRootCid != path.ImmutablePath{}) {
|
||||
cfg, err := ipfsNode.Repo.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cmdenv.ExecuteFastProvide(req.Context, ipfsNode, cfg, lastRootCid.RootCid(), fastProvideWait, dopin, dopin, toFilesSet); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !fastProvideRoot {
|
||||
if fastProvideWait {
|
||||
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true)
|
||||
} else {
|
||||
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
PostRun: cmds.PostRunMap{
|
||||
cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
|
||||
sizeChan := make(chan int64, 1)
|
||||
outChan := make(chan interface{})
|
||||
outChan := make(chan any)
|
||||
req := res.Request()
|
||||
|
||||
// Could be slow.
|
||||
|
||||
@ -5,7 +5,6 @@ import (
|
||||
"io"
|
||||
|
||||
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
e "github.com/ipfs/kubo/core/commands/e"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
bitswap "github.com/ipfs/boxo/bitswap"
|
||||
@ -25,7 +24,7 @@ var BitswapCmd = &cmds.Command{
|
||||
"stat": bitswapStatCmd,
|
||||
"wantlist": showWantlistCmd,
|
||||
"ledger": ledgerCmd,
|
||||
"reprovide": reprovideCmd,
|
||||
"reprovide": deprecatedBitswapReprovideCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -33,6 +32,17 @@ const (
|
||||
peerOptionName = "peer"
|
||||
)
|
||||
|
||||
var deprecatedBitswapReprovideCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated command to announce to bitswap. Use 'ipfs routing reprovide' instead.",
|
||||
ShortDescription: `
|
||||
'ipfs bitswap reprovide' is a legacy plumbing command used to announce data to the DHT.
Deprecated: use the modern 'ipfs routing reprovide' instead.`,
|
||||
},
|
||||
Run: reprovideRoutingCmd.Run, // alias to routing reprovide to not break existing users
|
||||
}
|
||||
|
||||
var showWantlistCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Show blocks currently on the wantlist.",
|
||||
@ -53,10 +63,7 @@ Print out all blocks currently on the bitswap wantlist for the local peer.`,
|
||||
return ErrNotOnline
|
||||
}
|
||||
|
||||
bs, ok := nd.Exchange.(*bitswap.Bitswap)
|
||||
if !ok {
|
||||
return e.TypeErr(bs, nd.Exchange)
|
||||
}
|
||||
bs := nd.Bitswap
|
||||
|
||||
pstr, found := req.Options[peerOptionName].(string)
|
||||
if found {
|
||||
@ -112,12 +119,7 @@ var bitswapStatCmd = &cmds.Command{
|
||||
return cmds.Errorf(cmds.ErrClient, "unable to run offline: %s", ErrNotOnline)
|
||||
}
|
||||
|
||||
bs, ok := nd.Exchange.(*bitswap.Bitswap)
|
||||
if !ok {
|
||||
return e.TypeErr(bs, nd.Exchange)
|
||||
}
|
||||
|
||||
st, err := bs.Stat()
|
||||
st, err := nd.Bitswap.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -134,7 +136,6 @@ var bitswapStatCmd = &cmds.Command{
|
||||
human, _ := req.Options[bitswapHumanOptionName].(bool)
|
||||
|
||||
fmt.Fprintln(w, "bitswap status")
|
||||
fmt.Fprintf(w, "\tprovides buffer: %d / %d\n", s.ProvideBufLen, bitswap.HasBlockBufferSize)
|
||||
fmt.Fprintf(w, "\tblocks received: %d\n", s.BlocksReceived)
|
||||
fmt.Fprintf(w, "\tblocks sent: %d\n", s.BlocksSent)
|
||||
if human {
|
||||
@ -190,17 +191,12 @@ prints the ledger associated with a given peer.
|
||||
return ErrNotOnline
|
||||
}
|
||||
|
||||
bs, ok := nd.Exchange.(*bitswap.Bitswap)
|
||||
if !ok {
|
||||
return e.TypeErr(bs, nd.Exchange)
|
||||
}
|
||||
|
||||
partner, err := peer.Decode(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, bs.LedgerForPeer(partner))
|
||||
return cmds.EmitOnce(res, nd.Bitswap.LedgerForPeer(partner))
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *server.Receipt) error {
|
||||
@ -215,29 +211,3 @@ prints the ledger associated with a given peer.
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
var reprovideCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Trigger reprovider.",
|
||||
ShortDescription: `
|
||||
Trigger reprovider to announce our data to network.
|
||||
`,
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
nd, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !nd.IsOnline {
|
||||
return ErrNotOnline
|
||||
}
|
||||
|
||||
err = nd.Provider.Reprovide(req.Context)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@ -4,14 +4,14 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
repo "github.com/ipfs/kubo/repo"
|
||||
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
config "github.com/ipfs/kubo/config"
|
||||
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
repo "github.com/ipfs/kubo/repo"
|
||||
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
@ -41,15 +41,15 @@ Running 'ipfs bootstrap' with no arguments will run 'ipfs bootstrap list'.
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
defaultOptionName = "default"
|
||||
)
|
||||
|
||||
var bootstrapAddCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Add peers to the bootstrap list.",
|
||||
ShortDescription: `Outputs a list of peers that were added (that weren't already
|
||||
in the bootstrap list).
|
||||
|
||||
The special values 'default' and 'auto' can be used to add the default
|
||||
bootstrap peers. Both are equivalent and will add the 'auto' placeholder to
|
||||
the bootstrap list, which gets resolved using the AutoConf system.
|
||||
` + bootstrapSecurityWarning,
|
||||
},
|
||||
|
||||
@ -57,29 +57,23 @@ in the bootstrap list).
|
||||
cmds.StringArg("peer", false, true, peerOptionDesc).EnableStdin(),
|
||||
},
|
||||
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(defaultOptionName, "Add default bootstrap nodes. (Deprecated, use 'default' subcommand instead)"),
|
||||
},
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
"default": bootstrapAddDefaultCmd,
|
||||
},
|
||||
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
deflt, _ := req.Options[defaultOptionName].(bool)
|
||||
|
||||
inputPeers := config.DefaultBootstrapAddresses
|
||||
if !deflt {
|
||||
if err := req.ParseBodyArgs(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
inputPeers = req.Arguments
|
||||
if err := req.ParseBodyArgs(); err != nil {
|
||||
return err
|
||||
}
|
||||
inputPeers := req.Arguments
|
||||
|
||||
if len(inputPeers) == 0 {
|
||||
return errors.New("no bootstrap peers to add")
|
||||
}
|
||||
|
||||
// Convert "default" to "auto" for backward compatibility
|
||||
for i, peer := range inputPeers {
|
||||
if peer == "default" {
|
||||
inputPeers[i] = "auto"
|
||||
}
|
||||
}
|
||||
|
||||
cfgRoot, err := cmdenv.GetConfigRoot(env)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -95,6 +89,13 @@ in the bootstrap list).
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if trying to add "auto" when AutoConf is disabled
|
||||
for _, peer := range inputPeers {
|
||||
if peer == config.AutoPlaceholder && !cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) {
|
||||
return errors.New("cannot add default bootstrap peers: AutoConf is disabled (AutoConf.Enabled=false). Enable AutoConf by setting AutoConf.Enabled=true in your config, or add specific peer addresses instead")
|
||||
}
|
||||
}
|
||||
|
||||
added, err := bootstrapAdd(r, cfg, inputPeers)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -110,44 +111,6 @@ in the bootstrap list).
|
||||
},
|
||||
}
|
||||
|
||||
var bootstrapAddDefaultCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Add default peers to the bootstrap list.",
|
||||
ShortDescription: `Outputs a list of peers that were added (that weren't already
|
||||
in the bootstrap list).`,
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
cfgRoot, err := cmdenv.GetConfigRoot(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := fsrepo.Open(cfgRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer r.Close()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
added, err := bootstrapAdd(r, cfg, config.DefaultBootstrapAddresses)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &BootstrapOutput{added})
|
||||
},
|
||||
Type: BootstrapOutput{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *BootstrapOutput) error {
|
||||
return bootstrapWritePeers(w, "added ", out.Peers)
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
bootstrapAllOptionName = "all"
|
||||
)
|
||||
@ -251,6 +214,9 @@ var bootstrapListCmd = &cmds.Command{
|
||||
Tagline: "Show peers in the bootstrap list.",
|
||||
ShortDescription: "Peers are output in the format '<multiaddr>/<peerID>'.",
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(configExpandAutoName, "Expand 'auto' placeholders from AutoConf service."),
|
||||
},
|
||||
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
cfgRoot, err := cmdenv.GetConfigRoot(env)
|
||||
@ -268,12 +234,16 @@ var bootstrapListCmd = &cmds.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
peers, err := cfg.BootstrapPeers()
|
||||
if err != nil {
|
||||
return err
|
||||
// Check if user wants to expand auto values
|
||||
expandAuto, _ := req.Options[configExpandAutoName].(bool)
|
||||
if expandAuto {
|
||||
// Use the same expansion method as the daemon
|
||||
expandedBootstrap := cfg.BootstrapWithAutoConf()
|
||||
return cmds.EmitOnce(res, &BootstrapOutput{expandedBootstrap})
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &BootstrapOutput{config.BootstrapPeerStrings(peers)})
|
||||
// Simply return the bootstrap config as-is, including any "auto" values
|
||||
return cmds.EmitOnce(res, &BootstrapOutput{cfg.Bootstrap})
|
||||
},
|
||||
Type: BootstrapOutput{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
@ -284,7 +254,9 @@ var bootstrapListCmd = &cmds.Command{
|
||||
}
|
||||
|
||||
func bootstrapWritePeers(w io.Writer, prefix string, peers []string) error {
|
||||
sort.Stable(sort.StringSlice(peers))
|
||||
slices.SortStableFunc(peers, func(a, b string) int {
|
||||
return strings.Compare(a, b)
|
||||
})
|
||||
for _, peer := range peers {
|
||||
_, err := w.Write([]byte(prefix + peer + "\n"))
|
||||
if err != nil {
|
||||
@ -295,7 +267,11 @@ func bootstrapWritePeers(w io.Writer, prefix string, peers []string) error {
|
||||
}
|
||||
|
||||
func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, error) {
|
||||
// Validate peers - skip validation for "auto" placeholder
|
||||
for _, p := range peers {
|
||||
if p == config.AutoPlaceholder {
|
||||
continue // Skip validation for "auto" placeholder
|
||||
}
|
||||
m, err := ma.NewMultiaddr(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -345,6 +321,16 @@ func bootstrapAdd(r repo.Repo, cfg *config.Config, peers []string) ([]string, er
|
||||
}
|
||||
|
||||
func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]string, error) {
|
||||
// Check if bootstrap contains "auto"
|
||||
hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder)
|
||||
|
||||
if hasAuto && cfg.AutoConf.Enabled.WithDefault(config.DefaultAutoConfEnabled) {
|
||||
// Cannot selectively remove peers when using "auto" bootstrap
|
||||
// Users should either disable AutoConf or replace "auto" with specific peers
|
||||
return nil, fmt.Errorf("cannot remove individual bootstrap peers when using 'auto' placeholder: the 'auto' value is managed by AutoConf. Either disable AutoConf by setting AutoConf.Enabled=false and replace 'auto' with specific peer addresses, or use 'ipfs bootstrap rm --all' to remove all peers")
|
||||
}
|
||||
|
||||
// Original logic for non-auto bootstrap
|
||||
removed := make([]peer.AddrInfo, 0, len(toRemove))
|
||||
keep := make([]peer.AddrInfo, 0, len(cfg.Bootstrap))
|
||||
|
||||
@ -404,16 +390,28 @@ func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]stri
|
||||
}
|
||||
|
||||
func bootstrapRemoveAll(r repo.Repo, cfg *config.Config) ([]string, error) {
|
||||
removed, err := cfg.BootstrapPeers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Check if bootstrap contains "auto" - if so, we need special handling
|
||||
hasAuto := slices.Contains(cfg.Bootstrap, config.AutoPlaceholder)
|
||||
|
||||
var removed []string
|
||||
if hasAuto {
|
||||
// When "auto" is present, we can't parse it as peer.AddrInfo
|
||||
// Just return the raw bootstrap list as strings for display
|
||||
removed = slices.Clone(cfg.Bootstrap)
|
||||
} else {
|
||||
// Original logic for configs without "auto"
|
||||
removedPeers, err := cfg.BootstrapPeers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
removed = config.BootstrapPeerStrings(removedPeers)
|
||||
}
|
||||
|
||||
cfg.Bootstrap = nil
|
||||
if err := r.SetConfig(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config.BootstrapPeerStrings(removed), nil
|
||||
return removed, nil
|
||||
}
|
||||
|
||||
const bootstrapSecurityWarning = `
|
||||
|
||||
@ -2,7 +2,7 @@ package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
@ -43,13 +43,13 @@ var CatCmd = &cmds.Command{
|
||||
|
||||
offset, _ := req.Options[offsetOptionName].(int64)
|
||||
if offset < 0 {
|
||||
return fmt.Errorf("cannot specify negative offset")
|
||||
return errors.New("cannot specify negative offset")
|
||||
}
|
||||
|
||||
max, found := req.Options[lengthOptionName].(int64)
|
||||
|
||||
if max < 0 {
|
||||
return fmt.Errorf("cannot specify negative length")
|
||||
return errors.New("cannot specify negative length")
|
||||
}
|
||||
if !found {
|
||||
max = -1
|
||||
|
||||
@ -1,9 +1,11 @@
package commands

import (
    "cmp"
    "errors"
    "fmt"
    "io"
    "sort"
    "slices"
    "strings"
    "unicode"

@ -33,7 +35,7 @@ var CidCmd = &cmds.Command{

const (
    cidFormatOptionName    = "f"
    cidVerisonOptionName   = "v"
    cidToVersionOptionName = "v"
    cidCodecOptionName     = "mc"
    cidMultibaseOptionName = "b"
)
@ -52,13 +54,13 @@ The optional format string is a printf style format string:
    },
    Options: []cmds.Option{
        cmds.StringOption(cidFormatOptionName, "Printf style format string.").WithDefault("%s"),
        cmds.StringOption(cidVerisonOptionName, "CID version to convert to."),
        cmds.StringOption(cidToVersionOptionName, "CID version to convert to."),
        cmds.StringOption(cidCodecOptionName, "CID multicodec to convert to."),
        cmds.StringOption(cidMultibaseOptionName, "Multibase to display CID in."),
    },
    Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error {
        fmtStr, _ := req.Options[cidFormatOptionName].(string)
        verStr, _ := req.Options[cidVerisonOptionName].(string)
        verStr, _ := req.Options[cidToVersionOptionName].(string)
        codecStr, _ := req.Options[cidCodecOptionName].(string)
        baseStr, _ := req.Options[cidMultibaseOptionName].(string)

@ -85,10 +87,10 @@ The optional format string is a printf style format string:
        }
    case "0":
        if opts.newCodec != 0 && opts.newCodec != cid.DagProtobuf {
            return fmt.Errorf("cannot convert to CIDv0 with any codec other than dag-pb")
            return errors.New("cannot convert to CIDv0 with any codec other than dag-pb")
        }
        if baseStr != "" && baseStr != "base58btc" {
            return fmt.Errorf("cannot convert to CIDv0 with any multibase other than the implicit base58btc")
            return errors.New("cannot convert to CIDv0 with any multibase other than the implicit base58btc")
        }
        opts.verConv = toCidV0
    case "1":
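The case "0" branch rejects conversions that CIDv0 cannot express, since CIDv0 implies dag-pb, sha2-256, and base58btc. A rough stand-alone sketch of that constraint using github.com/ipfs/go-cid (the toV0 helper is illustrative and is not Kubo's toCidV0):

package main

import (
    "errors"
    "fmt"

    "github.com/ipfs/go-cid"
    mh "github.com/multiformats/go-multihash"
)

// toV0 mirrors the constraint enforced above: CIDv0 can only represent
// dag-pb content (real code should also verify the multihash is sha2-256).
func toV0(c cid.Cid) (cid.Cid, error) {
    if c.Version() == 0 {
        return c, nil
    }
    if c.Prefix().Codec != cid.DagProtobuf {
        return cid.Undef, errors.New("cannot convert to CIDv0 with any codec other than dag-pb")
    }
    return cid.NewCidV0(c.Hash()), nil
}

func main() {
    // Build a CIDv1 dag-pb CID over some bytes, then downgrade it to v0.
    pref := cid.Prefix{Version: 1, Codec: cid.DagProtobuf, MhType: mh.SHA2_256, MhLength: -1}
    c1, err := pref.Sum([]byte("example block"))
    if err != nil {
        panic(err)
    }
    c0, err := toV0(c1)
    fmt.Println(c1, "->", c0, err) // bafybei... -> Qm... <nil>
}
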
@ -119,7 +121,8 @@ The optional format string is a printf style format string:
            return ""
        }),
    },
    Type: CidFormatRes{},
    Type:  CidFormatRes{},
    Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}

type CidFormatRes struct {
@ -149,6 +152,7 @@ Useful when processing third-party CIDs which could come with arbitrary formats.
    },
    PostRun: cidFmtCmd.PostRun,
    Type:    cidFmtCmd.Type,
    Extra:   CreateCmdExtras(SetDoesNotUseRepo(true)),
}

type cidFormatOpts struct {
@ -286,10 +290,10 @@ var basesCmd = &cmds.Command{
        cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, val []CodeAndName) error {
            prefixes, _ := req.Options[prefixOptionName].(bool)
            numeric, _ := req.Options[numericOptionName].(bool)
            sort.Sort(multibaseSorter{val})
            multibaseSorter{val}.Sort()
            for _, v := range val {
                code := v.Code
                if code < 32 || code >= 127 {
                if !unicode.IsPrint(rune(code)) {
                    // don't display non-printable prefixes
                    code = ' '
                }
@ -307,7 +311,8 @@ var basesCmd = &cmds.Command{
            return nil
        }),
    },
    Type: []CodeAndName{},
    Type:  []CodeAndName{},
    Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}

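The basesCmd hunk swaps the hand-rolled ASCII range check (code < 32 || code >= 127) for unicode.IsPrint. The two agree on ASCII prefixes, but unicode.IsPrint also accepts printable non-ASCII runes; a small stand-alone comparison (example runes chosen arbitrarily):

package main

import (
    "fmt"
    "unicode"
)

func main() {
    // The old check only accepted the printable ASCII range [32, 127).
    oldPrintable := func(r rune) bool { return r >= 32 && r < 127 }

    for _, r := range []rune{'z', 0x00, '\u00e9'} {
        fmt.Printf("%q old=%v unicode.IsPrint=%v\n", r, oldPrintable(r), unicode.IsPrint(r))
    }
    // 'z'    old=true  unicode.IsPrint=true
    // '\x00' old=false unicode.IsPrint=false
    // 'é'    old=false unicode.IsPrint=true  <- the two checks differ outside ASCII
}
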
const (
@ -356,7 +361,7 @@ var codecsCmd = &cmds.Command{
    Encoders: cmds.EncoderMap{
        cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, val []CodeAndName) error {
            numeric, _ := req.Options[codecsNumericOptionName].(bool)
            sort.Sort(codeAndNameSorter{val})
            codeAndNameSorter{val}.Sort()
            for _, v := range val {
                if numeric {
                    fmt.Fprintf(w, "%5d %s\n", v.Code, v.Name)
@ -367,7 +372,8 @@ var codecsCmd = &cmds.Command{
            return nil
        }),
    },
    Type: []CodeAndName{},
    Type:  []CodeAndName{},
    Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
}

var hashesCmd = &cmds.Command{
@ -391,29 +397,29 @@ var hashesCmd = &cmds.Command{
    },
    Encoders: codecsCmd.Encoders,
    Type:     codecsCmd.Type,
    Extra:    CreateCmdExtras(SetDoesNotUseRepo(true)),
}

type multibaseSorter struct {
    data []CodeAndName
}

func (s multibaseSorter) Len() int      { return len(s.data) }
func (s multibaseSorter) Swap(i, j int) { s.data[i], s.data[j] = s.data[j], s.data[i] }

func (s multibaseSorter) Less(i, j int) bool {
    a := unicode.ToLower(rune(s.data[i].Code))
    b := unicode.ToLower(rune(s.data[j].Code))
    if a != b {
        return a < b
    }
    // lowecase letters should come before uppercase
    return s.data[i].Code > s.data[j].Code
func (s multibaseSorter) Sort() {
    slices.SortFunc(s.data, func(a, b CodeAndName) int {
        if n := cmp.Compare(unicode.ToLower(rune(a.Code)), unicode.ToLower(rune(b.Code))); n != 0 {
            return n
        }
        // lowercase letters should come before uppercase
        return cmp.Compare(b.Code, a.Code)
    })
}

type codeAndNameSorter struct {
    data []CodeAndName
}

func (s codeAndNameSorter) Len() int           { return len(s.data) }
func (s codeAndNameSorter) Swap(i, j int)      { s.data[i], s.data[j] = s.data[j], s.data[i] }
func (s codeAndNameSorter) Less(i, j int) bool { return s.data[i].Code < s.data[j].Code }
func (s codeAndNameSorter) Sort() {
    slices.SortFunc(s.data, func(a, b CodeAndName) int {
        return cmp.Compare(a.Code, b.Code)
    })
}
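The sorter changes replace sort.Interface boilerplate (Len/Swap/Less) with Sort methods built on slices.SortFunc and cmp.Compare from Go 1.21. A stand-alone sketch of the same comparator (the codeAndName struct here merely mimics CodeAndName for illustration):

package main

import (
    "cmp"
    "fmt"
    "slices"
    "unicode"
)

// codeAndName stands in for the CodeAndName type in the diff; the field
// names are assumed to mirror it for illustration only.
type codeAndName struct {
    Code int
    Name string
}

func main() {
    vals := []codeAndName{{Code: 'Z', Name: "base58btc"}, {Code: 'b', Name: "base32"}, {Code: 'B', Name: "base32upper"}}

    // Same comparator as multibaseSorter.Sort: case-insensitive by prefix,
    // with the lowercase variant ordered before its uppercase counterpart.
    slices.SortFunc(vals, func(a, b codeAndName) int {
        if n := cmp.Compare(unicode.ToLower(rune(a.Code)), unicode.ToLower(rune(b.Code))); n != 0 {
            return n
        }
        return cmp.Compare(b.Code, a.Code)
    })

    for _, v := range vals {
        fmt.Printf("%c %s\n", v.Code, v.Name)
    }
    // b base32
    // B base32upper
    // Z base58btc
}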