Merge remote-tracking branch 'origin/master' into feat/p2p-foreground

# Conflicts:
#	docs/changelogs/v0.40.md
Marcin Rataj 2026-01-09 18:43:27 +01:00
commit f15075150c
63 changed files with 1243 additions and 718 deletions

View File

@ -109,13 +109,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance.json
path: output.json
@ -214,13 +214,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance-libp2p.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance-libp2p.json
path: output.json

View File

@ -14,11 +14,13 @@ concurrency:
cancel-in-progress: true
jobs:
go-test:
# Unit tests with coverage collection (uploaded to Codecov)
unit-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
timeout-minutes: 15
env:
GOTRACEBACK: single # reduce noise on test timeout panics
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
@ -36,41 +38,18 @@ jobs:
go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
env:
# increasing parallelism beyond 2 doesn't speed up the tests much
PARALLEL: 2
- name: Run unit tests
run: |
make -j "$PARALLEL" test/unit/gotest.junit.xml &&
make test_unit &&
[[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- name: Upload coverage to Codecov
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
if: failure() || success()
with:
name: unittests
files: coverage/unit_tests.coverprofile
- name: Test kubo-as-a-library example
run: |
# we want to first test with the kubo version in the go.mod file
go test -v ./...
# we also want to test the examples against the current version of kubo
# however, that version might be in a fork so we need to replace the dependency
# backup the go.mod and go.sum files to restore them after we run the tests
cp go.mod go.mod.bak
cp go.sum go.sum.bak
# make sure the examples run against the current version of kubo
go mod edit -replace github.com/ipfs/kubo=./../../..
go mod tidy
go test -v ./...
# restore the go.mod and go.sum files to their original state
mv go.mod.bak go.mod
mv go.sum.bak go.sum
working-directory: docs/examples/kubo-as-a-library
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
- name: Create a proper JUnit XML report
uses: ipdxco/gotest-json-to-junit-xml@v1
with:
@ -78,9 +57,9 @@ jobs:
output: test/unit/gotest.junit.xml
if: failure() || success()
- name: Archive the JUnit XML report
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: unit
name: unit-tests-junit
path: test/unit/gotest.junit.xml
if: failure() || success()
- name: Create a HTML report
@ -91,9 +70,9 @@ jobs:
output: test/unit/gotest.html
if: failure() || success()
- name: Archive the HTML report
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: html
name: unit-tests-html
path: test/unit/gotest.html
if: failure() || success()
- name: Create a Markdown report
@ -106,3 +85,86 @@ jobs:
- name: Set the summary
run: cat test/unit/gotest.md >> $GITHUB_STEP_SUMMARY
if: failure() || success()
# End-to-end integration/regression tests from test/cli
# (Go-based replacement for legacy test/sharness shell scripts)
cli-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 15
env:
GOTRACEBACK: single # reduce noise on test timeout panics
TEST_VERBOSE: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: Run CLI tests
env:
IPFS_PATH: ${{ runner.temp }}/ipfs-test
run: make test_cli
- name: Create JUnit XML report
uses: ipdxco/gotest-json-to-junit-xml@v1
with:
input: test/cli/cli-tests.json
output: test/cli/cli-tests.junit.xml
if: failure() || success()
- name: Archive JUnit XML report
uses: actions/upload-artifact@v6
with:
name: cli-tests-junit
path: test/cli/cli-tests.junit.xml
if: failure() || success()
- name: Create HTML report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: no-frames
input: test/cli/cli-tests.junit.xml
output: test/cli/cli-tests.html
if: failure() || success()
- name: Archive HTML report
uses: actions/upload-artifact@v6
with:
name: cli-tests-html
path: test/cli/cli-tests.html
if: failure() || success()
- name: Create Markdown report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: summary
input: test/cli/cli-tests.junit.xml
output: test/cli/cli-tests.md
if: failure() || success()
- name: Set summary
run: cat test/cli/cli-tests.md >> $GITHUB_STEP_SUMMARY
if: failure() || success()
# Example tests (kubo-as-a-library)
example-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 5
env:
GOTRACEBACK: single
defaults:
run:
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Run example tests
run: make test_examples

View File

@ -37,7 +37,7 @@ jobs:
with:
go-version-file: 'go.mod'
- run: make build
- uses: actions/upload-artifact@v5
- uses: actions/upload-artifact@v6
with:
name: kubo
path: cmd/ipfs/ipfs
@ -52,7 +52,7 @@ jobs:
- uses: actions/setup-node@v6
with:
node-version: lts/*
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v7
with:
name: kubo
path: cmd/ipfs
@ -87,7 +87,7 @@ jobs:
- uses: actions/setup-node@v6
with:
node-version: 20.x
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v7
with:
name: kubo
path: cmd/ipfs

View File

@ -55,11 +55,13 @@ jobs:
# increasing parallelism beyond 10 doesn't speed up the tests much
PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
- name: Upload coverage report
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
if: failure() || success()
with:
name: sharness
files: kubo/coverage/sharness_tests.coverprofile
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
- name: Aggregate results
run: find kubo/test/sharness/test-results -name 't*-*.sh.*.counts' | kubo/test/sharness/lib/sharness/aggregate-results.sh > kubo/test/sharness/test-results/summary.txt
- name: 👉️ If this step failed, go to «Summary» (top left) → «HTML Report» → inspect the «Failures» column
@ -88,7 +90,7 @@ jobs:
destination: sharness.html
- name: Upload one-page HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: sharness.html
path: kubo/test/sharness/test-results/sharness.html
@ -108,7 +110,7 @@ jobs:
destination: sharness-html/
- name: Upload full HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: sharness-html
path: kubo/test/sharness/test-results/sharness-html

View File

@ -77,7 +77,7 @@ jobs:
- name: Upload test results
if: always()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: ${{ matrix.os }}-test-results
path: |

.gitignore (5 lines changed)
View File

@ -28,6 +28,11 @@ go-ipfs-source.tar.gz
docs/examples/go-ipfs-as-a-library/example-folder/Qm*
/test/sharness/t0054-dag-car-import-export-data/*.car
# test artifacts from make test_unit / test_cli
/test/unit/gotest.json
/test/unit/gotest.junit.xml
/test/cli/cli-tests.json
# ignore build output from snapcraft
/ipfs_*.snap
/parts

View File

@ -1,6 +1,10 @@
IPFS as a project, including go-ipfs and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
# Contributing to Kubo
We also adhere to the [GO IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information of how to collaborate and contribute in the Go implementation of IPFS.
**For development setup, building, and testing, see the [Developer Guide](docs/developer-guide.md).**
IPFS as a project, including Kubo and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
We also adhere to the [Go IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information on how to collaborate and contribute to the Go implementation of IPFS.
We appreciate your time and attention for going over these. Please open an issue on ipfs/community if you have any questions.

README.md (530 lines changed)
View File

@ -2,7 +2,7 @@
<br>
<a href="https://github.com/ipfs/kubo/blob/master/docs/logo/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
<br>
Kubo: IPFS Implementation in GO
Kubo: IPFS Implementation in Go
<br>
</h1>
@ -11,111 +11,61 @@
<p align="center">
<a href="https://ipfs.tech"><img src="https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square" alt="Official Part of IPFS Project"></a>
<a href="https://discuss.ipfs.tech"><img alt="Discourse Forum" src="https://img.shields.io/discourse/posts?server=https%3A%2F%2Fdiscuss.ipfs.tech"></a>
<a href="https://matrix.to/#/#ipfs-space:ipfs.io"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
<a href="https://docs.ipfs.tech/community/"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
<a href="https://github.com/ipfs/kubo/actions"><img src="https://img.shields.io/github/actions/workflow/status/ipfs/kubo/gobuild.yml?branch=master"></a>
<a href="https://github.com/ipfs/kubo/releases"><img alt="GitHub release" src="https://img.shields.io/github/v/release/ipfs/kubo?filter=!*rc*"></a>
</p>
<hr />
<p align="center">
<b><a href="#what-is-kubo">What is Kubo?</a></b> | <b><a href="#quick-taste">Quick Taste</a></b> | <b><a href="#install">Install</a></b> | <b><a href="#documentation">Documentation</a></b> | <b><a href="#development">Development</a></b> | <b><a href="#getting-help">Getting Help</a></b>
</p>
## What is Kubo?
Kubo was the first IPFS implementation and is the most widely used one today. Implementing the *Interplanetary Filesystem* - the standard for content-addressing on the Web, interoperable with HTTP. Thus powered by future-proof data models and the libp2p for network communication. Kubo is written in Go.
Kubo was the first [IPFS](https://docs.ipfs.tech/concepts/what-is-ipfs/) implementation and is the [most widely used one today](https://probelab.io/ipfs/topology/#chart-agent-types-avg). It takes an opinionated approach to content-addressing ([CIDs](https://docs.ipfs.tech/concepts/glossary/#cid), [DAGs](https://docs.ipfs.tech/concepts/glossary/#dag)) that maximizes interoperability: [UnixFS](https://docs.ipfs.tech/concepts/glossary/#unixfs) for files and directories, [HTTP Gateways](https://docs.ipfs.tech/concepts/glossary/#gateway) for web browsers, [Bitswap](https://docs.ipfs.tech/concepts/glossary/#bitswap) and [HTTP](https://specs.ipfs.tech/http-gateways/trustless-gateway/) for verifiable data transfer.
Featureset
- Runs an IPFS-Node as a network service that is part of LAN and WAN DHT
- Native support for UnixFS (most popular way to represent files and directories on IPFS)
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs` and `/ipns`) functionality for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) (`/routing/v1`) client and server implementation for [delegated routing](./docs/delegated-routing.md) lookups
- [HTTP Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`) to access and control the daemon
- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) based on (`/api/v0`) RPC API
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) to manage the Kubo node
- [Content blocking](/docs/content-blocking.md) support for operators of public nodes
**Features:**
### Other implementations
- Runs an IPFS node as a network service (LAN [mDNS](https://github.com/libp2p/specs/blob/master/discovery/mdns.md) and WAN [Amino DHT](https://docs.ipfs.tech/concepts/glossary/#dht))
- [Command-line interface](https://docs.ipfs.tech/reference/kubo/cli/) (`ipfs --help`)
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) for node management
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
- [HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to control the daemon
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) client and server for [delegated routing](./docs/delegated-routing.md)
- [Content blocking](./docs/content-blocking.md) for public node operators
See [List](https://docs.ipfs.tech/basics/ipfs-implementations/)
**Other IPFS implementations:** [Helia](https://github.com/ipfs/helia) (JavaScript), [more...](https://docs.ipfs.tech/concepts/ipfs-implementations/)
## What is IPFS?
## Quick Taste
IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas from previous systems such as Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single BitTorrent swarm, exchanging git objects. IPFS provides an interface as simple as the HTTP web, but with permanence built-in. You can also mount the world at /ipfs.
After [installing Kubo](#install), verify it works:
For more info see: https://docs.ipfs.tech/concepts/what-is-ipfs/
```console
$ ipfs init
generating ED25519 keypair...done
peer identity: 12D3KooWGcSLQdLDBi2BvoP8WnpdHvhWPbxpGcqkf93rL2XMZK7R
Before opening an issue, consider using one of the following locations to ensure you are opening your thread in the right place:
- kubo (previously named go-ipfs) _implementation_ bugs in [this repo](https://github.com/ipfs/kubo/issues).
- Documentation issues in [ipfs/docs issues](https://github.com/ipfs/ipfs-docs/issues).
- IPFS _design_ in [ipfs/specs issues](https://github.com/ipfs/specs/issues).
- Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues).
- Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.tech).
- Or [chat with us](https://docs.ipfs.tech/community/chat/).
$ ipfs daemon &
Daemon is ready
[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCdjsUXJ3QawK4O5L1kqqsew?label=Subscribe%20IPFS&style=social&cacheSeconds=3600)](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [![Follow @IPFS on Twitter](https://img.shields.io/twitter/follow/IPFS?style=social&cacheSeconds=3600)](https://twitter.com/IPFS)
$ echo "hello IPFS" | ipfs add -q --cid-version 1
bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
## Next milestones
$ ipfs cat bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
hello IPFS
```
[Milestones on GitHub](https://github.com/ipfs/kubo/milestones)
Verify this CID is provided by your node to the IPFS network: <https://check.ipfs.network/?cid=bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa>
## Table of Contents
- [What is Kubo?](#what-is-kubo)
- [What is IPFS?](#what-is-ipfs)
- [Next milestones](#next-milestones)
- [Table of Contents](#table-of-contents)
- [Security Issues](#security-issues)
- [Install](#install)
- [Minimal System Requirements](#minimal-system-requirements)
- [Docker](#docker)
- [Official prebuilt binaries](#official-prebuilt-binaries)
- [Updating](#updating)
- [Downloading builds using IPFS](#downloading-builds-using-ipfs)
- [Unofficial Linux packages](#unofficial-linux-packages)
- [ArchLinux](#arch-linux)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
- [Fedora](#fedora-copr)
- [Unofficial Windows packages](#unofficial-windows-packages)
- [Chocolatey](#chocolatey)
- [Scoop](#scoop)
- [Unofficial MacOS packages](#unofficial-macos-packages)
- [MacPorts](#macports)
- [Nix](#nix-macos)
- [Homebrew](#homebrew)
- [Build from Source](#build-from-source)
- [Install Go](#install-go)
- [Download and Compile IPFS](#download-and-compile-ipfs)
- [Cross Compiling](#cross-compiling)
- [Troubleshooting](#troubleshooting)
- [Getting Started](#getting-started)
- [Usage](#usage)
- [Some things to try](#some-things-to-try)
- [Troubleshooting](#troubleshooting-1)
- [Packages](#packages)
- [Development](#development)
- [Map of Implemented Subsystems](#map-of-implemented-subsystems)
- [CLI, HTTP-API, Architecture Diagram](#cli-http-api-architecture-diagram)
- [Testing](#testing)
- [Development Dependencies](#development-dependencies)
- [Developer Notes](#developer-notes)
- [Maintainer Info](#maintainer-info)
- [Contributing](#contributing)
- [License](#license)
## Security Issues
Please follow [`SECURITY.md`](SECURITY.md).
See `ipfs add --help` for all import options. Ready for more? Follow the [command-line quick start](https://docs.ipfs.tech/how-to/command-line-quick-start/).
## Install
The canonical download instructions for IPFS are over at: https://docs.ipfs.tech/install/. It is **highly recommended** you follow those instructions if you are not interested in working on IPFS development.
Follow the [official installation guide](https://docs.ipfs.tech/install/command-line/), or choose: [prebuilt binary](#official-prebuilt-binaries) | [Docker](#docker) | [package manager](#package-managers) | [from source](#build-from-source).
For production use, Release Docker images (below) are recommended.
Prefer a GUI? Try [IPFS Desktop](https://docs.ipfs.tech/install/ipfs-desktop/) and/or [IPFS Companion](https://docs.ipfs.tech/install/ipfs-companion/).
### Minimal System Requirements
@ -127,388 +77,148 @@ Kubo runs on most Linux, macOS, and Windows systems. For optimal performance, we
> [!CAUTION]
> Systems with less than the recommended memory may experience instability, frequent OOM errors or restarts, and missing data announcement (reprovider window), which can make data fully or partially inaccessible to other peers. Running Kubo on underprovisioned hardware is at your own risk.
### Official Prebuilt Binaries
Download from https://dist.ipfs.tech#kubo or [GitHub Releases](https://github.com/ipfs/kubo/releases/latest).
### Docker
Official images are published at https://hub.docker.com/r/ipfs/kubo/: [![Docker Image Version (latest semver)](https://img.shields.io/docker/v/ipfs/kubo?color=blue&label=kubo%20docker%20image&logo=docker&sort=semver&style=flat-square&cacheSeconds=3600)](https://hub.docker.com/r/ipfs/kubo/)
#### 🟢 Release Images
- These are production grade images. Use them.
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) tags always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest). If you use this, remember to `docker pull` periodically to update.
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
#### 🟠 Developer Preview Images
- These tags are used by developers for internal testing, not intended for end users or production use.
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) always points at the `HEAD` of the [`master`](https://github.com/ipfs/kubo/commits/master/) branch
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit from the `master` branch
Use these for production deployments.
#### 🔴 Internal Staging Images
- We use `staging` for testing arbitrary commits and experimental patches.
- To build an image for the current HEAD, force push to `staging` via `git push origin HEAD:staging --force`
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) always points at the `HEAD` of the [`staging`](https://github.com/ipfs/kubo/commits/staging/) branch
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit from the `staging` branch
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest)
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
```console
$ docker pull ipfs/kubo:latest
$ docker run --rm -it --net=host ipfs/kubo:latest
```
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node),
pass necessary config via `-e` or by mounting scripts in the `/container-init.d`.
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node), pass config via `-e` or mount scripts in `/container-init.d`.
Learn more at https://docs.ipfs.tech/install/run-ipfs-inside-docker/
#### 🟠 Developer Preview Images
### Official prebuilt binaries
For internal testing, not intended for production.
The official binaries are published at https://dist.ipfs.tech#kubo:
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) points at `HEAD` of [`master`](https://github.com/ipfs/kubo/commits/master/)
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit
[![dist.ipfs.tech Downloads](https://img.shields.io/github/v/release/ipfs/kubo?label=dist.ipfs.tech&logo=ipfs&style=flat-square&cacheSeconds=3600)](https://dist.ipfs.tech#kubo)
#### 🔴 Internal Staging Images
From there:
- Click the blue "Download Kubo" on the right side of the page.
- Open/extract the archive.
- Move kubo (`ipfs`) to your path (`install.sh` can do it for you).
For testing arbitrary commits and experimental patches (force push to `staging` branch).
If you are unable to access [dist.ipfs.tech](https://dist.ipfs.tech#kubo), you can also download kubo from:
- this project's GitHub [releases](https://github.com/ipfs/kubo/releases/latest) page
- `/ipns/dist.ipfs.tech` at [dweb.link](https://dweb.link/ipns/dist.ipfs.tech#kubo) gateway
#### Updating
##### Downloading builds using IPFS
List the available versions of Kubo implementation:
```console
$ ipfs cat /ipns/dist.ipfs.tech/kubo/versions
```
Then, to view available builds for a version from the previous command (`$VERSION`):
```console
$ ipfs ls /ipns/dist.ipfs.tech/kubo/$VERSION
```
To download a given build of a version:
```console
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-arm64.tar.gz # darwin arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-riscv64.tar.gz # linux riscv64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm64.tar.gz # linux arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows amd64 build
```
### Unofficial Linux packages
<a href="https://repology.org/project/kubo/versions">
<img src="https://repology.org/badge/vertical-allrepos/kubo.svg" alt="Packaging status" align="right">
</a>
- [ArchLinux](#arch-linux)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix-linux)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
- [Fedora](#fedora-copr)
#### Arch Linux
[![kubo via Community Repo](https://img.shields.io/archlinux/v/community/x86_64/kubo?color=1793d1&label=kubo&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://wiki.archlinux.org/title/IPFS)
```bash
# pacman -S kubo
```
[![kubo-git via AUR](https://img.shields.io/static/v1?label=kubo-git&message=latest%40master&color=1793d1&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://archlinux.org/packages/kubo/)
#### <a name="gentoo-linux">Gentoo Linux</a>
https://wiki.gentoo.org/wiki/Kubo
```bash
# emerge -a net-p2p/kubo
```
https://packages.gentoo.org/packages/net-p2p/kubo
#### <a name="nix-linux">Nix</a>
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo like this:
```
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `kubo`.
#### Solus
[Package for Solus](https://dev.getsol.us/source/kubo/repository/master/)
```
$ sudo eopkg install kubo
```
You can also install it through the Solus software center.
#### openSUSE
[Community Package for kubo](https://software.opensuse.org/package/kubo)
#### Guix
[Community Package for kubo](https://packages.guix.gnu.org/search/?query=kubo) is available.
#### Snap
No longer supported, see rationale in [kubo#8688](https://github.com/ipfs/kubo/issues/8688).
#### Ubuntu PPA
[PPA homepage](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) on Launchpad.
##### Latest Ubuntu (>= 20.04 LTS)
```sh
sudo add-apt-repository ppa:twdragon/ipfs
sudo apt update
sudo apt install ipfs-kubo
```
### Fedora COPR
[`taw00/ipfs-rpm`](https://github.com/taw00/ipfs-rpm)
##### Any Ubuntu version
```sh
sudo su
echo 'deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
echo 'deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
exit
sudo apt update
sudo apt install ipfs-kubo
```
where `<<DISTRO>>` is the codename of your Ubuntu distribution (for example, `jammy` for 22.04 LTS). During the first installation the package maintenance script may automatically ask you about which networking profile, CPU accounting model, and/or existing node configuration file you want to use.
**NOTE**: this method also may work with any compatible Debian-based distro which has `libc6` inside, and APT as a package manager.
### Unofficial Windows packages
- [Chocolatey](#chocolatey)
- [Scoop](#scoop)
#### Chocolatey
No longer supported, see rationale in [kubo#9341](https://github.com/ipfs/kubo/issues/9341).
#### Scoop
Scoop provides kubo as `kubo` in its 'extras' bucket.
```Powershell
PS> scoop bucket add extras
PS> scoop install kubo
```
### Unofficial macOS packages
- [MacPorts](#macports)
- [Nix](#nix-macos)
- [Homebrew](#homebrew)
#### MacPorts
The package [ipfs](https://ports.macports.org/port/ipfs) currently points to kubo and is being maintained.
```
$ sudo port install ipfs
```
#### <a name="nix-macos">Nix</a>
In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
```
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `kubo`.
#### Homebrew
A Homebrew formula [ipfs](https://formulae.brew.sh/formula/ipfs) is maintained too.
```
$ brew install --formula ipfs
```
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) points at `HEAD` of [`staging`](https://github.com/ipfs/kubo/commits/staging/)
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit
### Build from Source
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600)
kubo's build system requires Go and some standard POSIX build tools:
* GNU make
* Git
* GCC (or some other go compatible C Compiler) (optional)
To build without GCC, build with `CGO_ENABLED=0` (e.g., `make build CGO_ENABLED=0`).
#### Install Go
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600)
If you need to update: [Download latest version of Go](https://golang.org/dl/).
You'll need to add Go's bin directories to your `$PATH` environment variable e.g., by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`:
```
export PATH=$PATH:/usr/local/go/bin
export PATH=$PATH:$GOPATH/bin
```bash
git clone https://github.com/ipfs/kubo.git
cd kubo
make build # creates cmd/ipfs/ipfs
make install # installs to $GOPATH/bin/ipfs
```
(If you run into trouble, see the [Go install instructions](https://golang.org/doc/install)).
See the [Developer Guide](docs/developer-guide.md) for details, Windows instructions, and troubleshooting.
#### Download and Compile IPFS
### Package Managers
```
$ git clone https://github.com/ipfs/kubo.git
Kubo is available in community-maintained packages across many operating systems, Linux distributions, and package managers. See [Repology](https://repology.org/project/kubo/versions) for the full list: [![Packaging status](https://repology.org/badge/tiny-repos/kubo.svg)](https://repology.org/project/kubo/versions)
$ cd kubo
$ make install
```
> [!WARNING]
> These packages are maintained by third-party volunteers. The IPFS Project and Kubo maintainers are not responsible for their contents or supply chain security. For increased security, [build from source](#build-from-source).
Alternatively, you can run `make build` to build the kubo binary (storing it in `cmd/ipfs/ipfs`) without installing it.
#### Linux
**NOTE:** If you get an error along the lines of "fatal error: stdlib.h: No such file or directory", you're missing a C compiler. Either re-run `make` with `CGO_ENABLED=0` or install GCC.
| Distribution | Install | Version |
|--------------|---------|---------|
| Ubuntu | [PPA](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs): `sudo apt install ipfs-kubo` | [![PPA: twdragon](https://img.shields.io/badge/PPA-twdragon-E95420?logo=ubuntu)](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) |
| Arch | `pacman -S kubo` | [![Arch package](https://repology.org/badge/version-for-repo/arch/kubo.svg)](https://archlinux.org/packages/extra/x86_64/kubo/) |
| Fedora | [COPR](https://copr.fedorainfracloud.org/coprs/taw/ipfs/): `dnf install kubo` | [![COPR: taw](https://img.shields.io/badge/COPR-taw-51A2DA?logo=fedora)](https://copr.fedorainfracloud.org/coprs/taw/ipfs/) |
| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) |
| Gentoo | `emerge -a net-p2p/kubo` | [![Gentoo package](https://repology.org/badge/version-for-repo/gentoo/kubo.svg)](https://packages.gentoo.org/packages/net-p2p/kubo) |
| openSUSE | `zypper install kubo` | [![openSUSE Tumbleweed](https://repology.org/badge/version-for-repo/opensuse_tumbleweed/kubo.svg)](https://software.opensuse.org/package/kubo) |
| Solus | `sudo eopkg install kubo` | [![Solus package](https://repology.org/badge/version-for-repo/solus/kubo.svg)](https://packages.getsol.us/shannon/k/kubo/) |
| Guix | `guix install kubo` | [![Guix package](https://repology.org/badge/version-for-repo/gnuguix/kubo.svg)](https://packages.guix.gnu.org/packages/kubo/) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
##### Cross Compiling
~~Snap~~ no longer supported ([#8688](https://github.com/ipfs/kubo/issues/8688))
Compiling for a different platform is as simple as running:
#### macOS
```
make build GOOS=myTargetOS GOARCH=myTargetArchitecture
```
| Manager | Install | Version |
|---------|---------|---------|
| Homebrew | `brew install ipfs` | [![Homebrew](https://repology.org/badge/version-for-repo/homebrew/kubo.svg)](https://formulae.brew.sh/formula/ipfs) |
| MacPorts | `sudo port install ipfs` | [![MacPorts](https://repology.org/badge/version-for-repo/macports/kubo.svg)](https://ports.macports.org/port/ipfs/) |
| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
#### Troubleshooting
#### Windows
- Separate [instructions are available for building on Windows](docs/windows.md).
- `git` is required in order for `go get` to fetch all dependencies.
- Package managers often contain out-of-date `golang` packages.
Ensure that `go version` reports the minimum version required (see go.mod). See above for how to install go.
- If you are interested in development, please install the development
dependencies as well.
- Shell command completions can be generated with one of the `ipfs commands completion` subcommands. Read [docs/command-completion.md](docs/command-completion.md) to learn more.
- See the [misc folder](https://github.com/ipfs/kubo/tree/master/misc) for how to connect IPFS to systemd or whatever init system your distro uses.
| Manager | Install | Version |
|---------|---------|---------|
| Scoop | `scoop install kubo` | [![Scoop](https://repology.org/badge/version-for-repo/scoop/kubo.svg)](https://scoop.sh/#/apps?q=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
## Getting Started
~~Chocolatey~~ no longer supported ([#9341](https://github.com/ipfs/kubo/issues/9341))
### Usage
## Documentation
[![docs: Command-line quick start](https://img.shields.io/static/v1?label=docs&message=Command-line%20quick%20start&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/how-to/command-line-quick-start/)
[![docs: Command-line reference](https://img.shields.io/static/v1?label=docs&message=Command-line%20reference&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/reference/kubo/cli/)
To start using IPFS, you must first initialize IPFS's config files on your
system, this is done with `ipfs init`. See `ipfs init --help` for information on
the optional arguments it takes. After initialization is complete, you can use
`ipfs mount`, `ipfs add` and any of the other commands to explore!
For detailed configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
### Some things to try
Basic proof of 'ipfs working' locally:
echo "hello world" > hello
ipfs add hello
# This should output a hash string that looks something like:
# QmT78zSuBmuS4z925WZfrqQ1qHaJ56DQaTfyMUF7F8ff5o
ipfs cat <that hash>
### HTTP/RPC clients
For programmatic interaction with Kubo, see our [list of HTTP/RPC clients](docs/http-rpc-clients.md).
### Troubleshooting
If you have previously installed IPFS before and you are running into problems getting a newer version to work, try deleting (or backing up somewhere else) your IPFS config directory (~/.ipfs by default) and rerunning `ipfs init`. This will reinitialize the config file to its defaults and clear out the local datastore of any bad entries.
For more information about configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
Please direct general questions and help requests to our [forums](https://discuss.ipfs.tech).
If you believe you've found a bug, check the [issues list](https://github.com/ipfs/kubo/issues) and, if you don't see your problem there, either come talk to us on [Matrix chat](https://docs.ipfs.tech/community/chat/), or file an issue of your own!
## Packages
See [IPFS in GO](https://docs.ipfs.tech/reference/go/api/) documentation.
| Topic | Description |
|-------|-------------|
| [Configuration](docs/config.md) | All config options reference |
| [Environment variables](docs/environment-variables.md) | Runtime settings via env vars |
| [Experimental features](docs/experimental-features.md) | Opt-in features in development |
| [HTTP Gateway](docs/gateway.md) | Path, subdomain, and trustless gateway setup |
| [HTTP RPC clients](docs/http-rpc-clients.md) | Client libraries for Go, JS |
| [Delegated routing](docs/delegated-routing.md) | Multi-router and HTTP routing |
| [Metrics & monitoring](docs/metrics.md) | Prometheus metrics |
| [Content blocking](docs/content-blocking.md) | Denylist for public nodes |
| [Customizing](docs/customizing.md) | Unsure whether to use Plugins, Boxo, or a fork? |
| [Debug guide](docs/debug-guide.md) | CPU profiles, memory analysis, tracing |
| [Changelogs](docs/changelogs/) | Release notes for each version |
| [All documentation](https://github.com/ipfs/kubo/tree/master/docs) | Full list of docs |
## Development
Some places to get you started on the codebase:
See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow.
- Main file: [./cmd/ipfs/main.go](https://github.com/ipfs/kubo/blob/master/cmd/ipfs/main.go)
- CLI Commands: [./core/commands/](https://github.com/ipfs/kubo/tree/master/core/commands)
- Bitswap (the data trading engine): [go-bitswap](https://github.com/ipfs/go-bitswap)
- libp2p
- libp2p: https://github.com/libp2p/go-libp2p
- DHT: https://github.com/libp2p/go-libp2p-kad-dht
- [IPFS : The `Add` command demystified](https://github.com/ipfs/kubo/tree/master/docs/add-code-flow.md)
## Getting Help
### Map of Implemented Subsystems
**WIP**: This is a high-level architecture diagram of the various sub-systems of this specific implementation. To be updated with how they interact. Anyone who has suggestions is welcome to comment [here](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit) on how we can improve this!
<img src="https://docs.google.com/drawings/d/e/2PACX-1vS_n1FvSu6mdmSirkBrIIEib2gqhgtatD9awaP2_WdrGN4zTNeg620XQd9P95WT-IvognSxIIdCM5uE/pub?w=1446&amp;h=1036">
- [IPFS Forum](https://discuss.ipfs.tech) - community support, questions, and discussion
- [Community](https://docs.ipfs.tech/community/) - chat, events, and working groups
- [GitHub Issues](https://github.com/ipfs/kubo/issues) - bug reports for Kubo specifically
- [IPFS Docs Issues](https://github.com/ipfs/ipfs-docs/issues) - documentation issues
### CLI, HTTP-API, Architecture Diagram
![](./docs/cli-http-api-core-diagram.png)
> [Origin](https://github.com/ipfs/pm/pull/678#discussion_r210410924)
Description: Dotted means "likely going away". The "Legacy" parts are thin wrappers around some commands to translate between the new system and the old system. The grayed-out parts on the "daemon" diagram are there to show that the code is all the same, it's just that we turn some pieces on and some pieces off depending on whether we're running on the client or the server.
### Testing
```
make test
```
### Development Dependencies
If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf).
### Developer Notes
Find more documentation for developers on [docs](./docs)
## Maintainer Info
Kubo is maintained by [Shipyard](https://ipshipyard.com/).
* This repository is part of [Shipyard's Go Triage](https://ipshipyard.notion.site/IPFS-Go-Triage-Boxo-Kubo-Rainbow-0ddee6b7f28d412da7dabe4f9107c29a).
* [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
## Security Issues
See [`SECURITY.md`](SECURITY.md).
## Contributing
[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md)
We ❤️ all [our contributors](docs/AUTHORS); this project wouldn't be what it is without you! If you want to help out, please see [CONTRIBUTING.md](CONTRIBUTING.md).
We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md).
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
This repository follows the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
Members of IPFS community provide Kubo support on [discussion forum category here](https://discuss.ipfs.tech/c/help/help-kubo/23).
## Maintainer Info
Need help with IPFS itself? Learn where to get help and support at https://ipfs.tech/help.
<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>
> [!NOTE]
> Kubo is maintained by the [Shipyard](https://ipshipyard.com/) team.
>
> [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
## License
This project is dual-licensed under Apache 2.0 and MIT terms:
Dual-licensed under Apache 2.0 and MIT:
- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/ipfs/kubo/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](https://github.com/ipfs/kubo/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT)
- [LICENSE-APACHE](LICENSE-APACHE)
- [LICENSE-MIT](LICENSE-MIT)

View File

@ -134,15 +134,14 @@ help:
@echo ''
@echo 'TESTING TARGETS:'
@echo ''
@echo ' test - Run all tests'
@echo ' test_short - Run short go tests and short sharness tests'
@echo ' test_go_short - Run short go tests'
@echo ' test_go_test - Run all go tests'
@echo ' test - Run all tests (test_go_fmt, test_unit, test_cli, test_sharness)'
@echo ' test_short - Run fast tests (test_go_fmt, test_unit)'
@echo ' test_unit - Run unit tests with coverage (excludes test/cli)'
@echo ' test_cli - Run CLI integration tests (requires built binary)'
@echo ' test_go_fmt - Check Go source formatting'
@echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml'
@echo ' test_go_expensive - Run all go tests and build all platforms'
@echo ' test_go_race - Run go tests with the race detector enabled'
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
@echo ' test_go_lint - Run golangci-lint'
@echo ' test_sharness - Run sharness tests'
@echo ' coverage - Collects coverage info from unit tests and sharness'
@echo ' coverage - Collect coverage info from unit tests and sharness'
@echo
.PHONY: help

View File

@ -7,6 +7,7 @@ import (
"io"
"path"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
@ -349,7 +350,11 @@ type DagStatSummary struct {
}
func (s *DagStatSummary) String() string {
return fmt.Sprintf("Total Size: %d\nUnique Blocks: %d\nShared Size: %d\nRatio: %f", s.TotalSize, s.UniqueBlocks, s.SharedSize, s.Ratio)
return fmt.Sprintf("Total Size: %d (%s)\nUnique Blocks: %d\nShared Size: %d (%s)\nRatio: %f",
s.TotalSize, humanize.Bytes(s.TotalSize),
s.UniqueBlocks,
s.SharedSize, humanize.Bytes(s.SharedSize),
s.Ratio)
}
func (s *DagStatSummary) incrementTotalSize(size uint64) {
@ -384,7 +389,7 @@ Note: This command skips duplicate blocks in reporting both size and the number
cmds.StringArg("root", true, true, "CID of a DAG root to get statistics for").EnableStdin(),
},
Options: []cmds.Option{
cmds.BoolOption(progressOptionName, "p", "Return progressive data while reading through the DAG").WithDefault(true),
cmds.BoolOption(progressOptionName, "p", "Show progress on stderr. Auto-detected if stderr is a terminal."),
},
Run: dagStat,
Type: DagStatSummary{},
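For reference, the `humanize.Bytes` formatting added to `DagStatSummary.String()` above renders SI-prefixed sizes; a minimal sketch (values are illustrative, not from a real run):

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// humanize.Bytes renders SI (base-10) units, matching the summary output above.
	fmt.Println(humanize.Bytes(82854982)) // "83 MB"
	// humanize.IBytes would render binary (base-2) units instead, e.g. "79 MiB".
}
```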

View File

@ -5,6 +5,7 @@ import (
"io"
"os"
"github.com/dustin/go-humanize"
mdag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/merkledag/traverse"
cid "github.com/ipfs/go-cid"
@ -19,7 +20,11 @@ import (
// to compute the new state
func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
progressive := req.Options[progressOptionName].(bool)
// Default to true (emit intermediate states) for HTTP/RPC clients that want progress
progressive := true
if val, specified := req.Options[progressOptionName].(bool); specified {
progressive = val
}
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
@ -84,6 +89,18 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment)
}
func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
// Determine whether to show progress based on TTY detection or explicit flag
var showProgress bool
val, specified := res.Request().Options[progressOptionName]
if !specified {
// Auto-detect: show progress only if stderr is a TTY
if errStat, err := os.Stderr.Stat(); err == nil {
showProgress = (errStat.Mode() & os.ModeCharDevice) != 0
}
} else {
showProgress = val.(bool)
}
var dagStats *DagStatSummary
for {
v, err := res.Next()
@ -96,17 +113,26 @@ func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
switch out := v.(type) {
case *DagStatSummary:
dagStats = out
if dagStats.Ratio == 0 {
length := len(dagStats.DagStatsArray)
if length > 0 {
currentStat := dagStats.DagStatsArray[length-1]
fmt.Fprintf(os.Stderr, "CID: %s, Size: %d, NumBlocks: %d\n", currentStat.Cid, currentStat.Size, currentStat.NumBlocks)
// Ratio == 0 means this is a progress update (not final result)
if showProgress && dagStats.Ratio == 0 {
// Sum up total progress across all DAGs being scanned
var totalBlocks int64
var totalSize uint64
for _, stat := range dagStats.DagStatsArray {
totalBlocks += stat.NumBlocks
totalSize += stat.Size
}
fmt.Fprintf(os.Stderr, "Fetched/Processed %d blocks, %d bytes (%s)\r", totalBlocks, totalSize, humanize.Bytes(totalSize))
}
default:
return e.TypeErr(out, v)
}
}
// Clear the progress line before final output
if showProgress {
fmt.Fprint(os.Stderr, "\033[2K\r")
}
return re.Emit(dagStats)
}
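The stderr TTY auto-detection and carriage-return progress pattern used in `finishCLIStat` above can be exercised on its own; a minimal standalone sketch (the helper name `stderrIsTerminal` is illustrative and not part of the Kubo codebase):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// stderrIsTerminal mirrors the check above: stat stderr and look for the
// character-device bit, which is absent when stderr is piped to a file.
func stderrIsTerminal() bool {
	st, err := os.Stderr.Stat()
	return err == nil && st.Mode()&os.ModeCharDevice != 0
}

func main() {
	show := stderrIsTerminal()
	for i := 1; i <= 3; i++ {
		if show {
			// "\r" rewrites the same line instead of scrolling the terminal.
			fmt.Fprintf(os.Stderr, "Fetched/Processed %d blocks\r", i)
		}
		time.Sleep(100 * time.Millisecond)
	}
	if show {
		// "\033[2K\r" clears the progress line before the final output.
		fmt.Fprint(os.Stderr, "\033[2K\r")
	}
	fmt.Println("done")
}
```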

View File

@ -458,7 +458,7 @@ var keyListCmd = &cmds.Command{
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
keyEnc, err := ke.KeyEncoderFromString(req.Options[ke.OptionIPNSBase.Name()].(string))
if err != nil {
return err
return fmt.Errorf("cannot get key encoder: %w", err)
}
api, err := cmdenv.GetApi(env, req)
@ -468,7 +468,7 @@ var keyListCmd = &cmds.Command{
keys, err := api.Key().List(req.Context)
if err != nil {
return err
return fmt.Errorf("listing keys failed: %w", err)
}
list := make([]KeyOutput, 0, len(keys))

View File

@ -29,7 +29,7 @@ type key struct {
func newKey(name string, pid peer.ID) (*key, error) {
p, err := path.NewPath("/ipns/" + ipns.NameFromPeer(pid).String())
if err != nil {
return nil, err
return nil, fmt.Errorf("cannot create new key: %w", err)
}
return &key{
name: name,
@ -121,34 +121,37 @@ func (api *KeyAPI) List(ctx context.Context) ([]coreiface.Key, error) {
keys, err := api.repo.Keystore().List()
if err != nil {
return nil, err
return nil, fmt.Errorf("cannot list keys in keystore: %w", err)
}
sort.Strings(keys)
out := make([]coreiface.Key, len(keys)+1)
out := make([]coreiface.Key, 1, len(keys)+1)
out[0], err = newKey("self", api.identity)
if err != nil {
return nil, err
}
for n, k := range keys {
for _, k := range keys {
privKey, err := api.repo.Keystore().Get(k)
if err != nil {
return nil, err
log.Errorf("cannot get key from keystore: %s", err)
continue
}
pubKey := privKey.GetPublic()
pid, err := peer.IDFromPublicKey(pubKey)
if err != nil {
return nil, err
log.Errorf("cannot decode public key: %s", err)
continue
}
out[n+1], err = newKey(k, pid)
k, err := newKey(k, pid)
if err != nil {
return nil, err
}
out = append(out, k)
}
return out, nil
}

View File

@ -30,6 +30,7 @@ import (
const testPeerID = "QmTFauExutTsy4XP6JbMFcw2Wa9645HJt2bTqL6qYDCKfe"
func TestAddMultipleGCLive(t *testing.T) {
ctx := t.Context()
r := &repo.Mock{
C: config.Config{
Identity: config.Identity{
@ -38,13 +39,13 @@ func TestAddMultipleGCLive(t *testing.T) {
},
D: syncds.MutexWrap(datastore.NewMapDatastore()),
}
node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
node, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
if err != nil {
t.Fatal(err)
}
out := make(chan interface{}, 10)
adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG)
adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG)
if err != nil {
t.Fatal(err)
}
@ -67,7 +68,7 @@ func TestAddMultipleGCLive(t *testing.T) {
go func() {
defer close(out)
_, _ = adder.AddAllAndPin(context.Background(), slf)
_, _ = adder.AddAllAndPin(ctx, slf)
// Ignore errors for clarity - the real bug would be gc'ing files while adding them, not this resultant error
}()
@ -80,9 +81,12 @@ func TestAddMultipleGCLive(t *testing.T) {
gc1started := make(chan struct{})
go func() {
defer close(gc1started)
gc1out = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
gc1out = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
}()
// Give GC goroutine time to reach GCLock (will block there waiting for adder)
time.Sleep(time.Millisecond * 100)
// GC shouldn't get the lock until after the file is completely added
select {
case <-gc1started:
@ -119,9 +123,12 @@ func TestAddMultipleGCLive(t *testing.T) {
gc2started := make(chan struct{})
go func() {
defer close(gc2started)
gc2out = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
gc2out = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
}()
// Give GC goroutine time to reach GCLock
time.Sleep(time.Millisecond * 100)
select {
case <-gc2started:
t.Fatal("gc shouldn't have started yet")
@ -155,6 +162,7 @@ func TestAddMultipleGCLive(t *testing.T) {
}
func TestAddGCLive(t *testing.T) {
ctx := t.Context()
r := &repo.Mock{
C: config.Config{
Identity: config.Identity{
@ -163,13 +171,13 @@ func TestAddGCLive(t *testing.T) {
},
D: syncds.MutexWrap(datastore.NewMapDatastore()),
}
node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
node, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
if err != nil {
t.Fatal(err)
}
out := make(chan interface{})
adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG)
adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG)
if err != nil {
t.Fatal(err)
}
@ -193,7 +201,7 @@ func TestAddGCLive(t *testing.T) {
go func() {
defer close(addDone)
defer close(out)
_, err := adder.AddAllAndPin(context.Background(), slf)
_, err := adder.AddAllAndPin(ctx, slf)
if err != nil {
t.Error(err)
}
@ -211,7 +219,7 @@ func TestAddGCLive(t *testing.T) {
gcstarted := make(chan struct{})
go func() {
defer close(gcstarted)
gcout = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
gcout = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
}()
// gc shouldn't start until we let the add finish its current file.
@ -255,9 +263,6 @@ func TestAddGCLive(t *testing.T) {
last = c
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
set := cid.NewSet()
err = dag.Walk(ctx, dag.GetLinksWithDAG(node.DAG), last, set.Visit)
if err != nil {

View File

@ -12,7 +12,6 @@ import (
"github.com/ipfs/kubo/core/node/helpers"
"github.com/ipfs/kubo/repo"
"github.com/filecoin-project/go-clock"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/network"
@ -112,7 +111,6 @@ filled in with autocomputed defaults.`)
return nil, opts, fmt.Errorf("creating libp2p resource manager: %w", err)
}
lrm := &loggingResourceManager{
clock: clock.New(),
logger: &logging.Logger("resourcemanager").SugaredLogger,
delegate: manager,
}

View File

@ -7,7 +7,6 @@ import (
"sync"
"time"
"github.com/filecoin-project/go-clock"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
@ -17,7 +16,6 @@ import (
)
type loggingResourceManager struct {
clock clock.Clock
logger *zap.SugaredLogger
delegate network.ResourceManager
logInterval time.Duration
@ -42,7 +40,7 @@ func (n *loggingResourceManager) start(ctx context.Context) {
if logInterval == 0 {
logInterval = 10 * time.Second
}
ticker := n.clock.Ticker(logInterval)
ticker := time.NewTicker(logInterval)
go func() {
defer ticker.Stop()
for {

View File

@ -2,9 +2,9 @@ package libp2p
import (
"testing"
"testing/synctest"
"time"
"github.com/filecoin-project/go-clock"
"github.com/libp2p/go-libp2p/core/network"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
ma "github.com/multiformats/go-multiaddr"
@ -14,48 +14,49 @@ import (
)
func TestLoggingResourceManager(t *testing.T) {
clock := clock.NewMock()
orig := rcmgr.DefaultLimits.AutoScale()
limits := orig.ToPartialLimitConfig()
limits.System.Conns = 1
limits.System.ConnsInbound = 1
limits.System.ConnsOutbound = 1
limiter := rcmgr.NewFixedLimiter(limits.Build(orig))
rm, err := rcmgr.NewResourceManager(limiter)
if err != nil {
t.Fatal(err)
}
oCore, oLogs := observer.New(zap.WarnLevel)
oLogger := zap.New(oCore)
lrm := &loggingResourceManager{
clock: clock,
logger: oLogger.Sugar(),
delegate: rm,
logInterval: 1 * time.Second,
}
// 2 of these should result in resource limit exceeded errors and subsequent log messages
for i := 0; i < 3; i++ {
_, _ = lrm.OpenConnection(network.DirInbound, false, ma.StringCast("/ip4/127.0.0.1/tcp/1234"))
}
// run the logger which will write an entry for those errors
ctx := t.Context()
lrm.start(ctx)
clock.Add(3 * time.Second)
timer := time.NewTimer(1 * time.Second)
for {
select {
case <-timer.C:
t.Fatalf("expected logs never arrived")
default:
if oLogs.Len() == 0 {
continue
}
require.Equal(t, "Protected from exceeding resource limits 2 times. libp2p message: \"system: cannot reserve inbound connection: resource limit exceeded\".", oLogs.All()[0].Message)
return
synctest.Test(t, func(t *testing.T) {
orig := rcmgr.DefaultLimits.AutoScale()
limits := orig.ToPartialLimitConfig()
limits.System.Conns = 1
limits.System.ConnsInbound = 1
limits.System.ConnsOutbound = 1
limiter := rcmgr.NewFixedLimiter(limits.Build(orig))
rm, err := rcmgr.NewResourceManager(limiter)
if err != nil {
t.Fatal(err)
}
}
defer rm.Close()
oCore, oLogs := observer.New(zap.WarnLevel)
oLogger := zap.New(oCore)
lrm := &loggingResourceManager{
logger: oLogger.Sugar(),
delegate: rm,
logInterval: 1 * time.Second,
}
// 2 of these should result in resource limit exceeded errors and subsequent log messages
for i := 0; i < 3; i++ {
_, _ = lrm.OpenConnection(network.DirInbound, false, ma.StringCast("/ip4/127.0.0.1/tcp/1234"))
}
// run the logger which will write an entry for those errors
ctx := t.Context()
lrm.start(ctx)
time.Sleep(3 * time.Second)
timer := time.NewTimer(1 * time.Second)
for {
select {
case <-timer.C:
t.Fatalf("expected logs never arrived")
default:
if oLogs.Len() == 0 {
continue
}
require.Equal(t, "Protected from exceeding resource limits 2 times. libp2p message: \"system: cannot reserve inbound connection: resource limit exceeded\".", oLogs.All()[0].Message)
return
}
}
})
}
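The rewritten test drops the mocked `clock` and instead runs inside a `testing/synctest` bubble, where the clock is virtual and `time.Sleep` advances it as soon as every goroutine in the bubble is durably blocked. A minimal sketch of that behavior (assumes Go 1.25+, where `synctest.Test` is part of the standard library):

```go
package example

import (
	"testing"
	"testing/synctest"
	"time"
)

func TestVirtualTime(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		start := time.Now() // fake time inside the bubble
		time.Sleep(3 * time.Second)
		// The sleep returns almost instantly in wall-clock terms, but the
		// bubble's virtual clock has advanced by exactly three seconds.
		if got := time.Since(start); got != 3*time.Second {
			t.Fatalf("expected 3s of virtual time, got %v", got)
		}
	})
}
```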

View File

@ -692,6 +692,48 @@ See docs: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxw
// ONLINE/OFFLINE
// hasDHTRouting checks if the routing configuration includes a DHT component.
// Returns false for HTTP-only custom routing configurations (e.g., Routing.Type="custom"
// with only HTTP routers). This is used to determine whether SweepingProviderOpt
// can be used, since it requires a DHT client.
func hasDHTRouting(cfg *config.Config) bool {
routingType := cfg.Routing.Type.WithDefault(config.DefaultRoutingType)
switch routingType {
case "auto", "autoclient", "dht", "dhtclient", "dhtserver":
return true
case "custom":
// Check if any router in custom config is DHT-based
for _, router := range cfg.Routing.Routers {
if routerIncludesDHT(router, cfg) {
return true
}
}
return false
default: // "none", "delegated"
return false
}
}
// routerIncludesDHT recursively checks if a router configuration includes DHT.
// Handles parallel and sequential composite routers by checking their children.
func routerIncludesDHT(rp config.RouterParser, cfg *config.Config) bool {
switch rp.Type {
case config.RouterTypeDHT:
return true
case config.RouterTypeParallel, config.RouterTypeSequential:
if children, ok := rp.Parameters.(*config.ComposableRouterParams); ok {
for _, child := range children.Routers {
if childRouter, exists := cfg.Routing.Routers[child.RouterName]; exists {
if routerIncludesDHT(childRouter, cfg) {
return true
}
}
}
}
}
return false
}
// OnlineProviders groups units managing provide routing records online
func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
if !provide {
@ -708,7 +750,15 @@ func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
opts := []fx.Option{
fx.Provide(setReproviderKeyProvider(providerStrategy)),
}
if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) {
sweepEnabled := cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled)
dhtAvailable := hasDHTRouting(cfg)
// Use SweepingProvider only when both sweep is enabled AND DHT is available.
// For HTTP-only routing (e.g., Routing.Type="custom" with only HTTP routers),
// fall back to LegacyProvider which works with ProvideManyRouter.
// See https://github.com/ipfs/kubo/issues/11089
if sweepEnabled && dhtAvailable {
opts = append(opts, SweepingProviderOpt(cfg))
} else {
reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)

View File

@ -3,33 +3,14 @@ include mk/header.mk
GOCC ?= go
$(d)/coverage_deps: $$(DEPS_GO) cmd/ipfs/ipfs
rm -rf $(@D)/unitcover && mkdir $(@D)/unitcover
rm -rf $(@D)/sharnesscover && mkdir $(@D)/sharnesscover
ifneq ($(IPFS_SKIP_COVER_BINS),1)
$(d)/coverage_deps: test/bin/gocovmerge
endif
.PHONY: $(d)/coverage_deps
# unit tests coverage
UTESTS_$(d) := $(shell $(GOCC) list -f '{{if (or (len .TestGoFiles) (len .XTestGoFiles))}}{{.ImportPath}}{{end}}' $(go-flags-with-tags) ./... | grep -v go-ipfs/vendor | grep -v go-ipfs/Godeps)
# unit tests coverage is now produced by test_unit target in mk/golang.mk
# (outputs coverage/unit_tests.coverprofile and test/unit/gotest.json)
UCOVER_$(d) := $(addsuffix .coverprofile,$(addprefix $(d)/unitcover/, $(subst /,_,$(UTESTS_$(d)))))
$(UCOVER_$(d)): $(d)/coverage_deps ALWAYS
$(eval TMP_PKG := $(subst _,/,$(basename $(@F))))
$(eval TMP_DEPS := $(shell $(GOCC) list -f '{{range .Deps}}{{.}} {{end}}' $(go-flags-with-tags) $(TMP_PKG) | sed 's/ /\n/g' | grep ipfs/go-ipfs) $(TMP_PKG))
$(eval TMP_DEPS_LIST := $(call join-with,$(comma),$(TMP_DEPS)))
$(GOCC) test $(go-flags-with-tags) $(GOTFLAGS) -v -covermode=atomic -json -coverpkg=$(TMP_DEPS_LIST) -coverprofile=$@ $(TMP_PKG) | tee -a test/unit/gotest.json
$(d)/unit_tests.coverprofile: $(UCOVER_$(d))
gocovmerge $^ > $@
TGTS_$(d) := $(d)/unit_tests.coverprofile
.PHONY: $(d)/unit_tests.coverprofile
TGTS_$(d) :=
# sharness tests coverage
$(d)/ipfs: GOTAGS += testrunmain
@ -46,7 +27,7 @@ endif
export IPFS_COVER_DIR:= $(realpath $(d))/sharnesscover/
$(d)/sharness_tests.coverprofile: export TEST_PLUGIN=0
$(d)/sharness_tests.coverprofile: $(d)/ipfs cmd/ipfs/ipfs-test-cover $(d)/coverage_deps test_sharness
$(d)/sharness_tests.coverprofile: $(d)/ipfs cmd/ipfs/ipfs-test-cover $(d)/coverage_deps test/bin/gocovmerge test_sharness
(cd $(@D)/sharnesscover && find . -type f | gocovmerge -list -) > $@

View File

@ -14,9 +14,9 @@ Otherwise, check out the following guides to using and developing IPFS:
## Developing `kubo`
- First, please read the Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and then the Contributing Guidelines for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md)
- Building on…
- [Windows](windows.md)
- **[Developer Guide](developer-guide.md)** - prerequisites, build, test, and contribute
- Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md)
- Building on [Windows](windows.md)
- [Performance Debugging Guidelines](debug-guide.md)
- [Release Checklist](releases.md)

View File

@ -13,6 +13,8 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [Routing V1 HTTP API now exposed by default](#routing-v1-http-api-now-exposed-by-default)
- [Track total size when adding pins](#track-total-size-when-adding-pins)
- [🚇 Improved `ipfs p2p` tunnels with foreground mode](#-improved-ipfs-p2p-tunnels-with-foreground-mode)
- [Improved `ipfs dag stat` output](#improved-ipfs-dag-stat-output)
- [Skip bad keys when listing](#skip-bad-keys-when-listing)
- [📦️ Dependency updates](#-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
@ -51,6 +53,34 @@ Without `--foreground`, commands return immediately and tunnels persist until ex
See [docs/p2p-tunnels.md](https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md) for usage examples.
#### Improved `ipfs dag stat` output
The `ipfs dag stat` command has been improved for better terminal UX:
- Progress output now uses a single line with carriage return, avoiding terminal flooding
- Progress is auto-detected: shown only in interactive terminals by default
- Human-readable sizes are now displayed alongside raw byte counts
Example progress (interactive terminal):
```
Fetched/Processed 84 blocks, 2097152 bytes (2.1 MB)
```
Example summary output:
```
Summary
Total Size: 2097152 (2.1 MB)
Unique Blocks: 42
Shared Size: 1048576 (1.0 MB)
Ratio: 1.500000
```
Use `--progress=true` to force progress even when piped, or `--progress=false` to disable it.
#### Skip bad keys when listing
The `ipfs key list` command now logs an error and continues listing the remaining keys when a key cannot be read from the keystore or decoded.
#### 📦️ Dependency updates
- update `go-libp2p` to [v0.46.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.46.0)
@ -58,6 +88,8 @@ See [docs/p2p-tunnels.md](https://github.com/ipfs/kubo/blob/master/docs/p2p-tunn
- Fixed mDNS discovery on Windows and macOS by filtering addresses to reduce packet size ([go-libp2p#3434](https://github.com/libp2p/go-libp2p/pull/3434)).
- update `quic-go` to [v0.57.1](https://github.com/quic-go/quic-go/releases/tag/v0.57.1) (incl. [v0.56.0](https://github.com/quic-go/quic-go/releases/tag/v0.56.0) + [v0.57.0](https://github.com/quic-go/quic-go/releases/tag/v0.57.0))
- update `p2p-forge` to [v0.7.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.7.0)
- update `go-ds-pebble` to [v0.5.8](https://github.com/ipfs/go-ds-pebble/releases/tag/v0.5.8)
- update `github.com/cockroachdb/pebble` to [v2.1.3](https://github.com/cockroachdb/pebble/releases/tag/v2.1.3) to enable Go 1.26 support
### 📝 Changelog

View File

@ -1085,7 +1085,11 @@ Type: `bool`
Options for the HTTP gateway.
**NOTE:** support for `/api/v0` under the gateway path is now deprecated. It will be removed in future versions: <https://github.com/ipfs/kubo/issues/10312>.
> [!IMPORTANT]
> By default, Kubo's gateway is configured for local use at `127.0.0.1` and `localhost`.
> To run a public gateway, configure your domain names in [`Gateway.PublicGateways`](#gatewaypublicgateways).
> For production deployment considerations (reverse proxy, timeouts, rate limiting, CDN),
> see [Running in Production](gateway.md#running-in-production).
### `Gateway.NoFetch`
@ -1280,6 +1284,11 @@ Examples:
- `*.example.com` will match requests to `http://foo.example.com/ipfs/*` or `http://{cid}.ipfs.bar.example.com/*`.
- `foo-*.example.com` will match requests to `http://foo-bar.example.com/ipfs/*` or `http://{cid}.ipfs.foo-xyz.example.com/*`.
> [!IMPORTANT]
> **Reverse Proxy:** If running behind nginx or another reverse proxy, ensure
> `Host` and `X-Forwarded-*` headers are forwarded correctly.
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) in gateway documentation.
#### `Gateway.PublicGateways: Paths`
An array of paths that should be exposed on the hostname.
@ -1346,6 +1355,9 @@ Default: `false`
Type: `bool`
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
#### `Gateway.PublicGateways: NoDNSLink`
A boolean to configure whether DNSLink for hostname present in `Host`
@ -1356,6 +1368,9 @@ Default: `false` (DNSLink lookup enabled by default for every defined hostname)
Type: `bool`
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
#### `Gateway.PublicGateways: InlineDNSLink`
An optional flag to explicitly configure whether subdomain gateway's redirects
@ -1423,6 +1438,9 @@ ipfs config --json Gateway.PublicGateways '{"localhost": null }'
Below is a list of the most common gateway setups.
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
- Public [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://{cid}.ipfs.dweb.link` (each content root gets its own Origin)
```console
@ -2207,6 +2225,9 @@ You can compare the effectiveness of sweep mode vs legacy mode by monitoring the
> [!NOTE]
> This is the default provider system as of Kubo v0.39. To use the legacy provider instead, set `Provide.DHT.SweepEnabled=false`.
> [!NOTE]
> When DHT routing is unavailable (e.g., `Routing.Type=custom` with only HTTP routers), the provider automatically falls back to the legacy provider regardless of this setting.
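For example, a minimal sketch of switching back to the legacy provider, using the same `ipfs config --json` pattern as the other examples in this document:

```
ipfs config --json Provide.DHT.SweepEnabled false
```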
Default: `true`
Type: `flag`

316
docs/developer-guide.md Normal file
View File

@ -0,0 +1,316 @@
# Developer Guide
By the end of this guide, you will be able to:
- Build Kubo from source
- Run the test suites
- Make and verify code changes
This guide covers the local development workflow. For user documentation, see [docs.ipfs.tech](https://docs.ipfs.tech/).
## Table of Contents
- [Prerequisites](#prerequisites)
- [Quick Start](#quick-start)
- [Building](#building)
- [Running Tests](#running-tests)
- [Running the Linter](#running-the-linter)
- [Common Development Tasks](#common-development-tasks)
- [Code Organization](#code-organization)
- [Architecture](#architecture)
- [Troubleshooting](#troubleshooting)
- [Development Dependencies](#development-dependencies)
- [Further Reading](#further-reading)
## Prerequisites
Before you begin, ensure you have:
- **Go** - see `go.mod` for the minimum required version
- **Git**
- **GNU Make**
- **GCC** (optional) - required for CGO (Go's C interop); without it, build with `CGO_ENABLED=0`
## Quick Start
```bash
git clone https://github.com/ipfs/kubo.git
cd kubo
make build
./cmd/ipfs/ipfs version
```
You should see output like:
```
ipfs version 0.34.0-dev
```
The binary is built to `cmd/ipfs/ipfs`. To install it system-wide:
```bash
make install
```
This installs the binary to `$GOPATH/bin`.
## Building
| Command | Description |
|---------|-------------|
| `make build` | build the `ipfs` binary to `cmd/ipfs/ipfs` |
| `make install` | install to `$GOPATH/bin` |
| `make nofuse` | build without FUSE support |
| `make build CGO_ENABLED=0` | build without CGO (no C compiler needed) |
For Windows-specific instructions, see [windows.md](windows.md).
## Running Tests
Kubo has two types of tests:
- **Unit tests** - test individual packages in isolation. Fast and don't require a running daemon.
- **End-to-end tests** - spawn real `ipfs` nodes, run actual CLI commands, and test the full system. Slower but catch integration issues.
Note that `go test ./...` runs both unit and end-to-end tests. Use `make test` to run all tests. CI runs unit and end-to-end tests in separate jobs for faster feedback.
<!-- TODO: uncomment when https://github.com/ipfs/kubo/pull/11113 is merged
| Command | What it runs |
|---------|--------------|
| `make test_unit` | unit tests only (excludes `test/cli`) |
| `make test_cli` | CLI end-to-end tests only (requires `make build` first) |
| `make test_sharness` | sharness end-to-end tests only |
| `make test` | all tests (unit + CLI + sharness) |
-->
For end-to-end tests, Kubo has two suites:
- **`test/cli`** - modern Go-based test harness that spawns real `ipfs` nodes and runs actual CLI commands. All new tests should be added here.
- **`test/sharness`** - legacy bash-based tests. We are slowly migrating these to `test/cli`.
When modifying tests: cosmetic changes to `test/sharness` are fine, but if significant rewrites are needed, remove the outdated sharness test and add a modern one to `test/cli` instead.
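For orientation, here is a minimal sketch of a `test/cli`-style test. It assumes the harness helpers used throughout `test/cli` (`harness.NewT`, `NewNode().Init().StartDaemon()`, `IPFSAddStr`, `IPFS`); the test name and content are placeholders, not an existing test.

```go
package cli

import (
	"testing"

	"github.com/ipfs/kubo/test/cli/harness"
	"github.com/stretchr/testify/require"
)

// TestMyFeature is a placeholder name for illustration only.
func TestMyFeature(t *testing.T) {
	t.Parallel()

	// Spawn a real ipfs node with its own temporary repo and daemon.
	node := harness.NewT(t).NewNode().Init().StartDaemon()
	defer node.StopDaemon()

	// Drive the actual CLI: add content, then read it back.
	cid := node.IPFSAddStr("hello from test/cli")
	res := node.IPFS("cat", cid)
	require.Equal(t, "hello from test/cli", res.Stdout.Trimmed())
}
```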
### Before Running Tests
**Environment requirements**: some legacy tests expect default ports (8080, 5001, 4001) to be free and no mDNS (local network discovery) Kubo service on the LAN. Tests may fail if you have a local Kubo instance running. Before running the full test suite, stop any running `ipfs daemon`.
Two critical setup steps:
1. **Rebuild after code changes**: if you modify any `.go` files outside of `test/`, you must run `make build` before running integration tests.
2. **Set environment variables**: integration tests use the `ipfs` binary from `PATH` and need an isolated `IPFS_PATH`. Run these commands from the repository root:
```bash
export PATH="$PWD/cmd/ipfs:$PATH"
export IPFS_PATH="$(mktemp -d)"
```
### Unit Tests
```bash
go test ./...
```
### CLI Integration Tests (`test/cli`)
These are Go-based integration tests that invoke the `ipfs` CLI.
Instead of running the entire test suite, you can run a specific test to get faster feedback during development.
Run a specific test (recommended during development):
```bash
go test ./test/cli/... -run TestAdd -v
```
Run all CLI tests:
```bash
go test ./test/cli/...
```
Run with verbose output:
```bash
go test ./test/cli/... -v
```
**Common error**: "version (16) is lower than repos (17)" means your `PATH` points to an old binary. Check `which ipfs` and rebuild with `make build`.
### Sharness Tests (`test/sharness`)
Shell-based integration tests using [sharness](https://github.com/chriscool/sharness) (a portable shell testing framework).
```bash
cd test/sharness
```
Run a specific test:
```bash
timeout 60s ./t0080-repo.sh
```
Run with verbose output (this disables automatic cleanup):
```bash
./t0080-repo.sh -v
```
**Cleanup**: the `-v` flag disables automatic cleanup. Before re-running tests, kill any dangling `ipfs daemon` processes:
```bash
pkill -f "ipfs daemon"
```
### Full Test Suite
```bash
make test # run all tests
make test_short # run shorter test suite
```
## Running the Linter
Run the linter using the Makefile target (not `golangci-lint` directly):
```bash
make -O test_go_lint
```
## Common Development Tasks
### Modifying CLI Commands
After editing help text in `core/commands/`, verify the output width:
```bash
go test ./test/cli/... -run TestCommandDocsWidth
```
### Updating Dependencies
Use the Makefile target (not `go mod tidy` directly):
```bash
make mod_tidy
```
### Editing the Changelog
When modifying `docs/changelogs/`:
- update the Table of Contents when adding sections
- add user-facing changes to the Highlights section (the Changelog section is auto-generated)
### Running the Daemon
Always run the daemon with a timeout or shut it down promptly.
With timeout:
```bash
timeout 60s ipfs daemon
```
Or shut down via API:
```bash
ipfs shutdown
```
For multi-step experiments, store `IPFS_PATH` in a file to ensure consistency.
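One rough way to do this (the state-file path is just an example; use whatever suits your setup):

```bash
# Pick an isolated repo once and record where it lives.
export IPFS_PATH="$(mktemp -d)"
echo "$IPFS_PATH" > /tmp/kubo-dev-ipfs-path

# In later shells or script steps, reuse the same repo.
export IPFS_PATH="$(cat /tmp/kubo-dev-ipfs-path)"
timeout 60s ipfs daemon
```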
## Code Organization
| Directory | Description |
|-----------|-------------|
| `cmd/ipfs/` | CLI entry point and binary |
| `core/` | core IPFS node implementation |
| `core/commands/` | CLI command definitions |
| `core/coreapi/` | Go API implementation |
| `client/rpc/` | HTTP RPC client |
| `plugin/` | plugin system |
| `repo/` | repository management |
| `test/cli/` | Go-based CLI integration tests |
| `test/sharness/` | legacy shell-based integration tests |
| `docs/` | documentation |
Key external dependencies:
- [go-libp2p](https://github.com/libp2p/go-libp2p) - networking stack
- [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht) - distributed hash table
- [boxo](https://github.com/ipfs/boxo) - IPFS SDK (including Bitswap, the data exchange engine)
For a deep dive into how code flows through Kubo, see [The `Add` command demystified](add-code-flow.md).
## Architecture
**Map of Implemented Subsystems** ([editable source](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit)):
<img src="https://docs.google.com/drawings/d/e/2PACX-1vS_n1FvSu6mdmSirkBrIIEib2gqhgtatD9awaP2_WdrGN4zTNeg620XQd9P95WT-IvognSxIIdCM5uE/pub?w=1446&amp;h=1036">
**CLI, HTTP-API, Core Diagram**:
![](./cli-http-api-core-diagram.png)
## Troubleshooting
### "version (N) is lower than repos (M)" Error
This means the `ipfs` binary in your `PATH` is older than expected.
Check which binary is being used:
```bash
which ipfs
```
Rebuild and verify PATH:
```bash
make build
export PATH="$PWD/cmd/ipfs:$PATH"
./cmd/ipfs/ipfs version
```
### FUSE Issues
If you don't need FUSE support, build without it:
```bash
make nofuse
```
Or set the `TEST_FUSE=0` environment variable when running tests.
### Build Fails with "No such file: stdlib.h"
You're missing a C compiler. Either install GCC or build without CGO:
```bash
make build CGO_ENABLED=0
```
## Development Dependencies
If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf).
## Further Reading
- [The `Add` command demystified](add-code-flow.md) - deep dive into code flow
- [Configuration reference](config.md)
- [Performance debugging](debug-guide.md)
- [Experimental features](experimental-features.md)
- [Release process](releases.md)
- [Contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md)
## Source Code
The complete source code is at [github.com/ipfs/kubo](https://github.com/ipfs/kubo).

View File

@ -34,9 +34,9 @@ require (
github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble/v2 v2.1.2 // indirect
github.com/cockroachdb/pebble/v2 v2.1.3 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect
github.com/cskr/pubsub v1.0.2 // indirect
@ -81,7 +81,7 @@ require (
github.com/ipfs/go-ds-flatfs v0.5.5 // indirect
github.com/ipfs/go-ds-leveldb v0.5.2 // indirect
github.com/ipfs/go-ds-measure v0.2.2 // indirect
github.com/ipfs/go-ds-pebble v0.5.7 // indirect
github.com/ipfs/go-ds-pebble v0.5.8 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-fs-lock v0.1.1 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect

View File

@ -84,12 +84,12 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.3 h1:irU503OnjRoJBrkZQIJvwv9c4WvpUeOJxhRApojB8D8=
github.com/cockroachdb/pebble/v2 v2.1.3/go.mod h1:B1UgWsyR+L+UvZXNgpxw+WqsUKA8VQ/bb//FXOHghB8=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@ -295,8 +295,8 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI=
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.7 h1:4PQI46y3fjjxUTgHwYqcOVyoxiU6v1sqN6ONeRXGQTM=
github.com/ipfs/go-ds-pebble v0.5.7/go.mod h1:rsIgXE2qN+VfHKBin2cOOGFTZ/Agor6i8wBWA6ihbr0=
github.com/ipfs/go-ds-pebble v0.5.8 h1:NbAfKQo+m39Nka6gt8PARAyH+VoHtRInB6CFCmT+wqo=
github.com/ipfs/go-ds-pebble v0.5.8/go.mod h1:AJjJTHgads/Fn5+tuWmaDGjGEbks7Wgx82NQ/pwmEhc=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=

View File

@ -47,7 +47,7 @@ func setupPlugins(externalPluginsPath string) error {
return nil
}
func createTempRepo(swarmPort int) (string, error) {
func createTempRepo() (string, error) {
repoPath, err := os.MkdirTemp("", "ipfs-shell")
if err != nil {
return "", fmt.Errorf("failed to get temp dir: %s", err)
@ -59,15 +59,28 @@ func createTempRepo(swarmPort int) (string, error) {
return "", err
}
// Configure custom ports to avoid conflicts with other IPFS instances.
// This demonstrates how to customize the node's network addresses.
// Use TCP-only on loopback with random port for reliable local testing.
// This matches what kubo's test harness uses (test/cli/transports_test.go).
// QUIC/UDP transports are avoided because they may be throttled on CI.
cfg.Addresses.Swarm = []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", swarmPort),
fmt.Sprintf("/ip4/0.0.0.0/udp/%d/quic-v1", swarmPort),
fmt.Sprintf("/ip4/0.0.0.0/udp/%d/quic-v1/webtransport", swarmPort),
fmt.Sprintf("/ip4/0.0.0.0/udp/%d/webrtc-direct", swarmPort),
"/ip4/127.0.0.1/tcp/0",
}
// Explicitly disable non-TCP transports for reliability.
cfg.Swarm.Transports.Network.QUIC = config.False
cfg.Swarm.Transports.Network.Relay = config.False
cfg.Swarm.Transports.Network.WebTransport = config.False
cfg.Swarm.Transports.Network.WebRTCDirect = config.False
cfg.Swarm.Transports.Network.Websocket = config.False
cfg.AutoTLS.Enabled = config.False
// Disable routing - we don't need DHT for direct peer connections.
// Bitswap works with directly connected peers without needing DHT lookups.
cfg.Routing.Type = config.NewOptionalString("none")
// Disable bootstrap for this example - we manually connect only the peers we need.
cfg.Bootstrap = []string{}
// When creating the repository, you can define custom settings on the repository, such as enabling experimental
// features (See experimental-features.md) or customizing the gateway endpoint.
// To do such things, you should modify the variable `cfg`. For example:
@ -106,10 +119,14 @@ func createNode(ctx context.Context, repoPath string) (*core.IpfsNode, error) {
// Construct the node
nodeOptions := &core.BuildCfg{
Online: true,
Routing: libp2p.DHTOption, // This option sets the node to be a full DHT node (both fetching and storing DHT Records)
// Routing: libp2p.DHTClientOption, // This option sets the node to be a client DHT node (only fetching records)
Repo: repo,
Online: true,
// For this example, we use NilRouterOption (no routing) since we connect peers directly.
// Bitswap works with directly connected peers without needing DHT lookups.
// In production, you would typically use:
// Routing: libp2p.DHTOption, // Full DHT node (stores and fetches records)
// Routing: libp2p.DHTClientOption, // DHT client (only fetches records)
Routing: libp2p.NilRouterOption,
Repo: repo,
}
return core.NewNode(ctx, nodeOptions)
@ -118,8 +135,7 @@ func createNode(ctx context.Context, repoPath string) (*core.IpfsNode, error) {
var loadPluginsOnce sync.Once
// Spawns a node to be used just for this run (i.e. creates a tmp repo).
// The swarmPort parameter specifies the port for libp2p swarm listeners.
func spawnEphemeral(ctx context.Context, swarmPort int) (icore.CoreAPI, *core.IpfsNode, error) {
func spawnEphemeral(ctx context.Context) (icore.CoreAPI, *core.IpfsNode, error) {
var onceErr error
loadPluginsOnce.Do(func() {
onceErr = setupPlugins("")
@ -129,7 +145,7 @@ func spawnEphemeral(ctx context.Context, swarmPort int) (icore.CoreAPI, *core.Ip
}
// Create a Temporary Repo
repoPath, err := createTempRepo(swarmPort)
repoPath, err := createTempRepo()
if err != nil {
return nil, nil, fmt.Errorf("failed to create temp repo: %s", err)
}
@ -207,8 +223,7 @@ func main() {
defer cancel()
// Spawn a local peer using a temporary path, for testing purposes
// Using port 4010 to avoid conflict with default IPFS port 4001
ipfsA, nodeA, err := spawnEphemeral(ctx, 4010)
ipfsA, nodeA, err := spawnEphemeral(ctx)
if err != nil {
panic(fmt.Errorf("failed to spawn peer node: %s", err))
}
@ -222,9 +237,8 @@ func main() {
fmt.Printf("Added file to peer with CID %s\n", peerCidFile.String())
// Spawn a node using a temporary path, creating a temporary repo for the run
// Using port 4011 (different from nodeA's port 4010)
fmt.Println("Spawning Kubo node on a temporary repo")
ipfsB, _, err := spawnEphemeral(ctx, 4011)
ipfsB, _, err := spawnEphemeral(ctx)
if err != nil {
panic(fmt.Errorf("failed to spawn ephemeral node: %s", err))
}
@ -297,11 +311,12 @@ func main() {
fmt.Printf("Got directory back from IPFS (IPFS path: %s) and wrote it to %s\n", cidDirectory.String(), outputPathDirectory)
/// --- Part IV: Getting a file from the IPFS Network
/// --- Part IV: Getting a file from another IPFS node
fmt.Println("\n-- Going to connect to a few nodes in the Network as bootstrappers --")
fmt.Println("\n-- Connecting to nodeA and fetching content via bitswap --")
// Get nodeA's address so we can fetch the file we added to it
// Get nodeA's actual listening address dynamically.
// We configured TCP-only on 127.0.0.1 with random port, so this will be a TCP address.
peerAddrs, err := ipfsA.Swarm().LocalAddrs(ctx)
if err != nil {
panic(fmt.Errorf("could not get peer addresses: %s", err))
@ -309,26 +324,18 @@ func main() {
peerMa := peerAddrs[0].String() + "/p2p/" + nodeA.Identity.String()
bootstrapNodes := []string{
// In production, use autoconf.FallbackBootstrapPeers from boxo/autoconf
// which includes well-known IPFS bootstrap peers like:
// In production, use real bootstrap peers like:
// "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
// "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
// "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
// You can add custom peers here. For example, another IPFS node:
// "/ip4/192.0.2.1/tcp/4001/p2p/QmYourPeerID...",
// "/ip4/192.0.2.1/udp/4001/quic-v1/p2p/QmYourPeerID...",
// nodeA's address (the peer we created above that has our test file)
// For this example, we only connect to nodeA which has our test content.
peerMa,
}
fmt.Println("Connecting to peers...")
fmt.Println("Connecting to peer...")
err = connectToPeers(ctx, ipfsB, bootstrapNodes)
if err != nil {
panic(fmt.Errorf("failed to connect to peers: %s", err))
}
fmt.Println("Connected to peers")
fmt.Println("Connected to peer")
exampleCIDStr := peerCidFile.RootCid().String()

View File

@ -1,21 +1,39 @@
package main
import (
"bytes"
"io"
"os"
"os/exec"
"strings"
"testing"
"time"
)
func TestExample(t *testing.T) {
out, err := exec.Command("go", "run", "main.go").Output()
t.Log("Starting go run main.go...")
start := time.Now()
cmd := exec.Command("go", "run", "main.go")
cmd.Env = append(os.Environ(), "GOLOG_LOG_LEVEL=error") // reduce libp2p noise
// Stream output to both test log and capture buffer for verification
// This ensures we see progress even if the process is killed
var buf bytes.Buffer
cmd.Stdout = io.MultiWriter(os.Stdout, &buf)
cmd.Stderr = io.MultiWriter(os.Stderr, &buf)
err := cmd.Run()
elapsed := time.Since(start)
t.Logf("Command completed in %v", elapsed)
out := buf.String()
if err != nil {
var stderr string
if xe, ok := err.(*exec.ExitError); ok {
stderr = string(xe.Stderr)
}
t.Fatalf("running example (%v): %s\n%s", err, string(out), stderr)
t.Fatalf("running example (%v):\n%s", err, out)
}
if !strings.Contains(string(out), "All done!") {
t.Errorf("example did not run successfully")
if !strings.Contains(out, "All done!") {
t.Errorf("example did not complete successfully, output:\n%s", out)
}
}

View File

@ -6,7 +6,7 @@ they were stored in a traditional web server.
[More about Gateways](https://docs.ipfs.tech/concepts/ipfs-gateway/) and [addressing IPFS on the web](https://docs.ipfs.tech/how-to/address-ipfs-on-web/).
Kubo's Gateway implementation follows [ipfs/specs: Specification for HTTP Gateways](https://github.com/ipfs/specs/tree/main/http-gateways#readme).
Kubo's Gateway implementation follows [IPFS Gateway Specifications](https://specs.ipfs.tech/http-gateways/) and is tested with [Gateway Conformance Test Suite](https://github.com/ipfs/gateway-conformance).
### Local gateway
@ -14,14 +14,21 @@ By default, Kubo nodes run
a [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://127.0.0.1:8080/`
and a [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://localhost:8080/`.
The path gateway also implements the [trustless gateway spec](https://specs.ipfs.tech/http-gateways/trustless-gateway/)
and supports [trustless responses](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval), opt-in via the `Accept` header.
> [!CAUTION]
> **For browsing websites, web apps, and dapps in a browser, use the subdomain
> gateway** (`localhost`). Each content root gets its own
> [web origin](https://developer.mozilla.org/en-US/docs/Web/Security/Same-origin_policy),
> isolating localStorage, cookies, and session data between sites.
>
> **For file retrieval, use the path gateway** (`127.0.0.1`). Path gateways are
> suited for downloading files or fetching [verifiable](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval)
> content, but lack origin isolation (all content shares the same origin).
Additional listening addresses and gateway behaviors can be set in the [config](#configuration) file.
### Public gateways
Protocol Labs provides a public gateway at
IPFS Foundation [provides public gateways](https://docs.ipfs.tech/concepts/public-utilities/) at
`https://ipfs.io` ([path](https://specs.ipfs.tech/http-gateways/path-gateway/)),
`https://dweb.link` ([subdomain](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway)),
and `https://trustless-gateway.link` ([trustless](https://specs.ipfs.tech/http-gateways/trustless-gateway/) only).
@ -41,6 +48,80 @@ The gateway's log level can be changed with this command:
> ipfs log level core/server debug
```
## Running in Production
When deploying Kubo's gateway in production, be aware of these important considerations:
<a id="reverse-proxy"></a>
> [!IMPORTANT]
> **Reverse Proxy:** When running Kubo behind a reverse proxy (such as nginx),
> the original `Host` header **must** be forwarded to Kubo for
> [`Gateway.PublicGateways`](config.md#gatewaypublicgateways) to work.
> Kubo uses the `Host` header to match configured hostnames and detect
> subdomain gateway patterns like `{cid}.ipfs.example.org` or DNSLink hostnames.
>
> If the `Host` header is not forwarded correctly, Kubo will not recognize
> the configured gateway hostnames and requests may be handled incorrectly.
>
> If `X-Forwarded-Proto` is not set, redirects for requests that arrived over HTTPS will use the wrong protocol,
> and DNSLink names will not be inlined for subdomain gateways.
>
> Example: minimal nginx configuration for `example.org`
>
> ```nginx
> server {
> listen 80;
> listen [::]:80;
>
> # IMPORTANT: Include wildcard to match subdomain gateway requests.
> # The dot prefix matches both apex domain and all subdomains.
> server_name .example.org;
>
> location / {
> proxy_pass http://127.0.0.1:8080;
>
> # IMPORTANT: Forward the original Host header to Kubo.
> # Without this, PublicGateways configuration will not work.
> proxy_set_header Host $host;
>
> # IMPORTANT: X-Forwarded-Proto is required for correct behavior:
> # - Redirects will use https:// URLs when set to "https"
> # - DNSLink names will be inlined for subdomain gateways
> # (e.g., /ipns/en.wikipedia-on-ipfs.org → en-wikipedia--on--ipfs-org.ipns.example.org)
> proxy_set_header X-Forwarded-Proto $scheme;
> proxy_set_header X-Forwarded-Host $host;
> }
> }
> ```
>
> Common mistakes to avoid:
>
> - **Missing wildcard in `server_name`:** Using only `server_name example.org;`
> will not match subdomain requests like `{cid}.ipfs.example.org`. Always
> include `*.example.org` or use the dot prefix `.example.org`.
>
> - **Wrong `Host` header value:** Using `proxy_set_header Host $proxy_host;`
> sends the backend's hostname (e.g., `127.0.0.1:8080`) instead of the
> original `Host` header. Always use `$host` or `$http_host`.
>
> - **Missing `Host` header entirely:** If `proxy_set_header Host` is not
> specified, nginx defaults to `$proxy_host`, which breaks gateway routing.
> [!IMPORTANT]
> **Timeouts:** Configure [`Gateway.RetrievalTimeout`](config.md#gatewayretrievaltimeout)
> based on your expected content retrieval times.
> [!IMPORTANT]
> **Rate Limiting:** Use [`Gateway.MaxConcurrentRequests`](config.md#gatewaymaxconcurrentrequests)
> to protect against traffic spikes.
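Both settings above can be tuned with `ipfs config`. As a rough illustration only (the values below are placeholders, not recommendations; see the linked config docs for semantics):

```
> ipfs config --json Gateway.RetrievalTimeout '"2m"'
> ipfs config --json Gateway.MaxConcurrentRequests 4096
```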
> [!IMPORTANT]
> **CDN/Cloudflare:** If using Cloudflare or other CDNs with
> [deserialized responses](config.md#gatewaydeserializedresponses) enabled, review
> [`Gateway.MaxRangeRequestFileSize`](config.md#gatewaymaxrangerequestfilesize) to avoid
> excess bandwidth billing from range request bugs. Cloudflare users may need additional
> protection via [Cloudflare Snippets](https://github.com/ipfs/boxo/issues/856#issuecomment-3523944976).
## Directories
For convenience, the gateway (mostly) acts like a normal web-server when serving
@ -53,7 +134,7 @@ a directory:
2. Dynamically build and serve a listing of the contents of the directory.
<sub><sup>&dagger;</sup>This redirect is skipped if the query string contains a
`go-get=1` parameter. See [PR#3964](https://github.com/ipfs/kubo/pull/3963)
`go-get=1` parameter. See [PR#3963](https://github.com/ipfs/kubo/pull/3963)
for details</sub>
## Static Websites
This is the equivalent of `ipfs block get`.
### `application/vnd.ipld.car`
Returns a [CAR](https://ipld.io/specs/transport/car/) stream for specific DAG and selector.
Returns a [CAR](https://ipld.io/specs/transport/car/) stream for a DAG or a subset of it.
Right now only 'full DAG' implicit selector is implemented.
Support for user-provided IPLD selectors is tracked in https://github.com/ipfs/kubo/issues/8769.
The `dag-scope` parameter controls which blocks are included: `all` (default, entire DAG),
`entity` (logical unit like a file), or `block` (single block). For [UnixFS](https://specs.ipfs.tech/unixfs/) files,
`entity-bytes` enables byte range requests. See [IPIP-402](https://specs.ipfs.tech/ipips/ipip-0402/)
for details.
This is a rough equivalent of `ipfs dag export`.
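As an illustration (with `<cid>` as a placeholder and the default local gateway address), a scoped CAR request could look like this; see IPIP-402 for the authoritative parameter semantics:

```
> curl -sH "Accept: application/vnd.ipld.car" -o entity.car "http://127.0.0.1:8080/ipfs/<cid>?dag-scope=entity"
```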

8
go.mod
View File

@ -11,12 +11,11 @@ require (
github.com/cenkalti/backoff/v4 v4.3.0
github.com/ceramicnetwork/go-dag-jose v0.1.1
github.com/cheggaaa/pb v1.0.29
github.com/cockroachdb/pebble/v2 v2.1.2
github.com/cockroachdb/pebble/v2 v2.1.3
github.com/coreos/go-systemd/v22 v22.5.0
github.com/dustin/go-humanize v1.0.1
github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5
github.com/filecoin-project/go-clock v0.1.0
github.com/fsnotify/fsnotify v1.9.0
github.com/google/uuid v1.6.0
github.com/hashicorp/go-version v1.7.0
@ -32,7 +31,7 @@ require (
github.com/ipfs/go-ds-flatfs v0.5.5
github.com/ipfs/go-ds-leveldb v0.5.2
github.com/ipfs/go-ds-measure v0.2.2
github.com/ipfs/go-ds-pebble v0.5.7
github.com/ipfs/go-ds-pebble v0.5.8
github.com/ipfs/go-fs-lock v0.1.1
github.com/ipfs/go-ipfs-cmds v0.15.0
github.com/ipfs/go-ipld-cbor v0.2.1
@ -114,7 +113,7 @@ require (
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect
github.com/cskr/pubsub v1.0.2 // indirect
@ -126,6 +125,7 @@ require (
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/filecoin-project/go-clock v0.1.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect

12
go.sum
View File

@ -117,12 +117,12 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.3 h1:irU503OnjRoJBrkZQIJvwv9c4WvpUeOJxhRApojB8D8=
github.com/cockroachdb/pebble/v2 v2.1.3/go.mod h1:B1UgWsyR+L+UvZXNgpxw+WqsUKA8VQ/bb//FXOHghB8=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@ -366,8 +366,8 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI=
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.7 h1:4PQI46y3fjjxUTgHwYqcOVyoxiU6v1sqN6ONeRXGQTM=
github.com/ipfs/go-ds-pebble v0.5.7/go.mod h1:rsIgXE2qN+VfHKBin2cOOGFTZ/Agor6i8wBWA6ihbr0=
github.com/ipfs/go-ds-pebble v0.5.8 h1:NbAfKQo+m39Nka6gt8PARAyH+VoHtRInB6CFCmT+wqo=
github.com/ipfs/go-ds-pebble v0.5.8/go.mod h1:AJjJTHgads/Fn5+tuWmaDGjGEbks7Wgx82NQ/pwmEhc=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=

View File

@ -41,40 +41,57 @@ define go-build
$(GOCC) build $(go-flags-with-tags) -o "$@" "$(1)"
endef
test_go_test: $$(DEPS_GO)
$(GOCC) test $(go-flags-with-tags) $(GOTFLAGS) ./...
.PHONY: test_go_test
# Only disable colors when running in CI (non-interactive terminal)
GOTESTSUM_NOCOLOR := $(if $(CI),--no-color,)
# Build all platforms from .github/build-platforms.yml
# Packages excluded from coverage (test code and examples are not production code)
COVERPKG_EXCLUDE := /(test|docs/examples)/
# Packages excluded from unit tests: coverage exclusions + client/rpc (tested by test_cli)
UNIT_EXCLUDE := /(test|docs/examples)/|/client/rpc$$
# Unit tests with coverage
# Produces JSON for CI reporting and coverage profile for Codecov
test_unit: test/bin/gotestsum $$(DEPS_GO)
mkdir -p test/unit coverage
rm -f test/unit/gotest.json coverage/unit_tests.coverprofile
gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/unit/gotest.json -- $(go-flags-with-tags) $(GOTFLAGS) -covermode=atomic -coverprofile=coverage/unit_tests.coverprofile -coverpkg=$$($(GOCC) list $(go-tags) ./... | grep -vE '$(COVERPKG_EXCLUDE)' | tr '\n' ',' | sed 's/,$$//') $$($(GOCC) list $(go-tags) ./... | grep -vE '$(UNIT_EXCLUDE)')
.PHONY: test_unit
# CLI/integration tests (requires built binary in PATH)
# Includes test/cli, test/integration, and client/rpc
# Produces JSON for CI reporting
# Override TEST_CLI_TIMEOUT for local development: make test_cli TEST_CLI_TIMEOUT=5m
TEST_CLI_TIMEOUT ?= 10m
test_cli: cmd/ipfs/ipfs test/bin/gotestsum $$(DEPS_GO)
mkdir -p test/cli
rm -f test/cli/cli-tests.json
PATH="$(CURDIR)/cmd/ipfs:$(CURDIR)/test/bin:$$PATH" gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/cli/cli-tests.json -- -v -timeout=$(TEST_CLI_TIMEOUT) ./test/cli/... ./test/integration/... ./client/rpc/...
.PHONY: test_cli
# Example tests (docs/examples/kubo-as-a-library)
# Tests against both published and current kubo versions
# Uses timeout to ensure CI gets output before job-level timeout kills everything
TEST_EXAMPLES_TIMEOUT ?= 2m
test_examples:
cd docs/examples/kubo-as-a-library && go test -v -timeout=$(TEST_EXAMPLES_TIMEOUT) ./... && cp go.mod go.mod.bak && cp go.sum go.sum.bak && (go mod edit -replace github.com/ipfs/kubo=./../../.. && go mod tidy && go test -v -timeout=$(TEST_EXAMPLES_TIMEOUT) ./...; ret=$$?; mv go.mod.bak go.mod; mv go.sum.bak go.sum; exit $$ret)
.PHONY: test_examples
# Build kubo for all platforms from .github/build-platforms.yml
test_go_build:
bin/test-go-build-platforms
.PHONY: test_go_build
test_go_short: GOTFLAGS += -test.short
test_go_short: test_go_test
.PHONY: test_go_short
test_go_race: GOTFLAGS += -race
test_go_race: test_go_test
.PHONY: test_go_race
test_go_expensive: test_go_test test_go_build
.PHONY: test_go_expensive
TEST_GO += test_go_expensive
# Check Go source formatting
test_go_fmt:
bin/test-go-fmt
.PHONY: test_go_fmt
TEST_GO += test_go_fmt
# Run golangci-lint (used by CI)
test_go_lint: test/bin/golangci-lint
golangci-lint run --timeout=3m ./...
.PHONY: test_go_lint
test_go: $(TEST_GO)
# Version check is no longer needed - go.mod enforces minimum version
.PHONY: check_go_version
TEST_GO := test_go_fmt test_unit test_cli test_examples
TEST += $(TEST_GO)
TEST_SHORT += test_go_fmt test_go_short
TEST_SHORT += test_go_fmt test_unit

View File

@ -39,7 +39,9 @@ func TestBackupBootstrapPeers(t *testing.T) {
// Start 1 and 2. 2 does not know anyone yet.
nodes[1].StartDaemon()
defer nodes[1].StopDaemon()
nodes[2].StartDaemon()
defer nodes[2].StopDaemon()
assert.Len(t, nodes[1].Peers(), 0)
assert.Len(t, nodes[2].Peers(), 0)
@ -51,6 +53,7 @@ func TestBackupBootstrapPeers(t *testing.T) {
// Start 0, wait a bit. Should connect to 1, and then discover 2 via the
// backup bootstrap peers.
nodes[0].StartDaemon()
defer nodes[0].StopDaemon()
time.Sleep(time.Millisecond * 500)
// Check if they're all connected.

View File

@ -22,7 +22,9 @@ func TestBitswapConfig(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -38,8 +40,10 @@ func TestBitswapConfig(t *testing.T) {
provider := h.NewNode().Init()
provider.SetIPFSConfig("Bitswap.ServerEnabled", false)
provider = provider.StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -70,8 +74,10 @@ func TestBitswapConfig(t *testing.T) {
requester := h.NewNode().Init()
requester.SetIPFSConfig("Bitswap.ServerEnabled", false)
requester.StartDaemon()
defer requester.StopDaemon()
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -91,8 +97,10 @@ func TestBitswapConfig(t *testing.T) {
cfg.HTTPRetrieval.Enabled = config.True
})
requester.StartDaemon()
defer requester.StopDaemon()
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -126,7 +134,9 @@ func TestBitswapConfig(t *testing.T) {
cfg.HTTPRetrieval.Enabled = config.True
})
provider = provider.StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
requester.Connect(provider)
// read libp2p identify from remote peer, and print protocols

View File

@ -76,6 +76,7 @@ func TestContentBlocking(t *testing.T) {
// Start daemon, it should pick up denylist from $IPFS_PATH/denylists/test.deny
node.StartDaemon() // we need online mode for GatewayOverLibp2p tests
t.Cleanup(func() { node.StopDaemon() })
client := node.GatewayClient()
// First, confirm gateway works

View File

@ -47,6 +47,8 @@ func TestDag(t *testing.T) {
t.Run("ipfs dag stat --enc=json", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Import fixture
r, err := os.Open(fixtureFile)
assert.Nil(t, err)
@ -91,6 +93,7 @@ func TestDag(t *testing.T) {
t.Run("ipfs dag stat", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
r, err := os.Open(fixtureFile)
assert.NoError(t, err)
defer r.Close()

View File

@ -60,6 +60,10 @@ func TestRoutingV1Proxy(t *testing.T) {
})
nodes[2].StartDaemon()
t.Cleanup(func() {
nodes.StopDaemons()
})
// Connect them.
nodes.Connect()

View File

@ -2,7 +2,6 @@ package cli
import (
"context"
"encoding/json"
"strings"
"testing"
"time"
@ -21,11 +20,6 @@ import (
"github.com/stretchr/testify/require"
)
// swarmPeersOutput is used to parse the JSON output of 'ipfs swarm peers --enc=json'
type swarmPeersOutput struct {
Peers []struct{} `json:"Peers"`
}
func TestRoutingV1Server(t *testing.T) {
t.Parallel()
@ -38,6 +32,7 @@ func TestRoutingV1Server(t *testing.T) {
})
})
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
return nodes
}
@ -139,6 +134,7 @@ func TestRoutingV1Server(t *testing.T) {
cfg.Routing.Type = config.NewOptionalString("dht")
})
node.StartDaemon()
defer node.StopDaemon()
// Put IPNS record in lonely node. It should be accepted as it is a valid record.
c, err = client.New(node.GatewayURL())
@ -202,15 +198,19 @@ func TestRoutingV1Server(t *testing.T) {
}
})
node.StartDaemon()
defer node.StopDaemon()
c, err := client.New(node.GatewayURL())
require.NoError(t, err)
// Try to get closest peers - should fail gracefully with an error
// Try to get closest peers - should fail gracefully with an error.
// Use 60-second timeout (server has 30s routing timeout).
testCid, err := cid.Decode("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn")
require.NoError(t, err)
_, err = c.GetClosestPeers(context.Background(), testCid)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
_, err = c.GetClosestPeers(ctx, testCid)
require.Error(t, err)
// All these routing types should indicate DHT is not available
// The exact error message may vary based on implementation details
@ -224,7 +224,7 @@ func TestRoutingV1Server(t *testing.T) {
}
})
t.Run("GetClosestPeers returns peers for self", func(t *testing.T) {
t.Run("GetClosestPeers returns peers", func(t *testing.T) {
t.Parallel()
routingTypes := []string{"auto", "autoclient", "dht", "dhtclient"}
@ -241,48 +241,35 @@ func TestRoutingV1Server(t *testing.T) {
cfg.Bootstrap = autoconf.FallbackBootstrapPeers
})
node.StartDaemon()
defer node.StopDaemon()
// Create client before waiting so we can probe DHT readiness
c, err := client.New(node.GatewayURL())
require.NoError(t, err)
// Query for closest peers to our own peer ID
key := peer.ToCid(node.PeerID())
// Wait for node to connect to bootstrap peers and populate WAN DHT routing table
minPeers := len(autoconf.FallbackBootstrapPeers)
require.EventuallyWithT(t, func(t *assert.CollectT) {
res := node.RunIPFS("swarm", "peers", "--enc=json")
var output swarmPeersOutput
err := json.Unmarshal(res.Stdout.Bytes(), &output)
assert.NoError(t, err)
peerCount := len(output.Peers)
// Wait until we have at least minPeers connected
assert.GreaterOrEqual(t, peerCount, minPeers,
"waiting for at least %d bootstrap peers, currently have %d", minPeers, peerCount)
}, 60*time.Second, time.Second)
// Wait for DHT to be ready by probing GetClosestPeers until it succeeds
require.EventuallyWithT(t, func(t *assert.CollectT) {
probeCtx, probeCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer probeCancel()
probeIter, probeErr := c.GetClosestPeers(probeCtx, key)
if probeErr == nil {
probeIter.Close()
// Wait for WAN DHT routing table to be populated.
// The server has a 30-second routing timeout, so we use 60 seconds
// per request to allow for network latency while preventing hangs.
// Total wait time is 2 minutes (locally passes in under 1 minute).
var records []*types.PeerRecord
require.EventuallyWithT(t, func(ct *assert.CollectT) {
ctx, cancel := context.WithTimeout(t.Context(), 60*time.Second)
defer cancel()
resultsIter, err := c.GetClosestPeers(ctx, key)
if !assert.NoError(ct, err) {
return
}
assert.NoError(t, probeErr, "DHT should be ready to handle GetClosestPeers")
records, err = iter.ReadAllResults(resultsIter)
assert.NoError(ct, err)
}, 2*time.Minute, 5*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
resultsIter, err := c.GetClosestPeers(ctx, key)
require.NoError(t, err)
records, err := iter.ReadAllResults(resultsIter)
require.NoError(t, err)
// Verify we got some peers back from WAN DHT
assert.NotEmpty(t, records, "should return some peers close to own peerid")
require.NotEmpty(t, records, "should return peers close to own peerid")
// Per IPIP-0476, GetClosestPeers returns at most 20 peers
assert.LessOrEqual(t, len(records), 20, "IPIP-0476 limits GetClosestPeers to 20 peers")
// Verify structure of returned records
for _, record := range records {

View File

@ -16,6 +16,7 @@ func TestDHTAutoclient(t *testing.T) {
node.IPFS("config", "Routing.Type", "autoclient")
})
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("file added on node in client mode is retrievable from node in client mode", func(t *testing.T) {
t.Parallel()

View File

@ -22,6 +22,7 @@ func TestDHTOptimisticProvide(t *testing.T) {
})
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
hash := nodes[0].IPFSAddStr(string(random.Bytes(100)))
nodes[0].IPFS("routing", "provide", hash)

View File

@ -19,6 +19,7 @@ func TestFilesCp(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create simple text file
data := "testing files cp command"
@ -36,6 +37,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp with unsupported DAG node type fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// MFS UnixFS is limited to dag-pb or raw, so we create a dag-cbor node to test this
jsonData := `{"data": "not a UnixFS node"}`
@ -53,6 +55,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp with invalid UnixFS data structure fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create an invalid proto file
data := []byte{0xDE, 0xAD, 0xBE, 0xEF} // Invalid protobuf data
@ -75,6 +78,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp with raw node succeeds", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a raw node
data := "raw data"
@ -98,6 +102,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp creates intermediate directories with -p", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a simple text file and add it to IPFS
data := "hello parent directories"
@ -130,6 +135,7 @@ func TestFilesRm(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a file to remove
node.IPFS("files", "mkdir", "/test-dir")
@ -149,6 +155,7 @@ func TestFilesRm(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a file to remove
node.IPFS("files", "mkdir", "/test-dir")
@ -166,6 +173,7 @@ func TestFilesRm(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a file to remove
node.IPFS("files", "mkdir", "/test-dir")
@ -186,6 +194,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
t.Run("reaches default limit of 256 operations", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Perform 256 operations with --flush=false (should succeed)
for i := 0; i < 256; i++ {
@ -214,6 +223,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Perform 5 operations (should succeed)
for i := 0; i < 5; i++ {
@ -239,6 +249,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Do 2 operations with --flush=false
node.IPFS("files", "mkdir", "--flush=false", "/dir1")
@ -271,6 +282,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Do 2 operations with --flush=false
node.IPFS("files", "mkdir", "--flush=false", "/dir1")
@ -303,6 +315,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Should be able to do many operations without error
for i := 0; i < 300; i++ {
@ -322,6 +335,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Mix of different MFS operations (5 operations to hit the limit)
node.IPFS("files", "mkdir", "--flush=false", "/testdir")

View File

@ -4,9 +4,9 @@ bafyreibmdfd7c5db4kls4ty57zljfhqv36gi43l6txl44pi423wwmeskwy 2 53
bafyreie3njilzdi4ixumru4nzgecsnjtu7fzfcwhg7e6s4s5i7cnbslvn4 2 53
Summary
Total Size: 99
Total Size: 99 (99 B)
Unique Blocks: 3
Shared Size: 7
Shared Size: 7 (7 B)
Ratio: 1.070707

View File

@ -28,6 +28,7 @@ func TestGatewayLimits(t *testing.T) {
cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(1 * time.Second)
})
node.StartDaemon()
defer node.StopDaemon()
// Add content that can be retrieved quickly
cid := node.IPFSAddStr("test content")
@ -69,6 +70,7 @@ func TestGatewayLimits(t *testing.T) {
cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(2 * time.Second)
})
node.StartDaemon()
defer node.StopDaemon()
// Add some content - use a non-existent CID that will block during retrieval
// to ensure we can control timing

View File

@ -27,6 +27,7 @@ func TestGatewayHAMTDirectory(t *testing.T) {
// Start node
h := harness.NewT(t)
node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline")
defer node.StopDaemon()
client := node.GatewayClient()
// Import fixtures
@ -56,6 +57,7 @@ func TestGatewayHAMTRanges(t *testing.T) {
// Start node
h := harness.NewT(t)
node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline")
t.Cleanup(func() { node.StopDaemon() })
client := node.GatewayClient()
// Import fixtures

View File

@ -28,6 +28,7 @@ func TestGateway(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init().StartDaemon("--offline")
t.Cleanup(func() { node.StopDaemon() })
cid := node.IPFSAddStr("Hello Worlds!")
peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36)
@ -234,6 +235,7 @@ func TestGateway(t *testing.T) {
cfg.API.HTTPHeaders = map[string][]string{header: values}
})
node.StartDaemon()
defer node.StopDaemon()
resp := node.APIClient().DisableRedirects().Get("/webui/")
assert.Equal(t, resp.Headers.Values(header), values)
@ -257,6 +259,7 @@ func TestGateway(t *testing.T) {
t.Run("pprof", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
apiClient := node.APIClient()
t.Run("mutex", func(t *testing.T) {
t.Parallel()
@ -300,6 +303,7 @@ func TestGateway(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init().StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
h.WriteFile("index/index.html", "<p></p>")
cid := node.IPFS("add", "-Q", "-r", filepath.Join(h.Dir, "index")).Stderr.Trimmed()
@ -367,6 +371,7 @@ func TestGateway(t *testing.T) {
cfg.Addresses.Gateway = config.Strings{"/ip4/127.0.0.1/tcp/32563"}
})
node.StartDaemon()
defer node.StopDaemon()
b, err := os.ReadFile(filepath.Join(node.Dir, "gateway"))
require.NoError(t, err)
@ -388,6 +393,7 @@ func TestGateway(t *testing.T) {
assert.NoError(t, err)
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("not present", func(t *testing.T) {
cidFoo := node2.IPFSAddStr("foo")
@ -460,6 +466,7 @@ func TestGateway(t *testing.T) {
}
})
node.StartDaemon()
defer node.StopDaemon()
cidFoo := node.IPFSAddStr("foo")
client := node.GatewayClient()
@ -509,6 +516,7 @@ func TestGateway(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
node.StartDaemon()
defer node.StopDaemon()
client := node.GatewayClient()
res := client.Get("/ipfs/invalid-thing", func(r *http.Request) {
@ -526,6 +534,7 @@ func TestGateway(t *testing.T) {
cfg.Gateway.DisableHTMLErrors = config.True
})
node.StartDaemon()
defer node.StopDaemon()
client := node.GatewayClient()
res := client.Get("/ipfs/invalid-thing", func(r *http.Request) {
@ -546,6 +555,7 @@ func TestLogs(t *testing.T) {
t.Setenv("GOLOG_LOG_LEVEL", "info")
node := h.NewNode().Init().StartDaemon("--offline")
defer node.StopDaemon()
cid := node.IPFSAddStr("Hello Worlds!")
peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36)

View File

@ -32,6 +32,7 @@ func TestGatewayOverLibp2p(t *testing.T) {
p2pProxyNode := nodes[1]
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
// Add data to the gateway node
cidDataOnGatewayNode := cid.MustParse(gwNode.IPFSAddStr("Hello Worlds2!"))
@ -65,6 +66,7 @@ func TestGatewayOverLibp2p(t *testing.T) {
// Enable the experimental feature and reconnect the nodes
gwNode.IPFS("config", "--json", "Experimental.GatewayOverLibp2p", "true")
gwNode.StopDaemon().StartDaemon()
t.Cleanup(func() { gwNode.StopDaemon() })
nodes.Connect()
// Note: the bare HTTP requests here assume that the gateway is mounted at `/`

View File

@ -75,6 +75,7 @@ func TestHTTPRetrievalClient(t *testing.T) {
// Start Kubo
node.StartDaemon()
defer node.StopDaemon()
if debug {
fmt.Printf("delegatedRoutingServer.URL: %s\n", delegatedRoutingServer.URL)

View File

@ -155,6 +155,7 @@ func TestInit(t *testing.T) {
t.Run("ipfs init should not run while daemon is running", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("init")
assert.NotEqual(t, 0, res.ExitErr.ExitCode())
assert.Contains(t, res.Stderr.String(), "Error: ipfs daemon is running. please stop it to run this command")

View File

@ -103,6 +103,7 @@ func TestName(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
t.Run("Resolving self offline succeeds (daemon on)", func(t *testing.T) {
res = node.IPFS("name", "resolve", "--offline", "/ipns/"+name.String())
@ -147,6 +148,7 @@ func TestName(t *testing.T) {
t.Run("Fails to publish in offline mode", func(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon("--offline")
defer node.StopDaemon()
res := node.RunIPFS("name", "publish", "/ipfs/"+fixtureCid)
require.Error(t, res.Err)
require.Equal(t, 1, res.ExitCode())
@ -157,6 +159,7 @@ func TestName(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
defer node.StopDaemon()
ipnsName := ipns.NameFromPeer(node.PeerID()).String()
ipnsPath := ipns.NamespacePrefix + ipnsName
publishPath := "/ipfs/" + fixtureCid
@ -187,6 +190,7 @@ func TestName(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
ipnsPath := ipns.NamespacePrefix + ipns.NameFromPeer(node.PeerID()).String()
publishPath := "/ipfs/" + fixtureCid
@ -227,6 +231,7 @@ func TestName(t *testing.T) {
t.Run("Inspect with verification using wrong RSA key errors", func(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
defer node.StopDaemon()
// Prepare RSA Key 1
res := node.IPFS("key", "gen", "--type=rsa", "--size=4096", "key1")
@ -299,6 +304,7 @@ func TestName(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
defer node.StopDaemon()
publishPath1 := "/ipfs/" + fixtureCid
publishPath2 := "/ipfs/" + dagCid // Different content
name := ipns.NameFromPeer(node.PeerID())

View File

@ -62,6 +62,7 @@ func TestPeering(t *testing.T) {
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
nodes.StartDaemons()
defer nodes.StopDaemons()
assertPeerings(h, nodes, peerings)
nodes[0].Disconnect(nodes[1])
@ -74,6 +75,7 @@ func TestPeering(t *testing.T) {
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
nodes.StartDaemons()
defer nodes.StopDaemons()
assertPeerings(h, nodes, peerings)
nodes[2].Disconnect(nodes[1])
@ -85,6 +87,7 @@ func TestPeering(t *testing.T) {
peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}}
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
defer nodes.StopDaemons()
nodes[0].StartDaemon()
nodes[1].StartDaemon()
assertPeerings(h, nodes, []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}})
@ -99,6 +102,7 @@ func TestPeering(t *testing.T) {
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
nodes.StartDaemons()
defer nodes.StopDaemons()
assertPeerings(h, nodes, peerings)
nodes[2].StopDaemon()

View File

@ -28,6 +28,9 @@ func setupTestNode(t *testing.T) *harness.Node {
t.Helper()
node := harness.NewT(t).NewNode().Init()
node.StartDaemon("--offline")
t.Cleanup(func() {
node.StopDaemon()
})
return node
}
@ -498,7 +501,6 @@ func TestPinLsEdgeCases(t *testing.T) {
t.Run("invalid pin type returns error", func(t *testing.T) {
t.Parallel()
node := setupTestNode(t)
defer node.StopDaemon()
// Try to list pins with invalid type
res := node.RunIPFS("pin", "ls", "--type=invalid")
@ -510,7 +512,6 @@ func TestPinLsEdgeCases(t *testing.T) {
t.Run("non-existent path returns proper error", func(t *testing.T) {
t.Parallel()
node := setupTestNode(t)
defer node.StopDaemon()
// Try to list a non-existent CID
fakeCID := "QmNonExistent123456789"
@ -521,7 +522,6 @@ func TestPinLsEdgeCases(t *testing.T) {
t.Run("unpinned CID returns not pinned error", func(t *testing.T) {
t.Parallel()
node := setupTestNode(t)
defer node.StopDaemon()
// Add content but don't pin it explicitly (it's just in blockstore)
unpinnedCID := node.IPFSAddStr("unpinned content", "--pin=false")

View File

@ -15,6 +15,7 @@ func TestPing(t *testing.T) {
t.Run("other", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]
@ -25,6 +26,7 @@ func TestPing(t *testing.T) {
t.Run("ping unreachable peer", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
badPeer := "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJx"
@ -37,6 +39,7 @@ func TestPing(t *testing.T) {
t.Run("self", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]
@ -52,6 +55,7 @@ func TestPing(t *testing.T) {
t.Run("0", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]
@ -63,6 +67,7 @@ func TestPing(t *testing.T) {
t.Run("offline", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]

View File

@ -51,6 +51,7 @@ func TestRemotePinning(t *testing.T) {
node.IPFS("config", "--json", "Pinning.RemoteServices.svc.Policies.MFS.Enable", "true")
node.StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
node.IPFS("files", "cp", "/ipfs/bafkqaaa", "/mfs-pinning-test-"+uuid.NewString())
node.IPFS("files", "flush")
@ -133,6 +134,8 @@ func TestRemotePinning(t *testing.T) {
t.Run("pin remote service ls --stat", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
_, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -155,6 +158,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("adding service with invalid URL fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("pin", "remote", "service", "add", "svc", "invalid-service.example.com", "key")
assert.Equal(t, 1, res.ExitCode())
@ -168,6 +172,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("unauthorized pinning service calls fail", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
_, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, "othertoken")
@ -180,6 +185,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("pinning service calls fail when there is a wrong path", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
_, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL+"/invalid-path", authToken)
@ -191,6 +197,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("pinning service calls fail when DNS resolution fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
node.IPFS("pin", "remote", "service", "add", "svc", "https://invalid-service.example.com", authToken)
res := node.RunIPFS("pin", "remote", "ls", "--service=svc")
@ -201,6 +208,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("pin remote service rm", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
node.IPFS("pin", "remote", "service", "add", "svc", "https://example.com", authToken)
node.IPFS("pin", "remote", "service", "rm", "svc")
res := node.IPFS("pin", "remote", "service", "ls")
@ -225,6 +233,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote add --background=true'", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -266,6 +275,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote add --background=false'", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -287,6 +297,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote ls' with multiple statuses", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -340,6 +351,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote ls' by CID", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -360,6 +372,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote rm --name' without --force when multiple pins match", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -388,6 +401,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote rm --name --force' remove multiple pins", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -408,6 +422,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote rm --force' removes all pins", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)

View File

@ -26,6 +26,7 @@ func testPins(t *testing.T, args testPinsArgs) {
node := harness.NewT(t).NewNode().Init()
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
strs := []string{"a", "b", "c", "d", "e", "f", "g"}
@ -127,6 +128,7 @@ func testPinsErrorReporting(t *testing.T, args testPinsArgs) {
node := harness.NewT(t).NewNode().Init()
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
randomCID := "Qme8uX5n9hn15pw9p6WcVKoziyyC9LXv4LEgvsmKMULjnV"
res := node.RunIPFS(StrCat("pin", "add", args.pinArg, randomCID)...)
@ -142,6 +144,7 @@ func testPinDAG(t *testing.T, args testPinsArgs) {
node := h.NewNode().Init()
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
bytes := random.Bytes(1 << 20) // 1 MiB
tmpFile := h.WriteToTemp(string(bytes))
@ -168,6 +171,7 @@ func testPinProgress(t *testing.T, args testPinsArgs) {
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
bytes := random.Bytes(1 << 20) // 1 MiB

View File

@ -7,6 +7,7 @@ import (
"net/http"
"net/http/httptest"
"strings"
"sync/atomic"
"testing"
"time"
@ -764,3 +765,81 @@ func TestProvider(t *testing.T) {
})
}
}
// TestHTTPOnlyProviderWithSweepEnabled tests that provider records are correctly
// sent to HTTP routers when Routing.Type is "custom" and only HTTP routers are configured,
// even when Provide.DHT.SweepEnabled=true (the default since v0.39).
//
// This is a regression test for https://github.com/ipfs/kubo/issues/11089
func TestHTTPOnlyProviderWithSweepEnabled(t *testing.T) {
t.Parallel()
// Track provide requests received by the mock HTTP router
var provideRequests atomic.Int32
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if (r.Method == http.MethodPut || r.Method == http.MethodPost) &&
strings.HasPrefix(r.URL.Path, "/routing/v1/providers") {
provideRequests.Add(1)
w.WriteHeader(http.StatusOK)
} else if strings.HasPrefix(r.URL.Path, "/routing/v1/providers") && r.Method == http.MethodGet {
// Return empty providers for findprovs
w.Header().Set("Content-Type", "application/x-ndjson")
w.WriteHeader(http.StatusOK)
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
defer mockServer.Close()
h := harness.NewT(t)
node := h.NewNode().Init()
// SweepEnabled=true has been the default since v0.39; set it explicitly here for test clarity
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
// Configure HTTP-only custom routing (no DHT) with explicit Routing.Type=custom
routingConf := map[string]any{
"Type": "custom", // Explicitly set Routing.Type=custom
"Methods": map[string]any{
"provide": map[string]any{"RouterName": "HTTPRouter"},
"get-ipns": map[string]any{"RouterName": "HTTPRouter"},
"put-ipns": map[string]any{"RouterName": "HTTPRouter"},
"find-peers": map[string]any{"RouterName": "HTTPRouter"},
"find-providers": map[string]any{"RouterName": "HTTPRouter"},
},
"Routers": map[string]any{
"HTTPRouter": map[string]any{
"Type": "http",
"Parameters": map[string]any{
"Endpoint": mockServer.URL,
},
},
},
}
node.SetIPFSConfig("Routing", routingConf)
node.StartDaemon()
defer node.StopDaemon()
// Add content and manually provide it
cid := node.IPFSAddStr(time.Now().String())
// Manual provide should succeed even without libp2p peers
res := node.RunIPFS("routing", "provide", cid)
// Check that the command succeeded (exit code 0) and produced no provide-related errors
assert.Equal(t, 0, res.ExitCode(), "routing provide should succeed with HTTP-only routing and SweepEnabled=true")
assert.NotContains(t, res.Stderr.String(), "cannot provide", "should not have provide errors")
// Verify HTTP router received at least one provide request
assert.Greater(t, provideRequests.Load(), int32(0),
"HTTP router should have received provide requests")
// Verify 'provide stat' works with HTTP-only routing (regression test for stats)
statRes := node.RunIPFS("provide", "stat")
assert.Equal(t, 0, statRes.ExitCode(), "provide stat should succeed with HTTP-only routing")
assert.NotContains(t, statRes.Stderr.String(), "stats not available",
"should not report stats unavailable")
// LegacyProvider outputs "TotalReprovides:" in its stats
assert.Contains(t, statRes.Stdout.String(), "TotalReprovides:",
"should show legacy provider stats")
}
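
For anyone reproducing this setup outside the test harness, the routingConf map above maps one-to-one onto the Routing section of the Kubo config file; the endpoint below is a placeholder for a real delegated routing endpoint:

"Routing": {
  "Type": "custom",
  "Methods": {
    "provide": { "RouterName": "HTTPRouter" },
    "get-ipns": { "RouterName": "HTTPRouter" },
    "put-ipns": { "RouterName": "HTTPRouter" },
    "find-peers": { "RouterName": "HTTPRouter" },
    "find-providers": { "RouterName": "HTTPRouter" }
  },
  "Routers": {
    "HTTPRouter": {
      "Type": "http",
      "Parameters": { "Endpoint": "https://delegated-routing.example.net" }
    }
  }
}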

View File

@ -26,6 +26,7 @@ func TestRcmgr(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
t.Run("swarm resources should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "resources")
@ -41,6 +42,7 @@ func TestRcmgr(t *testing.T) {
cfg.Swarm.ResourceMgr.Enabled = config.False
})
node.StartDaemon()
defer node.StopDaemon()
t.Run("swarm resources should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "resources")
@ -56,6 +58,7 @@ func TestRcmgr(t *testing.T) {
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
require.Equal(t, 0, res.ExitCode())
@ -73,7 +76,9 @@ func TestRcmgr(t *testing.T) {
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
})
node.StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
t.Run("conns and streams are above 800 for default connmgr settings", func(t *testing.T) {
t.Parallel()
@ -135,6 +140,7 @@ func TestRcmgr(t *testing.T) {
overrides.System.ConnsInbound = rcmgr.Unlimited
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -150,6 +156,7 @@ func TestRcmgr(t *testing.T) {
overrides.Transient.Memory = 88888
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -163,6 +170,7 @@ func TestRcmgr(t *testing.T) {
overrides.Service = map[string]rcmgr.ResourceLimits{"foo": {Memory: 77777}}
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -176,6 +184,7 @@ func TestRcmgr(t *testing.T) {
overrides.Protocol = map[protocol.ID]rcmgr.ResourceLimits{"foo": {Memory: 66666}}
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -191,6 +200,7 @@ func TestRcmgr(t *testing.T) {
overrides.Peer = map[peer.ID]rcmgr.ResourceLimits{validPeerID: {Memory: 55555}}
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -218,6 +228,7 @@ func TestRcmgr(t *testing.T) {
})
nodes.StartDaemons()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("node 0 should fail to connect to and ping node 1", func(t *testing.T) {
t.Parallel()

View File

@ -57,6 +57,7 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) {
}
nodes.StartDaemons(daemonArgs...).Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("ipfs routing findpeer", func(t *testing.T) {
t.Parallel()
@ -157,6 +158,7 @@ func testSelfFindDHT(t *testing.T) {
})
nodes.StartDaemons()
defer nodes.StopDaemons()
res := nodes[0].RunIPFS("dht", "findpeer", nodes[0].PeerID().String())
assert.Equal(t, 1, res.ExitCode())

View File

@ -14,6 +14,7 @@ func TestStats(t *testing.T) {
t.Run("stats dht", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
res := node1.IPFS("stats", "dht")

View File

@ -31,6 +31,7 @@ func TestSwarm(t *testing.T) {
t.Run("ipfs swarm peers returns empty peers when a node is not connected to any peers", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
var output expectedOutputType
err := json.Unmarshal(res.Stdout.Bytes(), &output)
@ -40,7 +41,9 @@ func TestSwarm(t *testing.T) {
t.Run("ipfs swarm peers with flag identify outputs expected identify information about connected peers", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
defer otherNode.StopDaemon()
node.Connect(otherNode)
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
@ -67,7 +70,9 @@ func TestSwarm(t *testing.T) {
t.Run("ipfs swarm peers with flag identify outputs Identify field with data that matches calling ipfs id on a peer", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
defer otherNode.StopDaemon()
node.Connect(otherNode)
otherNodeIDResponse := otherNode.RunIPFS("id", "--enc=json")

View File

@ -76,6 +76,7 @@ func TestTracing(t *testing.T) {
node.Runner.Env["OTEL_EXPORTER_OTLP_PROTOCOL"] = "grpc"
node.Runner.Env["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://localhost:4317"
node.StartDaemon()
defer node.StopDaemon()
assert.Eventually(t,
func() bool {

View File

@ -74,6 +74,7 @@ func TestTransports(t *testing.T) {
t.Parallel()
nodes := tcpNodes(t).StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("tcp with NOISE", func(t *testing.T) {
@ -86,6 +87,7 @@ func TestTransports(t *testing.T) {
})
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("QUIC", func(t *testing.T) {
@ -104,6 +106,7 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("QUIC+Webtransport", func(t *testing.T) {
@ -122,6 +125,7 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("QUIC connects with non-dialable transports", func(t *testing.T) {
@ -144,6 +148,7 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("WebRTC Direct", func(t *testing.T) {
@ -162,5 +167,6 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
}

View File

@ -15,7 +15,7 @@ require (
github.com/ipfs/iptb-plugins v0.5.1
github.com/multiformats/go-multiaddr v0.16.1
github.com/multiformats/go-multihash v0.2.3
gotest.tools/gotestsum v1.12.3
gotest.tools/gotestsum v1.13.0
)
require (
@ -65,9 +65,9 @@ require (
github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble/v2 v2.1.2 // indirect
github.com/cockroachdb/pebble/v2 v2.1.3 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect

View File

@ -104,12 +104,12 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.3 h1:irU503OnjRoJBrkZQIJvwv9c4WvpUeOJxhRApojB8D8=
github.com/cockroachdb/pebble/v2 v2.1.3/go.mod h1:B1UgWsyR+L+UvZXNgpxw+WqsUKA8VQ/bb//FXOHghB8=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@ -985,8 +985,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/gotestsum v1.12.3 h1:jFwenGJ0RnPkuKh2VzAYl1mDOJgbhobBDeL2W1iEycs=
gotest.tools/gotestsum v1.12.3/go.mod h1:Y1+e0Iig4xIRtdmYbEV7K7H6spnjc1fX4BOuUhWw2Wk=
gotest.tools/gotestsum v1.13.0 h1:+Lh454O9mu9AMG1APV4o0y7oDYKyik/3kBOiCqiEpRo=
gotest.tools/gotestsum v1.13.0/go.mod h1:7f0NS5hFb0dWr4NtcsAsF0y1kzjEFfAil0HiBQJE03Q=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=

View File

@ -2,7 +2,8 @@ include mk/header.mk
CLEAN += $(d)/gotest.json $(d)/gotest.junit.xml
$(d)/gotest.junit.xml: test/bin/gotestsum coverage/unit_tests.coverprofile
# Convert gotest.json (produced by test_unit) to JUnit XML format
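# (--raw-command makes gotestsum parse the output of `cat` as a `go test -json`
# stream, so the XML is produced from the saved results without re-running tests)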
$(d)/gotest.junit.xml: test/bin/gotestsum $(d)/gotest.json
gotestsum --no-color --junitfile $@ --raw-command cat $(@D)/gotest.json
include mk/footer.mk