Merge branch 'master' into no-keystore-error-on-shutdown

Andrew Gillis 2026-01-09 09:53:15 -08:00 committed by GitHub
commit 15249453c2
35 changed files with 432 additions and 24 deletions

View File

@ -7,6 +7,7 @@ import (
"io"
"path"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
@ -349,7 +350,11 @@ type DagStatSummary struct {
}
func (s *DagStatSummary) String() string {
return fmt.Sprintf("Total Size: %d\nUnique Blocks: %d\nShared Size: %d\nRatio: %f", s.TotalSize, s.UniqueBlocks, s.SharedSize, s.Ratio)
return fmt.Sprintf("Total Size: %d (%s)\nUnique Blocks: %d\nShared Size: %d (%s)\nRatio: %f",
s.TotalSize, humanize.Bytes(s.TotalSize),
s.UniqueBlocks,
s.SharedSize, humanize.Bytes(s.SharedSize),
s.Ratio)
}
func (s *DagStatSummary) incrementTotalSize(size uint64) {
@ -384,7 +389,7 @@ Note: This command skips duplicate blocks in reporting both size and the number
cmds.StringArg("root", true, true, "CID of a DAG root to get statistics for").EnableStdin(),
},
Options: []cmds.Option{
cmds.BoolOption(progressOptionName, "p", "Return progressive data while reading through the DAG").WithDefault(true),
cmds.BoolOption(progressOptionName, "p", "Show progress on stderr. Auto-detected if stderr is a terminal."),
},
Run: dagStat,
Type: DagStatSummary{},

View File

@ -5,6 +5,7 @@ import (
"io"
"os"
"github.com/dustin/go-humanize"
mdag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/merkledag/traverse"
cid "github.com/ipfs/go-cid"
@ -19,7 +20,11 @@ import (
// to compute the new state
func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
-	progressive := req.Options[progressOptionName].(bool)
+	// Default to true (emit intermediate states) for HTTP/RPC clients that want progress
+	progressive := true
+	if val, specified := req.Options[progressOptionName].(bool); specified {
+		progressive = val
+	}
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
@ -84,6 +89,18 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment)
}
func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
// Determine whether to show progress based on TTY detection or explicit flag
var showProgress bool
val, specified := res.Request().Options[progressOptionName]
if !specified {
// Auto-detect: show progress only if stderr is a TTY
if errStat, err := os.Stderr.Stat(); err == nil {
showProgress = (errStat.Mode() & os.ModeCharDevice) != 0
}
} else {
showProgress = val.(bool)
}
var dagStats *DagStatSummary
for {
v, err := res.Next()
@ -96,17 +113,26 @@ func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
switch out := v.(type) {
case *DagStatSummary:
dagStats = out
-			if dagStats.Ratio == 0 {
-				length := len(dagStats.DagStatsArray)
-				if length > 0 {
-					currentStat := dagStats.DagStatsArray[length-1]
-					fmt.Fprintf(os.Stderr, "CID: %s, Size: %d, NumBlocks: %d\n", currentStat.Cid, currentStat.Size, currentStat.NumBlocks)
-				}
+			// Ratio == 0 means this is a progress update (not final result)
+			if showProgress && dagStats.Ratio == 0 {
+				// Sum up total progress across all DAGs being scanned
+				var totalBlocks int64
+				var totalSize uint64
+				for _, stat := range dagStats.DagStatsArray {
+					totalBlocks += stat.NumBlocks
+					totalSize += stat.Size
+				}
+				fmt.Fprintf(os.Stderr, "Fetched/Processed %d blocks, %d bytes (%s)\r", totalBlocks, totalSize, humanize.Bytes(totalSize))
			}
default:
return e.TypeErr(out, v)
}
}
// Clear the progress line before final output
if showProgress {
fmt.Fprint(os.Stderr, "\033[2K\r")
}
return re.Emit(dagStats)
}

View File

@ -694,6 +694,48 @@ See docs: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxw
// ONLINE/OFFLINE
// hasDHTRouting checks if the routing configuration includes a DHT component.
// Returns false for HTTP-only custom routing configurations (e.g., Routing.Type="custom"
// with only HTTP routers). This is used to determine whether SweepingProviderOpt
// can be used, since it requires a DHT client.
func hasDHTRouting(cfg *config.Config) bool {
routingType := cfg.Routing.Type.WithDefault(config.DefaultRoutingType)
switch routingType {
case "auto", "autoclient", "dht", "dhtclient", "dhtserver":
return true
case "custom":
// Check if any router in custom config is DHT-based
for _, router := range cfg.Routing.Routers {
if routerIncludesDHT(router, cfg) {
return true
}
}
return false
default: // "none", "delegated"
return false
}
}
// routerIncludesDHT recursively checks if a router configuration includes DHT.
// Handles parallel and sequential composite routers by checking their children.
func routerIncludesDHT(rp config.RouterParser, cfg *config.Config) bool {
switch rp.Type {
case config.RouterTypeDHT:
return true
case config.RouterTypeParallel, config.RouterTypeSequential:
if children, ok := rp.Parameters.(*config.ComposableRouterParams); ok {
for _, child := range children.Routers {
if childRouter, exists := cfg.Routing.Routers[child.RouterName]; exists {
if routerIncludesDHT(childRouter, cfg) {
return true
}
}
}
}
}
return false
}
// OnlineProviders groups units managing provide routing records online
func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
if !provide {
@ -710,7 +752,15 @@ func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
opts := []fx.Option{
fx.Provide(setReproviderKeyProvider(providerStrategy)),
}
-	if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) {
+	sweepEnabled := cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled)
+	dhtAvailable := hasDHTRouting(cfg)
+	// Use SweepingProvider only when both sweep is enabled AND DHT is available.
+	// For HTTP-only routing (e.g., Routing.Type="custom" with only HTTP routers),
+	// fall back to LegacyProvider which works with ProvideManyRouter.
+	// See https://github.com/ipfs/kubo/issues/11089
+	if sweepEnabled && dhtAvailable {
opts = append(opts, SweepingProviderOpt(cfg))
} else {
reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)

View File

@ -12,6 +12,7 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [🔦 Highlights](#-highlights)
- [Routing V1 HTTP API now exposed by default](#routing-v1-http-api-now-exposed-by-default)
- [Track total size when adding pins](#track-total-size-when-adding-pins)
- [Improved `ipfs dag stat` output](#improved-ipfs-dag-stat-output)
- [Skip bad keys when listing](#skip-bad-keys-when-listing)
- [📦️ Dependency updates](#-dependency-updates)
- [📝 Changelog](#-changelog)
@ -34,6 +35,30 @@ Example output:
Fetched/Processed 336 nodes (83 MB)
```
#### Improved `ipfs dag stat` output
The `ipfs dag stat` command has been improved for better terminal UX:
- Progress output now uses a single line with a carriage return, avoiding terminal flooding
- Progress is auto-detected: shown only in interactive terminals by default
- Human-readable sizes are now displayed alongside raw byte counts
Example progress (interactive terminal):
```
Fetched/Processed 84 blocks, 2097152 bytes (2.1 MB)
```
Example summary output:
```
Summary
Total Size: 2097152 (2.1 MB)
Unique Blocks: 42
Shared Size: 1048576 (1.0 MB)
Ratio: 1.500000
```
Use `--progress=true` to force progress even when piped, or `--progress=false` to disable it.
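For example (`<cid>` is a placeholder for a real CID):
```console
# piped output: progress is suppressed by default, force it onto stderr
$ ipfs dag stat --progress=true <cid> > stats.txt

# interactive terminal: progress is shown by default, disable it explicitly
$ ipfs dag stat --progress=false <cid>
```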
#### Skip bad keys when listing
The `ipfs key list` command now logs an error and continues listing when a key cannot be read from the keystore or decoded, rather than aborting.

View File

@ -1073,7 +1073,11 @@ Toggle and configure experimental features of Kubo. Experimental features are li
Options for the HTTP gateway.
**NOTE:** support for `/api/v0` under the gateway path is now deprecated. It will be removed in future versions: <https://github.com/ipfs/kubo/issues/10312>.
> [!IMPORTANT]
> By default, Kubo's gateway is configured for local use at `127.0.0.1` and `localhost`.
> To run a public gateway, configure your domain names in [`Gateway.PublicGateways`](#gatewaypublicgateways).
> For production deployment considerations (reverse proxy, timeouts, rate limiting, CDN),
> see [Running in Production](gateway.md#running-in-production).
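For example, a minimal subdomain gateway setup for a hypothetical `example.org` (see [`Gateway.PublicGateways`](#gatewaypublicgateways) for all fields):
```console
$ ipfs config --json Gateway.PublicGateways '{
    "example.org": {
      "UseSubdomains": true,
      "Paths": ["/ipfs", "/ipns"]
    }
  }'
```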
### `Gateway.NoFetch`
@ -1268,6 +1272,11 @@ Examples:
- `*.example.com` will match requests to `http://foo.example.com/ipfs/*` or `http://{cid}.ipfs.bar.example.com/*`.
- `foo-*.example.com` will match requests to `http://foo-bar.example.com/ipfs/*` or `http://{cid}.ipfs.foo-xyz.example.com/*`.
> [!IMPORTANT]
> **Reverse Proxy:** If running behind nginx or another reverse proxy, ensure
> `Host` and `X-Forwarded-*` headers are forwarded correctly.
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) in gateway documentation.
#### `Gateway.PublicGateways: Paths`
An array of paths that should be exposed on the hostname.
@ -1334,6 +1343,9 @@ Default: `false`
Type: `bool`
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
#### `Gateway.PublicGateways: NoDNSLink`
A boolean to configure whether DNSLink for hostname present in `Host`
@ -1344,6 +1356,9 @@ Default: `false` (DNSLink lookup enabled by default for every defined hostname)
Type: `bool`
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
#### `Gateway.PublicGateways: InlineDNSLink`
An optional flag to explicitly configure whether subdomain gateway's redirects
@ -1411,6 +1426,9 @@ ipfs config --json Gateway.PublicGateways '{"localhost": null }'
Below is a list of the most common gateway setups.
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
- Public [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://{cid}.ipfs.dweb.link` (each content root gets its own Origin)
```console
@ -2195,6 +2213,9 @@ You can compare the effectiveness of sweep mode vs legacy mode by monitoring the
> [!NOTE]
> This is the default provider system as of Kubo v0.39. To use the legacy provider instead, set `Provide.DHT.SweepEnabled=false`.
> [!NOTE]
> When DHT routing is unavailable (e.g., `Routing.Type=custom` with only HTTP routers), the provider automatically falls back to the legacy provider regardless of this setting.
Default: `true`
Type: `flag`
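To confirm which provider implementation is active, inspect the provide stats: the legacy provider's output includes a `TotalReprovides` counter (as asserted by the regression test added in this commit).
```console
$ ipfs provide stat | grep TotalReprovides
```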

View File

@ -6,7 +6,7 @@ they were stored in a traditional web server.
[More about Gateways](https://docs.ipfs.tech/concepts/ipfs-gateway/) and [addressing IPFS on the web](https://docs.ipfs.tech/how-to/address-ipfs-on-web/).
Kubo's Gateway implementation follows [ipfs/specs: Specification for HTTP Gateways](https://github.com/ipfs/specs/tree/main/http-gateways#readme).
Kubo's Gateway implementation follows [IPFS Gateway Specifications](https://specs.ipfs.tech/http-gateways/) and is tested with [Gateway Conformance Test Suite](https://github.com/ipfs/gateway-conformance).
### Local gateway
@ -14,14 +14,21 @@ By default, Kubo nodes run
a [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://127.0.0.1:8080/`
and a [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://localhost:8080/`.
The path one also implements [trustless gateway spec](https://specs.ipfs.tech/http-gateways/trustless-gateway/)
and supports [trustless responses](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) as opt-in via `Accept` header.
> [!CAUTION]
> **For browsing websites, web apps, and dapps in a browser, use the subdomain
> gateway** (`localhost`). Each content root gets its own
> [web origin](https://developer.mozilla.org/en-US/docs/Web/Security/Same-origin_policy),
> isolating localStorage, cookies, and session data between sites.
>
> **For file retrieval, use the path gateway** (`127.0.0.1`). Path gateways are
> suited for downloading files or fetching [verifiable](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval)
> content, but lack origin isolation (all content shares the same origin).
Additional listening addresses and gateway behaviors can be set in the [config](#configuration) file.
### Public gateways
-Protocol Labs provides a public gateway at
+IPFS Foundation [provides public gateways](https://docs.ipfs.tech/concepts/public-utilities/) at
`https://ipfs.io` ([path](https://specs.ipfs.tech/http-gateways/path-gateway/)),
`https://dweb.link` ([subdomain](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway)),
and `https://trustless-gateway.link` ([trustless](https://specs.ipfs.tech/http-gateways/trustless-gateway/) only).
@ -41,6 +48,80 @@ The gateway's log level can be changed with this command:
> ipfs log level core/server debug
```
## Running in Production
When deploying Kubo's gateway in production, be aware of these important considerations:
<a id="reverse-proxy"></a>
> [!IMPORTANT]
> **Reverse Proxy:** When running Kubo behind a reverse proxy (such as nginx),
> the original `Host` header **must** be forwarded to Kubo for
> [`Gateway.PublicGateways`](config.md#gatewaypublicgateways) to work.
> Kubo uses the `Host` header to match configured hostnames and detect
> subdomain gateway patterns like `{cid}.ipfs.example.org` or DNSLink hostnames.
>
> If the `Host` header is not forwarded correctly, Kubo will not recognize
> the configured gateway hostnames and requests may be handled incorrectly.
>
> If `X-Forwarded-Proto` is not set, redirects over HTTPS will use the wrong
> protocol, and DNSLink names will not be inlined for subdomain gateways.
>
> Example: minimal nginx configuration for `example.org`
>
> ```nginx
> server {
> listen 80;
> listen [::]:80;
>
> # IMPORTANT: Include wildcard to match subdomain gateway requests.
> # The dot prefix matches both apex domain and all subdomains.
> server_name .example.org;
>
> location / {
> proxy_pass http://127.0.0.1:8080;
>
> # IMPORTANT: Forward the original Host header to Kubo.
> # Without this, PublicGateways configuration will not work.
> proxy_set_header Host $host;
>
> # IMPORTANT: X-Forwarded-Proto is required for correct behavior:
> # - Redirects will use https:// URLs when set to "https"
> # - DNSLink names will be inlined for subdomain gateways
> # (e.g., /ipns/en.wikipedia-on-ipfs.org → en-wikipedia--on--ipfs-org.ipns.example.org)
> proxy_set_header X-Forwarded-Proto $scheme;
> proxy_set_header X-Forwarded-Host $host;
> }
> }
> ```
>
> Common mistakes to avoid:
>
> - **Missing wildcard in `server_name`:** Using only `server_name example.org;`
> will not match subdomain requests like `{cid}.ipfs.example.org`. Always
> include `*.example.org` or use the dot prefix `.example.org`.
>
> - **Wrong `Host` header value:** Using `proxy_set_header Host $proxy_host;`
> sends the backend's hostname (e.g., `127.0.0.1:8080`) instead of the
> original `Host` header. Always use `$host` or `$http_host`.
>
> - **Missing `Host` header entirely:** If `proxy_set_header Host` is not
> specified, nginx defaults to `$proxy_host`, which breaks gateway routing.
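>
> To verify the proxy configuration, you can query Kubo directly with the `Host`
> header a correct proxy would forward, and confirm the response matches a request
> made through the proxy (`<cid>` is a placeholder):
>
> ```console
> # simulate what a correctly configured proxy sends to Kubo
> $ curl -i -H "Host: example.org" "http://127.0.0.1:8080/ipfs/<cid>"
> ```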
> [!IMPORTANT]
> **Timeouts:** Configure [`Gateway.RetrievalTimeout`](config.md#gatewayretrievaltimeout)
> based on your expected content retrieval times.
> [!IMPORTANT]
> **Rate Limiting:** Use [`Gateway.MaxConcurrentRequests`](config.md#gatewaymaxconcurrentrequests)
> to protect against traffic spikes.
> [!IMPORTANT]
> **CDN/Cloudflare:** If using Cloudflare or other CDNs with
> [deserialized responses](config.md#gatewaydeserializedresponses) enabled, review
> [`Gateway.MaxRangeRequestFileSize`](config.md#gatewaymaxrangerequestfilesize) to avoid
> excess bandwidth billing from range request bugs. Cloudflare users may need additional
> protection via [Cloudflare Snippets](https://github.com/ipfs/boxo/issues/856#issuecomment-3523944976).
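All of these are plain config settings; for example (values are illustrative, tune them for your deployment):
```console
$ ipfs config Gateway.RetrievalTimeout 2m
$ ipfs config --json Gateway.MaxConcurrentRequests 4096
```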
## Directories
For convenience, the gateway (mostly) acts like a normal web-server when serving
@ -53,7 +134,7 @@ a directory:
2. Dynamically build and serve a listing of the contents of the directory.
<sub><sup>&dagger;</sup>This redirect is skipped if the query string contains a
-`go-get=1` parameter. See [PR#3964](https://github.com/ipfs/kubo/pull/3963)
+`go-get=1` parameter. See [PR#3963](https://github.com/ipfs/kubo/pull/3963)
for details</sub>
## Static Websites
@ -107,10 +188,12 @@ This is equivalent of `ipfs block get`.
### `application/vnd.ipld.car`
-Returns a [CAR](https://ipld.io/specs/transport/car/) stream for specific DAG and selector.
-Right now only 'full DAG' implicit selector is implemented.
-Support for user-provided IPLD selectors is tracked in https://github.com/ipfs/kubo/issues/8769.
+Returns a [CAR](https://ipld.io/specs/transport/car/) stream for a DAG or a subset of it.
+The `dag-scope` parameter controls which blocks are included: `all` (default, entire DAG),
+`entity` (logical unit like a file), or `block` (single block). For [UnixFS](https://specs.ipfs.tech/unixfs/) files,
+`entity-bytes` enables byte range requests. See [IPIP-402](https://specs.ipfs.tech/ipips/ipip-0402/)
+for details.
This is a rough equivalent of `ipfs dag export`.
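For example, fetching only the blocks of a single file entity as a CAR over the local path gateway (`<cid>` is a placeholder):
```console
$ curl -H "Accept: application/vnd.ipld.car" \
    "http://127.0.0.1:8080/ipfs/<cid>?dag-scope=entity" -o entity.car
```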

View File

@ -39,7 +39,9 @@ func TestBackupBootstrapPeers(t *testing.T) {
// Start 1 and 2. 2 does not know anyone yet.
nodes[1].StartDaemon()
defer nodes[1].StopDaemon()
nodes[2].StartDaemon()
defer nodes[2].StopDaemon()
assert.Len(t, nodes[1].Peers(), 0)
assert.Len(t, nodes[2].Peers(), 0)
@ -51,6 +53,7 @@ func TestBackupBootstrapPeers(t *testing.T) {
// Start 0, wait a bit. Should connect to 1, and then discover 2 via the
// backup bootstrap peers.
nodes[0].StartDaemon()
defer nodes[0].StopDaemon()
time.Sleep(time.Millisecond * 500)
// Check if they're all connected.

View File

@ -22,7 +22,9 @@ func TestBitswapConfig(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -38,8 +40,10 @@ func TestBitswapConfig(t *testing.T) {
provider := h.NewNode().Init()
provider.SetIPFSConfig("Bitswap.ServerEnabled", false)
provider = provider.StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -70,8 +74,10 @@ func TestBitswapConfig(t *testing.T) {
requester := h.NewNode().Init()
requester.SetIPFSConfig("Bitswap.ServerEnabled", false)
requester.StartDaemon()
defer requester.StopDaemon()
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -91,8 +97,10 @@ func TestBitswapConfig(t *testing.T) {
cfg.HTTPRetrieval.Enabled = config.True
})
requester.StartDaemon()
defer requester.StopDaemon()
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -126,7 +134,9 @@ func TestBitswapConfig(t *testing.T) {
cfg.HTTPRetrieval.Enabled = config.True
})
provider = provider.StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
requester.Connect(provider)
// read libp2p identify from remote peer, and print protocols

View File

@ -76,6 +76,7 @@ func TestContentBlocking(t *testing.T) {
// Start daemon, it should pick up denylist from $IPFS_PATH/denylists/test.deny
node.StartDaemon() // we need online mode for GatewayOverLibp2p tests
t.Cleanup(func() { node.StopDaemon() })
client := node.GatewayClient()
// First, confirm gateway works

View File

@ -47,6 +47,8 @@ func TestDag(t *testing.T) {
t.Run("ipfs dag stat --enc=json", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Import fixture
r, err := os.Open(fixtureFile)
assert.Nil(t, err)
@ -91,6 +93,7 @@ func TestDag(t *testing.T) {
t.Run("ipfs dag stat", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
r, err := os.Open(fixtureFile)
assert.NoError(t, err)
defer r.Close()

View File

@ -60,6 +60,10 @@ func TestRoutingV1Proxy(t *testing.T) {
})
nodes[2].StartDaemon()
t.Cleanup(func() {
nodes.StopDaemons()
})
// Connect them.
nodes.Connect()

View File

@ -32,6 +32,7 @@ func TestRoutingV1Server(t *testing.T) {
})
})
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
return nodes
}
@ -133,6 +134,7 @@ func TestRoutingV1Server(t *testing.T) {
cfg.Routing.Type = config.NewOptionalString("dht")
})
node.StartDaemon()
defer node.StopDaemon()
// Put IPNS record in lonely node. It should be accepted as it is a valid record.
c, err = client.New(node.GatewayURL())
@ -196,6 +198,7 @@ func TestRoutingV1Server(t *testing.T) {
}
})
node.StartDaemon()
defer node.StopDaemon()
c, err := client.New(node.GatewayURL())
require.NoError(t, err)
@ -238,6 +241,7 @@ func TestRoutingV1Server(t *testing.T) {
cfg.Bootstrap = autoconf.FallbackBootstrapPeers
})
node.StartDaemon()
defer node.StopDaemon()
c, err := client.New(node.GatewayURL())
require.NoError(t, err)

View File

@ -16,6 +16,7 @@ func TestDHTAutoclient(t *testing.T) {
node.IPFS("config", "Routing.Type", "autoclient")
})
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("file added on node in client mode is retrievable from node in client mode", func(t *testing.T) {
t.Parallel()

View File

@ -22,6 +22,7 @@ func TestDHTOptimisticProvide(t *testing.T) {
})
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
hash := nodes[0].IPFSAddStr(string(random.Bytes(100)))
nodes[0].IPFS("routing", "provide", hash)

View File

@ -19,6 +19,7 @@ func TestFilesCp(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create simple text file
data := "testing files cp command"
@ -36,6 +37,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp with unsupported DAG node type fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// MFS UnixFS is limited to dag-pb or raw, so we create a dag-cbor node to test this
jsonData := `{"data": "not a UnixFS node"}`
@ -53,6 +55,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp with invalid UnixFS data structure fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create an invalid proto file
data := []byte{0xDE, 0xAD, 0xBE, 0xEF} // Invalid protobuf data
@ -75,6 +78,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp with raw node succeeds", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a raw node
data := "raw data"
@ -98,6 +102,7 @@ func TestFilesCp(t *testing.T) {
t.Run("files cp creates intermediate directories with -p", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a simple text file and add it to IPFS
data := "hello parent directories"
@ -130,6 +135,7 @@ func TestFilesRm(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a file to remove
node.IPFS("files", "mkdir", "/test-dir")
@ -149,6 +155,7 @@ func TestFilesRm(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a file to remove
node.IPFS("files", "mkdir", "/test-dir")
@ -166,6 +173,7 @@ func TestFilesRm(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Create a file to remove
node.IPFS("files", "mkdir", "/test-dir")
@ -186,6 +194,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
t.Run("reaches default limit of 256 operations", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Perform 256 operations with --flush=false (should succeed)
for i := 0; i < 256; i++ {
@ -214,6 +223,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Perform 5 operations (should succeed)
for i := 0; i < 5; i++ {
@ -239,6 +249,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Do 2 operations with --flush=false
node.IPFS("files", "mkdir", "--flush=false", "/dir1")
@ -271,6 +282,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Do 2 operations with --flush=false
node.IPFS("files", "mkdir", "--flush=false", "/dir1")
@ -303,6 +315,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Should be able to do many operations without error
for i := 0; i < 300; i++ {
@ -322,6 +335,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
// Mix of different MFS operations (5 operations to hit the limit)
node.IPFS("files", "mkdir", "--flush=false", "/testdir")

View File

@ -4,9 +4,9 @@ bafyreibmdfd7c5db4kls4ty57zljfhqv36gi43l6txl44pi423wwmeskwy 2 53
bafyreie3njilzdi4ixumru4nzgecsnjtu7fzfcwhg7e6s4s5i7cnbslvn4 2 53
Summary
-Total Size: 99
+Total Size: 99 (99 B)
Unique Blocks: 3
-Shared Size: 7
+Shared Size: 7 (7 B)
Ratio: 1.070707

View File

@ -28,6 +28,7 @@ func TestGatewayLimits(t *testing.T) {
cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(1 * time.Second)
})
node.StartDaemon()
defer node.StopDaemon()
// Add content that can be retrieved quickly
cid := node.IPFSAddStr("test content")
@ -69,6 +70,7 @@ func TestGatewayLimits(t *testing.T) {
cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(2 * time.Second)
})
node.StartDaemon()
defer node.StopDaemon()
// Add some content - use a non-existent CID that will block during retrieval
// to ensure we can control timing

View File

@ -27,6 +27,7 @@ func TestGatewayHAMTDirectory(t *testing.T) {
// Start node
h := harness.NewT(t)
node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline")
defer node.StopDaemon()
client := node.GatewayClient()
// Import fixtures
@ -56,6 +57,7 @@ func TestGatewayHAMTRanges(t *testing.T) {
// Start node
h := harness.NewT(t)
node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline")
t.Cleanup(func() { node.StopDaemon() })
client := node.GatewayClient()
// Import fixtures

View File

@ -28,6 +28,7 @@ func TestGateway(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init().StartDaemon("--offline")
t.Cleanup(func() { node.StopDaemon() })
cid := node.IPFSAddStr("Hello Worlds!")
peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36)
@ -234,6 +235,7 @@ func TestGateway(t *testing.T) {
cfg.API.HTTPHeaders = map[string][]string{header: values}
})
node.StartDaemon()
defer node.StopDaemon()
resp := node.APIClient().DisableRedirects().Get("/webui/")
assert.Equal(t, resp.Headers.Values(header), values)
@ -257,6 +259,7 @@ func TestGateway(t *testing.T) {
t.Run("pprof", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
apiClient := node.APIClient()
t.Run("mutex", func(t *testing.T) {
t.Parallel()
@ -300,6 +303,7 @@ func TestGateway(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init().StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
h.WriteFile("index/index.html", "<p></p>")
cid := node.IPFS("add", "-Q", "-r", filepath.Join(h.Dir, "index")).Stderr.Trimmed()
@ -367,6 +371,7 @@ func TestGateway(t *testing.T) {
cfg.Addresses.Gateway = config.Strings{"/ip4/127.0.0.1/tcp/32563"}
})
node.StartDaemon()
defer node.StopDaemon()
b, err := os.ReadFile(filepath.Join(node.Dir, "gateway"))
require.NoError(t, err)
@ -388,6 +393,7 @@ func TestGateway(t *testing.T) {
assert.NoError(t, err)
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("not present", func(t *testing.T) {
cidFoo := node2.IPFSAddStr("foo")
@ -460,6 +466,7 @@ func TestGateway(t *testing.T) {
}
})
node.StartDaemon()
defer node.StopDaemon()
cidFoo := node.IPFSAddStr("foo")
client := node.GatewayClient()
@ -509,6 +516,7 @@ func TestGateway(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
node.StartDaemon()
defer node.StopDaemon()
client := node.GatewayClient()
res := client.Get("/ipfs/invalid-thing", func(r *http.Request) {
@ -526,6 +534,7 @@ func TestGateway(t *testing.T) {
cfg.Gateway.DisableHTMLErrors = config.True
})
node.StartDaemon()
defer node.StopDaemon()
client := node.GatewayClient()
res := client.Get("/ipfs/invalid-thing", func(r *http.Request) {
@ -546,6 +555,7 @@ func TestLogs(t *testing.T) {
t.Setenv("GOLOG_LOG_LEVEL", "info")
node := h.NewNode().Init().StartDaemon("--offline")
defer node.StopDaemon()
cid := node.IPFSAddStr("Hello Worlds!")
peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36)

View File

@ -32,6 +32,7 @@ func TestGatewayOverLibp2p(t *testing.T) {
p2pProxyNode := nodes[1]
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
// Add data to the gateway node
cidDataOnGatewayNode := cid.MustParse(gwNode.IPFSAddStr("Hello Worlds2!"))
@ -65,6 +66,7 @@ func TestGatewayOverLibp2p(t *testing.T) {
// Enable the experimental feature and reconnect the nodes
gwNode.IPFS("config", "--json", "Experimental.GatewayOverLibp2p", "true")
gwNode.StopDaemon().StartDaemon()
t.Cleanup(func() { gwNode.StopDaemon() })
nodes.Connect()
// Note: the bare HTTP requests here assume that the gateway is mounted at `/`

View File

@ -75,6 +75,7 @@ func TestHTTPRetrievalClient(t *testing.T) {
// Start Kubo
node.StartDaemon()
defer node.StopDaemon()
if debug {
fmt.Printf("delegatedRoutingServer.URL: %s\n", delegatedRoutingServer.URL)

View File

@ -155,6 +155,7 @@ func TestInit(t *testing.T) {
t.Run("ipfs init should not run while daemon is running", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("init")
assert.NotEqual(t, 0, res.ExitErr.ExitCode())
assert.Contains(t, res.Stderr.String(), "Error: ipfs daemon is running. please stop it to run this command")

View File

@ -103,6 +103,7 @@ func TestName(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
t.Run("Resolving self offline succeeds (daemon on)", func(t *testing.T) {
res = node.IPFS("name", "resolve", "--offline", "/ipns/"+name.String())
@ -147,6 +148,7 @@ func TestName(t *testing.T) {
t.Run("Fails to publish in offline mode", func(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon("--offline")
defer node.StopDaemon()
res := node.RunIPFS("name", "publish", "/ipfs/"+fixtureCid)
require.Error(t, res.Err)
require.Equal(t, 1, res.ExitCode())
@ -157,6 +159,7 @@ func TestName(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
defer node.StopDaemon()
ipnsName := ipns.NameFromPeer(node.PeerID()).String()
ipnsPath := ipns.NamespacePrefix + ipnsName
publishPath := "/ipfs/" + fixtureCid
@ -187,6 +190,7 @@ func TestName(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
ipnsPath := ipns.NamespacePrefix + ipns.NameFromPeer(node.PeerID()).String()
publishPath := "/ipfs/" + fixtureCid
@ -227,6 +231,7 @@ func TestName(t *testing.T) {
t.Run("Inspect with verification using wrong RSA key errors", func(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
defer node.StopDaemon()
// Prepare RSA Key 1
res := node.IPFS("key", "gen", "--type=rsa", "--size=4096", "key1")
@ -299,6 +304,7 @@ func TestName(t *testing.T) {
t.Parallel()
node := makeDaemon(t, nil).StartDaemon()
defer node.StopDaemon()
publishPath1 := "/ipfs/" + fixtureCid
publishPath2 := "/ipfs/" + dagCid // Different content
name := ipns.NameFromPeer(node.PeerID())

View File

@ -62,6 +62,7 @@ func TestPeering(t *testing.T) {
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
nodes.StartDaemons()
defer nodes.StopDaemons()
assertPeerings(h, nodes, peerings)
nodes[0].Disconnect(nodes[1])
@ -74,6 +75,7 @@ func TestPeering(t *testing.T) {
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
nodes.StartDaemons()
defer nodes.StopDaemons()
assertPeerings(h, nodes, peerings)
nodes[2].Disconnect(nodes[1])
@ -85,6 +87,7 @@ func TestPeering(t *testing.T) {
peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}}
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
defer nodes.StopDaemons()
nodes[0].StartDaemon()
nodes[1].StartDaemon()
assertPeerings(h, nodes, []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}})
@ -99,6 +102,7 @@ func TestPeering(t *testing.T) {
h, nodes := harness.CreatePeerNodes(t, 3, peerings)
nodes.StartDaemons()
defer nodes.StopDaemons()
assertPeerings(h, nodes, peerings)
nodes[2].StopDaemon()

View File

@ -28,6 +28,9 @@ func setupTestNode(t *testing.T) *harness.Node {
t.Helper()
node := harness.NewT(t).NewNode().Init()
node.StartDaemon("--offline")
t.Cleanup(func() {
node.StopDaemon()
})
return node
}
@ -498,7 +501,6 @@ func TestPinLsEdgeCases(t *testing.T) {
t.Run("invalid pin type returns error", func(t *testing.T) {
t.Parallel()
node := setupTestNode(t)
defer node.StopDaemon()
// Try to list pins with invalid type
res := node.RunIPFS("pin", "ls", "--type=invalid")
@ -510,7 +512,6 @@ func TestPinLsEdgeCases(t *testing.T) {
t.Run("non-existent path returns proper error", func(t *testing.T) {
t.Parallel()
node := setupTestNode(t)
defer node.StopDaemon()
// Try to list a non-existent CID
fakeCID := "QmNonExistent123456789"
@ -521,7 +522,6 @@ func TestPinLsEdgeCases(t *testing.T) {
t.Run("unpinned CID returns not pinned error", func(t *testing.T) {
t.Parallel()
node := setupTestNode(t)
defer node.StopDaemon()
// Add content but don't pin it explicitly (it's just in blockstore)
unpinnedCID := node.IPFSAddStr("unpinned content", "--pin=false")

View File

@ -15,6 +15,7 @@ func TestPing(t *testing.T) {
t.Run("other", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]
@ -25,6 +26,7 @@ func TestPing(t *testing.T) {
t.Run("ping unreachable peer", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
badPeer := "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJx"
@ -37,6 +39,7 @@ func TestPing(t *testing.T) {
t.Run("self", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]
@ -52,6 +55,7 @@ func TestPing(t *testing.T) {
t.Run("0", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]
@ -63,6 +67,7 @@ func TestPing(t *testing.T) {
t.Run("offline", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
node2 := nodes[1]

View File

@ -51,6 +51,7 @@ func TestRemotePinning(t *testing.T) {
node.IPFS("config", "--json", "Pinning.RemoteServices.svc.Policies.MFS.Enable", "true")
node.StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
node.IPFS("files", "cp", "/ipfs/bafkqaaa", "/mfs-pinning-test-"+uuid.NewString())
node.IPFS("files", "flush")
@ -133,6 +134,8 @@ func TestRemotePinning(t *testing.T) {
t.Run("pin remote service ls --stat", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
_, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -155,6 +158,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("adding service with invalid URL fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("pin", "remote", "service", "add", "svc", "invalid-service.example.com", "key")
assert.Equal(t, 1, res.ExitCode())
@ -168,6 +172,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("unauthorized pinning service calls fail", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
_, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, "othertoken")
@ -180,6 +185,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("pinning service calls fail when there is a wrong path", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
_, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL+"/invalid-path", authToken)
@ -191,6 +197,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("pinning service calls fail when DNS resolution fails", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
node.IPFS("pin", "remote", "service", "add", "svc", "https://invalid-service.example.com", authToken)
res := node.RunIPFS("pin", "remote", "ls", "--service=svc")
@ -201,6 +208,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("pin remote service rm", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
node.IPFS("pin", "remote", "service", "add", "svc", "https://example.com", authToken)
node.IPFS("pin", "remote", "service", "rm", "svc")
res := node.IPFS("pin", "remote", "service", "ls")
@ -225,6 +233,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote add --background=true'", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -266,6 +275,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote add --background=false'", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -287,6 +297,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote ls' with multiple statuses", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -340,6 +351,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote ls' by CID", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -360,6 +372,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote rm --name' without --force when multiple pins match", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -388,6 +401,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote rm --name --force' remove multiple pins", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
@ -408,6 +422,7 @@ func TestRemotePinning(t *testing.T) {
t.Run("'ipfs pin remote rm --force' removes all pins", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
svc, svcURL := runPinningService(t, authToken)
node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)

View File

@ -26,6 +26,7 @@ func testPins(t *testing.T, args testPinsArgs) {
node := harness.NewT(t).NewNode().Init()
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
strs := []string{"a", "b", "c", "d", "e", "f", "g"}
@ -127,6 +128,7 @@ func testPinsErrorReporting(t *testing.T, args testPinsArgs) {
node := harness.NewT(t).NewNode().Init()
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
randomCID := "Qme8uX5n9hn15pw9p6WcVKoziyyC9LXv4LEgvsmKMULjnV"
res := node.RunIPFS(StrCat("pin", "add", args.pinArg, randomCID)...)
@ -142,6 +144,7 @@ func testPinDAG(t *testing.T, args testPinsArgs) {
node := h.NewNode().Init()
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
bytes := random.Bytes(1 << 20) // 1 MiB
tmpFile := h.WriteToTemp(string(bytes))
@ -168,6 +171,7 @@ func testPinProgress(t *testing.T, args testPinsArgs) {
if args.runDaemon {
node.StartDaemon("--offline")
defer node.StopDaemon()
}
bytes := random.Bytes(1 << 20) // 1 MiB

View File

@ -7,6 +7,7 @@ import (
"net/http"
"net/http/httptest"
"strings"
"sync/atomic"
"testing"
"time"
@ -764,3 +765,81 @@ func TestProvider(t *testing.T) {
})
}
}
// TestHTTPOnlyProviderWithSweepEnabled tests that provider records are correctly
// sent to HTTP routers when Routing.Type="custom" with only HTTP routers configured,
// even when Provide.DHT.SweepEnabled=true (the default since v0.39).
//
// This is a regression test for https://github.com/ipfs/kubo/issues/11089
func TestHTTPOnlyProviderWithSweepEnabled(t *testing.T) {
t.Parallel()
// Track provide requests received by the mock HTTP router
var provideRequests atomic.Int32
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if (r.Method == http.MethodPut || r.Method == http.MethodPost) &&
strings.HasPrefix(r.URL.Path, "/routing/v1/providers") {
provideRequests.Add(1)
w.WriteHeader(http.StatusOK)
} else if strings.HasPrefix(r.URL.Path, "/routing/v1/providers") && r.Method == http.MethodGet {
// Return empty providers for findprovs
w.Header().Set("Content-Type", "application/x-ndjson")
w.WriteHeader(http.StatusOK)
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
defer mockServer.Close()
h := harness.NewT(t)
node := h.NewNode().Init()
// Explicitly set SweepEnabled=true (the default since v0.39, but be explicit for test clarity)
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
// Configure HTTP-only custom routing (no DHT) with explicit Routing.Type=custom
routingConf := map[string]any{
"Type": "custom", // Explicitly set Routing.Type=custom
"Methods": map[string]any{
"provide": map[string]any{"RouterName": "HTTPRouter"},
"get-ipns": map[string]any{"RouterName": "HTTPRouter"},
"put-ipns": map[string]any{"RouterName": "HTTPRouter"},
"find-peers": map[string]any{"RouterName": "HTTPRouter"},
"find-providers": map[string]any{"RouterName": "HTTPRouter"},
},
"Routers": map[string]any{
"HTTPRouter": map[string]any{
"Type": "http",
"Parameters": map[string]any{
"Endpoint": mockServer.URL,
},
},
},
}
node.SetIPFSConfig("Routing", routingConf)
node.StartDaemon()
defer node.StopDaemon()
// Add content and manually provide it
cid := node.IPFSAddStr(time.Now().String())
// Manual provide should succeed even without libp2p peers
res := node.RunIPFS("routing", "provide", cid)
// Check that the command succeeded (exit code 0) and no provide-related errors
assert.Equal(t, 0, res.ExitCode(), "routing provide should succeed with HTTP-only routing and SweepEnabled=true")
assert.NotContains(t, res.Stderr.String(), "cannot provide", "should not have provide errors")
// Verify HTTP router received at least one provide request
assert.Greater(t, provideRequests.Load(), int32(0),
"HTTP router should have received provide requests")
// Verify 'provide stat' works with HTTP-only routing (regression test for stats)
statRes := node.RunIPFS("provide", "stat")
assert.Equal(t, 0, statRes.ExitCode(), "provide stat should succeed with HTTP-only routing")
assert.NotContains(t, statRes.Stderr.String(), "stats not available",
"should not report stats unavailable")
// LegacyProvider outputs "TotalReprovides:" in its stats
assert.Contains(t, statRes.Stdout.String(), "TotalReprovides:",
"should show legacy provider stats")
}

View File

@ -26,6 +26,7 @@ func TestRcmgr(t *testing.T) {
})
node.StartDaemon()
defer node.StopDaemon()
t.Run("swarm resources should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "resources")
@ -41,6 +42,7 @@ func TestRcmgr(t *testing.T) {
cfg.Swarm.ResourceMgr.Enabled = config.False
})
node.StartDaemon()
defer node.StopDaemon()
t.Run("swarm resources should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "resources")
@ -56,6 +58,7 @@ func TestRcmgr(t *testing.T) {
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
require.Equal(t, 0, res.ExitCode())
@ -73,7 +76,9 @@ func TestRcmgr(t *testing.T) {
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
})
node.StartDaemon()
t.Cleanup(func() { node.StopDaemon() })
t.Run("conns and streams are above 800 for default connmgr settings", func(t *testing.T) {
t.Parallel()
@ -135,6 +140,7 @@ func TestRcmgr(t *testing.T) {
overrides.System.ConnsInbound = rcmgr.Unlimited
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -150,6 +156,7 @@ func TestRcmgr(t *testing.T) {
overrides.Transient.Memory = 88888
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -163,6 +170,7 @@ func TestRcmgr(t *testing.T) {
overrides.Service = map[string]rcmgr.ResourceLimits{"foo": {Memory: 77777}}
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -176,6 +184,7 @@ func TestRcmgr(t *testing.T) {
overrides.Protocol = map[protocol.ID]rcmgr.ResourceLimits{"foo": {Memory: 66666}}
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -191,6 +200,7 @@ func TestRcmgr(t *testing.T) {
overrides.Peer = map[peer.ID]rcmgr.ResourceLimits{validPeerID: {Memory: 55555}}
})
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "resources", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
@ -218,6 +228,7 @@ func TestRcmgr(t *testing.T) {
})
nodes.StartDaemons()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("node 0 should fail to connect to and ping node 1", func(t *testing.T) {
t.Parallel()

View File

@ -57,6 +57,7 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) {
}
nodes.StartDaemons(daemonArgs...).Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("ipfs routing findpeer", func(t *testing.T) {
t.Parallel()
@ -157,6 +158,7 @@ func testSelfFindDHT(t *testing.T) {
})
nodes.StartDaemons()
defer nodes.StopDaemons()
res := nodes[0].RunIPFS("dht", "findpeer", nodes[0].PeerID().String())
assert.Equal(t, 1, res.ExitCode())

View File

@ -14,6 +14,7 @@ func TestStats(t *testing.T) {
t.Run("stats dht", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
defer nodes.StopDaemons()
node1 := nodes[0]
res := node1.IPFS("stats", "dht")

View File

@ -31,6 +31,7 @@ func TestSwarm(t *testing.T) {
t.Run("ipfs swarm peers returns empty peers when a node is not connected to any peers", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
var output expectedOutputType
err := json.Unmarshal(res.Stdout.Bytes(), &output)
@ -40,7 +41,9 @@ func TestSwarm(t *testing.T) {
t.Run("ipfs swarm peers with flag identify outputs expected identify information about connected peers", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
defer otherNode.StopDaemon()
node.Connect(otherNode)
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
@ -67,7 +70,9 @@ func TestSwarm(t *testing.T) {
t.Run("ipfs swarm peers with flag identify outputs Identify field with data that matches calling ipfs id on a peer", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
defer otherNode.StopDaemon()
node.Connect(otherNode)
otherNodeIDResponse := otherNode.RunIPFS("id", "--enc=json")

View File

@ -76,6 +76,7 @@ func TestTracing(t *testing.T) {
node.Runner.Env["OTEL_EXPORTER_OTLP_PROTOCOL"] = "grpc"
node.Runner.Env["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://localhost:4317"
node.StartDaemon()
defer node.StopDaemon()
assert.Eventually(t,
func() bool {

View File

@ -74,6 +74,7 @@ func TestTransports(t *testing.T) {
t.Parallel()
nodes := tcpNodes(t).StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("tcp with NOISE", func(t *testing.T) {
@ -86,6 +87,7 @@ func TestTransports(t *testing.T) {
})
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("QUIC", func(t *testing.T) {
@ -104,6 +106,7 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("QUIC+Webtransport", func(t *testing.T) {
@ -122,6 +125,7 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("QUIC connects with non-dialable transports", func(t *testing.T) {
@ -144,6 +148,7 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
t.Run("WebRTC Direct", func(t *testing.T) {
@ -162,5 +167,6 @@ func TestTransports(t *testing.T) {
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
nodes.StopDaemons()
})
}