From b57278017a5d91144ed354c4fa314d60c1556802 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jan 2026 21:43:52 -0800 Subject: [PATCH 1/6] chore(deps): bump github.com/tidwall/gjson from 1.16.0 to 1.18.0 (#11168) Bumps [github.com/tidwall/gjson](https://github.com/tidwall/gjson) from 1.16.0 to 1.18.0. - [Commits](https://github.com/tidwall/gjson/compare/v1.16.0...v1.18.0) --- updated-dependencies: - dependency-name: github.com/tidwall/gjson dependency-version: 1.18.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7c4be05fa..a4a5c4ecb 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,7 @@ require ( github.com/prometheus/client_golang v1.23.2 github.com/stretchr/testify v1.11.1 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d - github.com/tidwall/gjson v1.16.0 + github.com/tidwall/gjson v1.18.0 github.com/tidwall/sjson v1.2.5 github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 diff --git a/go.sum b/go.sum index 1e4be0486..475c95728 100644 --- a/go.sum +++ b/go.sum @@ -809,8 +809,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJ github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqajr6t1lOv8GyGE2U= github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg= -github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= From ef99e0a0f70b97b713e6e1debf1d3d30806eb92e Mon Sep 17 00:00:00 2001 From: Marcin Rataj Date: Thu, 29 Jan 2026 02:56:42 +0100 Subject: [PATCH 2/6] refactor(ci): add caching to ipfs-webui interop tests (#11173) * ci: add caching to ipfs-webui interop tests cache node_modules, Playwright browsers, and test build output to speed up repeated CI runs. also use node version from ipfs-webui/.tool-versions instead of hardcoding, and upload test artifacts on failure. * docs(ci): add header comment to interop workflow explain what helia-interop and ipfs-webui jobs do --- .github/workflows/interop.yml | 92 ++++++++++++++++++++++++++--------- 1 file changed, 70 insertions(+), 22 deletions(-) diff --git a/.github/workflows/interop.yml b/.github/workflows/interop.yml index 25bdba4f2..f78fd40df 100644 --- a/.github/workflows/interop.yml +++ b/.github/workflows/interop.yml @@ -1,3 +1,17 @@ +# Interoperability Tests +# +# This workflow ensures Kubo remains compatible with the broader IPFS ecosystem. +# It builds Kubo from source, then runs: +# +# 1. 
helia-interop: Tests compatibility with Helia (JavaScript IPFS implementation) +# using Playwright-based tests from @helia/interop package. +# +# 2. ipfs-webui: Runs E2E tests from ipfs/ipfs-webui repository to verify +# the web interface works correctly with the locally built Kubo binary. +# +# Both jobs use caching to speed up repeated runs (npm dependencies, Playwright +# browsers, and webui build artifacts). + name: Interop on: @@ -84,9 +98,6 @@ jobs: run: shell: bash steps: - - uses: actions/setup-node@v6 - with: - node-version: 20.x - uses: actions/download-artifact@v7 with: name: kubo @@ -96,36 +107,73 @@ jobs: with: repository: ipfs/ipfs-webui path: ipfs-webui - - run: | - echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT - id: npm-cache-dir - - uses: actions/cache@v5 + - uses: actions/setup-node@v6 with: - path: ${{ steps.npm-cache-dir.outputs.dir }} - key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }} - restore-keys: | - ${{ runner.os }}-${{ github.job }}- - - env: - NPM_CACHE_DIR: ${{ steps.npm-cache-dir.outputs.dir }} - run: | - npm ci --prefer-offline --no-audit --progress=false --cache "$NPM_CACHE_DIR" - npx playwright install --with-deps - working-directory: ipfs-webui - - id: ref + node-version-file: 'ipfs-webui/.tool-versions' + - id: webui-ref run: echo "ref=$(git rev-parse --short HEAD)" | tee -a $GITHUB_OUTPUT working-directory: ipfs-webui - - id: state + - id: webui-state env: GITHUB_TOKEN: ${{ github.token }} - ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.ref.outputs.ref }}/status + ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.webui-ref.outputs.ref }}/status SELECTOR: .state KEY: state run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "$KEY={}" | tee -a $GITHUB_OUTPUT - - name: Build ipfs-webui@main (state=${{ steps.state.outputs.state }}) + # Cache node_modules based on package-lock.json + - name: Cache node_modules + uses: actions/cache@v5 + id: node-modules-cache + with: + path: ipfs-webui/node_modules + key: ${{ runner.os }}-webui-node-modules-${{ hashFiles('ipfs-webui/package-lock.json') }} + restore-keys: | + ${{ runner.os }}-webui-node-modules- + - name: Install dependencies + if: steps.node-modules-cache.outputs.cache-hit != 'true' + run: npm ci --prefer-offline --no-audit --progress=false + working-directory: ipfs-webui + # Cache Playwright browsers + - name: Cache Playwright browsers + uses: actions/cache@v5 + id: playwright-cache + with: + path: ~/.cache/ms-playwright + key: ${{ runner.os }}-playwright-${{ hashFiles('ipfs-webui/package-lock.json') }} + restore-keys: | + ${{ runner.os }}-playwright- + # On cache miss: download browsers and install OS dependencies + - name: Install Playwright with dependencies + if: steps.playwright-cache.outputs.cache-hit != 'true' + run: npx playwright install --with-deps + working-directory: ipfs-webui + # On cache hit: only ensure OS dependencies are present (fast, idempotent) + - name: Install Playwright OS dependencies + if: steps.playwright-cache.outputs.cache-hit == 'true' + run: npx playwright install-deps + working-directory: ipfs-webui + # Cache test build output + - name: Cache test build + uses: actions/cache@v5 + id: test-build-cache + with: + path: ipfs-webui/build + key: ${{ runner.os }}-webui-build-${{ hashFiles('ipfs-webui/package-lock.json', 'ipfs-webui/src/**', 'ipfs-webui/public/**') }} + restore-keys: | + ${{ runner.os }}-webui-build- + - name: Build ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }}) + if: 
steps.test-build-cache.outputs.cache-hit != 'true' run: npm run test:build working-directory: ipfs-webui - - name: Test ipfs-webui@main (state=${{ steps.state.outputs.state }}) E2E against the locally built Kubo binary + - name: Test ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }}) E2E against the locally built Kubo binary run: npm run test:e2e env: IPFS_GO_EXEC: ${{ github.workspace }}/cmd/ipfs/ipfs working-directory: ipfs-webui + - name: Upload test artifacts on failure + if: failure() + uses: actions/upload-artifact@v6 + with: + name: webui-test-results + path: ipfs-webui/test-results/ + retention-days: 7 From 7de7af082076313705691bda7fc178dc7415f0c7 Mon Sep 17 00:00:00 2001 From: Marcin Rataj Date: Fri, 30 Jan 2026 17:20:56 +0100 Subject: [PATCH 3/6] feat(dns): skip DNS lookups for AutoTLS hostnames (#11140) * feat(dns): resolve libp2p.direct addresses locally without network I/O p2p-forge hostnames encode IP addresses directly (e.g., 1-2-3-4.peerID.libp2p.direct -> 1.2.3.4), so DNS queries are wasteful. kubo now parses these IPs in-memory. - applies to both default libp2p.direct and custom AutoTLS.DomainSuffix - TXT queries still delegate to network for ACME DNS-01 compatibility - https://github.com/ipfs/kubo/pull/11140#discussion_r2683477754 use fallback to network DNS instead of returning errors when local parsing fails, ensuring forward compatibility with future DNS records - https://github.com/ipfs/kubo/pull/11140#discussion_r2683512408 add peerID validation using peer.Decode(), matching libp2p.direct server behavior, with fallback on invalid peerID - https://github.com/ipfs/kubo/pull/11140#discussion_r2683521930 document interaction with DNS.Resolvers in config.md - https://github.com/ipfs/kubo/pull/11140#discussion_r2683526647 add AutoTLS.SkipDNSLookup config flag to disable local resolution (useful for debugging or custom DNS override scenarios) - https://github.com/ipfs/kubo/pull/11140#discussion_r2683533462 add E2E test verifying libp2p.direct resolves locally even when DNS.Resolvers points to a broken server additional improvements: - use madns.BasicResolver interface instead of custom basicResolver - add compile-time interface checks for p2pForgeResolver and madns.Resolver - refactor tests: merge IPv4/IPv6, add helpers, use config.DefaultDomainSuffix - improve changelog to explain public good benefit (reducing DNS load) Fixes #11136 --- config/autotls.go | 8 ++ core/node/dns.go | 35 ++++- core/node/p2pforge_resolver.go | 120 ++++++++++++++++ core/node/p2pforge_resolver_test.go | 172 +++++++++++++++++++++++ docs/changelogs/v0.40.md | 7 + docs/config.md | 19 ++- test/cli/dns_resolvers_multiaddr_test.go | 143 +++++++++++++++++++ 7 files changed, 502 insertions(+), 2 deletions(-) create mode 100644 core/node/p2pforge_resolver.go create mode 100644 core/node/p2pforge_resolver_test.go create mode 100644 test/cli/dns_resolvers_multiaddr_test.go diff --git a/config/autotls.go b/config/autotls.go index 805a9ded6..4d90b7171 100644 --- a/config/autotls.go +++ b/config/autotls.go @@ -16,6 +16,13 @@ type AutoTLS struct { // Optional, controls if Kubo should add /tls/sni/.../ws listener to every /tcp port if no explicit /ws is defined in Addresses.Swarm AutoWSS Flag `json:",omitempty"` + // Optional, controls whether to skip network DNS lookups for p2p-forge domains. + // Applies to resolution via DNS.Resolvers, including /dns* multiaddrs in go-libp2p. 
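+	// (e.g., peer addresses learned from the DHT or delegated routing).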
+ // When enabled (default), A/AAAA queries for *.libp2p.direct are resolved + // locally by parsing the IP directly from the hostname, avoiding network I/O. + // Set to false to always use network DNS (useful for debugging). + SkipDNSLookup Flag `json:",omitempty"` + // Optional override of the parent domain that will be used DomainSuffix *OptionalString `json:",omitempty"` @@ -42,5 +49,6 @@ const ( DefaultCAEndpoint = p2pforge.DefaultCAEndpoint DefaultAutoWSS = true // requires AutoTLS.Enabled DefaultAutoTLSShortAddrs = true // requires AutoTLS.Enabled + DefaultAutoTLSSkipDNSLookup = true // skip network DNS for p2p-forge domains DefaultAutoTLSRegistrationDelay = 1 * time.Hour ) diff --git a/core/node/dns.go b/core/node/dns.go index 3f0875afb..ba4e00784 100644 --- a/core/node/dns.go +++ b/core/node/dns.go @@ -10,6 +10,10 @@ import ( madns "github.com/multiformats/go-multiaddr-dns" ) +// Compile-time interface check: *madns.Resolver (returned by gateway.NewDNSResolver +// and madns.NewResolver) must implement madns.BasicResolver for p2pForgeResolver fallback. +var _ madns.BasicResolver = (*madns.Resolver)(nil) + func DNSResolver(cfg *config.Config) (*madns.Resolver, error) { var dohOpts []doh.Option if !cfg.DNS.MaxCacheTTL.IsDefault() { @@ -19,5 +23,34 @@ func DNSResolver(cfg *config.Config) (*madns.Resolver, error) { // Replace "auto" DNS resolver placeholders with autoconf values resolvers := cfg.DNSResolversWithAutoConf() - return gateway.NewDNSResolver(resolvers, dohOpts...) + // Get base resolver from boxo (handles custom DoH resolvers per eTLD) + baseResolver, err := gateway.NewDNSResolver(resolvers, dohOpts...) + if err != nil { + return nil, err + } + + // Check if we should skip network DNS lookups for p2p-forge domains + skipAutoTLSDNS := cfg.AutoTLS.SkipDNSLookup.WithDefault(config.DefaultAutoTLSSkipDNSLookup) + if !skipAutoTLSDNS { + // Local resolution disabled, use network DNS for everything + return baseResolver, nil + } + + // Build list of p2p-forge domains to resolve locally without network I/O. + // AutoTLS hostnames encode IP addresses directly (e.g., 1-2-3-4.peerID.libp2p.direct), + // so DNS lookups are wasteful. We resolve these in-memory when possible. + forgeDomains := []string{config.DefaultDomainSuffix} + customDomain := cfg.AutoTLS.DomainSuffix.WithDefault(config.DefaultDomainSuffix) + if customDomain != config.DefaultDomainSuffix { + forgeDomains = append(forgeDomains, customDomain) + } + forgeResolver := NewP2PForgeResolver(forgeDomains, baseResolver) + + // Register p2p-forge resolver for each domain, fallback to baseResolver for others + opts := []madns.Option{madns.WithDefaultResolver(baseResolver)} + for _, domain := range forgeDomains { + opts = append(opts, madns.WithDomainResolver(domain+".", forgeResolver)) + } + + return madns.NewResolver(opts...) } diff --git a/core/node/p2pforge_resolver.go b/core/node/p2pforge_resolver.go new file mode 100644 index 000000000..6ddbb1904 --- /dev/null +++ b/core/node/p2pforge_resolver.go @@ -0,0 +1,120 @@ +package node + +import ( + "context" + "net" + "net/netip" + "strings" + + "github.com/libp2p/go-libp2p/core/peer" + madns "github.com/multiformats/go-multiaddr-dns" +) + +// p2pForgeResolver implements madns.BasicResolver for deterministic resolution +// of p2p-forge domains (e.g., *.libp2p.direct) without network I/O for A/AAAA queries. 
+//
+// p2p-forge encodes IP addresses in DNS hostnames:
+// - IPv4: 1-2-3-4.peerID.libp2p.direct -> 1.2.3.4
+// - IPv6: 2001-db8--1.peerID.libp2p.direct -> 2001:db8::1
+//
+// When local parsing fails (invalid format, invalid peerID, etc.), the resolver
+// falls back to network DNS. This ensures future <peerID>.libp2p.direct records
+// can still resolve if the authoritative DNS adds support for them.
+//
+// TXT queries always delegate to the fallback resolver. This is important for
+// p2p-forge/client ACME DNS-01 challenges to work correctly, as Let's Encrypt
+// needs to verify TXT records at _acme-challenge.peerID.libp2p.direct.
+//
+// See: https://github.com/ipshipyard/p2p-forge
+type p2pForgeResolver struct {
+	suffixes []string
+	fallback madns.BasicResolver
+}
+
+// Compile-time check that p2pForgeResolver implements madns.BasicResolver.
+var _ madns.BasicResolver = (*p2pForgeResolver)(nil)
+
+// NewP2PForgeResolver creates a resolver for the given p2p-forge domain suffixes.
+// Each suffix should be a bare domain like "libp2p.direct" (without leading dot).
+// When local IP parsing fails, queries fall back to the provided resolver.
+// TXT queries always delegate to the fallback resolver for ACME compatibility.
+func NewP2PForgeResolver(suffixes []string, fallback madns.BasicResolver) *p2pForgeResolver {
+	normalized := make([]string, len(suffixes))
+	for i, s := range suffixes {
+		normalized[i] = strings.ToLower(strings.TrimSuffix(s, "."))
+	}
+	return &p2pForgeResolver{suffixes: normalized, fallback: fallback}
+}
+
+// LookupIPAddr parses IP addresses encoded in the hostname.
+//
+// Format: <encoded-ip>.<peerID>.<suffix>
+// - IPv4: 192-168-1-1.peerID.libp2p.direct -> [192.168.1.1]
+// - IPv6: 2001-db8--1.peerID.libp2p.direct -> [2001:db8::1]
+//
+// If the hostname doesn't match the expected format (wrong suffix, invalid peerID,
+// invalid IP encoding, or peerID-only), the lookup falls back to network DNS.
+// This allows future DNS records like <peerID>.libp2p.direct to resolve normally.
+func (r *p2pForgeResolver) LookupIPAddr(ctx context.Context, hostname string) ([]net.IPAddr, error) {
+	// DNS is case-insensitive, normalize to lowercase
+	hostname = strings.ToLower(strings.TrimSuffix(hostname, "."))
+
+	// find matching suffix and extract subdomain
+	var subdomain string
+	for _, suffix := range r.suffixes {
+		if sub, found := strings.CutSuffix(hostname, "."+suffix); found {
+			subdomain = sub
+			break
+		}
+	}
+	if subdomain == "" {
+		// not a p2p-forge domain, fallback to network
+		return r.fallback.LookupIPAddr(ctx, hostname)
+	}
+
+	// split subdomain into parts: should be [ip-prefix, peerID]
+	parts := strings.Split(subdomain, ".")
+	if len(parts) != 2 {
+		// not the expected <encoded-ip>.<peerID> format, fallback to network
+		return r.fallback.LookupIPAddr(ctx, hostname)
+	}
+
+	encodedIP := parts[0]
+	peerIDStr := parts[1]
+
+	// validate peerID (same validation as libp2p.direct DNS server)
+	if _, err := peer.Decode(peerIDStr); err != nil {
+		// invalid peerID, fallback to network
+		return r.fallback.LookupIPAddr(ctx, hostname)
+	}
+
+	// RFC 1123: hostname labels cannot start or end with hyphen
+	if len(encodedIP) == 0 || encodedIP[0] == '-' || encodedIP[len(encodedIP)-1] == '-' {
+		// invalid hostname label, fallback to network
+		return r.fallback.LookupIPAddr(ctx, hostname)
+	}
+
+	// try parsing as IPv4 first: segments joined by "-" become "."
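+	// e.g. encodedIP "192-168-1-1" -> segments ["192" "168" "1" "1"] -> "192.168.1.1"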
+ segments := strings.Split(encodedIP, "-") + if len(segments) == 4 { + ipv4Str := strings.Join(segments, ".") + if ip, err := netip.ParseAddr(ipv4Str); err == nil && ip.Is4() { + return []net.IPAddr{{IP: ip.AsSlice()}}, nil + } + } + + // try parsing as IPv6: segments joined by "-" become ":" + ipv6Str := strings.Join(segments, ":") + if ip, err := netip.ParseAddr(ipv6Str); err == nil && ip.Is6() { + return []net.IPAddr{{IP: ip.AsSlice()}}, nil + } + + // IP parsing failed, fallback to network + return r.fallback.LookupIPAddr(ctx, hostname) +} + +// LookupTXT delegates to the fallback resolver to support ACME DNS-01 challenges +// and any other TXT record lookups on p2p-forge domains. +func (r *p2pForgeResolver) LookupTXT(ctx context.Context, hostname string) ([]string, error) { + return r.fallback.LookupTXT(ctx, hostname) +} diff --git a/core/node/p2pforge_resolver_test.go b/core/node/p2pforge_resolver_test.go new file mode 100644 index 000000000..caa1b6409 --- /dev/null +++ b/core/node/p2pforge_resolver_test.go @@ -0,0 +1,172 @@ +package node + +import ( + "context" + "errors" + "net" + "testing" + + "github.com/ipfs/kubo/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test constants matching p2p-forge production format +const ( + // testPeerID is a valid peerID in CIDv1 base36 format as used by p2p-forge. + // Base36 is lowercase-only, making it safe for case-insensitive DNS. + // Corresponds to 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN in base58btc. + testPeerID = "k51qzi5uqu5dhnwe629wdlncpql6frppdpwnz4wtlcw816aysd5wwlk63g4wmh" + + // domainSuffix is the default p2p-forge domain used in tests. + domainSuffix = config.DefaultDomainSuffix +) + +// mockResolver implements madns.BasicResolver for testing +type mockResolver struct { + txtRecords map[string][]string + ipRecords map[string][]net.IPAddr + ipErr error +} + +func (m *mockResolver) LookupIPAddr(_ context.Context, hostname string) ([]net.IPAddr, error) { + if m.ipErr != nil { + return nil, m.ipErr + } + if m.ipRecords != nil { + return m.ipRecords[hostname], nil + } + return nil, nil +} + +func (m *mockResolver) LookupTXT(_ context.Context, name string) ([]string, error) { + if m.txtRecords != nil { + return m.txtRecords[name], nil + } + return nil, nil +} + +// newTestResolver creates a p2pForgeResolver with default suffix. +func newTestResolver(t *testing.T) *p2pForgeResolver { + t.Helper() + return NewP2PForgeResolver([]string{domainSuffix}, &mockResolver{}) +} + +// assertLookupIP verifies that hostname resolves to wantIP. +func assertLookupIP(t *testing.T, r *p2pForgeResolver, hostname, wantIP string) { + t.Helper() + addrs, err := r.LookupIPAddr(t.Context(), hostname) + require.NoError(t, err) + require.Len(t, addrs, 1) + assert.Equal(t, wantIP, addrs[0].IP.String()) +} + +func TestP2PForgeResolver_LookupIPAddr(t *testing.T) { + r := newTestResolver(t) + + tests := []struct { + name string + hostname string + wantIP string + }{ + // IPv4 + {"ipv4/basic", "192-168-1-1." + testPeerID + "." + domainSuffix, "192.168.1.1"}, + {"ipv4/zeros", "0-0-0-0." + testPeerID + "." + domainSuffix, "0.0.0.0"}, + {"ipv4/max", "255-255-255-255." + testPeerID + "." + domainSuffix, "255.255.255.255"}, + {"ipv4/trailing dot", "10-0-0-1." + testPeerID + "." + domainSuffix + ".", "10.0.0.1"}, + {"ipv4/uppercase suffix", "192-168-1-1." + testPeerID + ".LIBP2P.DIRECT", "192.168.1.1"}, + // IPv6 + {"ipv6/full", "2001-db8-0-0-0-0-0-1." + testPeerID + "." 
+ domainSuffix, "2001:db8::1"}, + {"ipv6/compressed", "2001-db8--1." + testPeerID + "." + domainSuffix, "2001:db8::1"}, + {"ipv6/loopback", "0--1." + testPeerID + "." + domainSuffix, "::1"}, + {"ipv6/all zeros", "0--0." + testPeerID + "." + domainSuffix, "::"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assertLookupIP(t, r, tt.hostname, tt.wantIP) + }) + } +} + +func TestP2PForgeResolver_LookupIPAddr_MultipleSuffixes(t *testing.T) { + r := NewP2PForgeResolver([]string{domainSuffix, "custom.example.com"}, &mockResolver{}) + + tests := []struct { + hostname string + wantIP string + }{ + {"192-168-1-1." + testPeerID + "." + domainSuffix, "192.168.1.1"}, + {"10-0-0-1." + testPeerID + ".custom.example.com", "10.0.0.1"}, + } + + for _, tt := range tests { + t.Run(tt.hostname, func(t *testing.T) { + assertLookupIP(t, r, tt.hostname, tt.wantIP) + }) + } +} + +func TestP2PForgeResolver_LookupIPAddr_FallbackToNetwork(t *testing.T) { + fallbackIP := []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}} + + tests := []struct { + name string + hostname string + }{ + {"peerID only", testPeerID + "." + domainSuffix}, + {"invalid peerID", "192-168-1-1.invalid-peer-id." + domainSuffix}, + {"invalid IP encoding", "not-an-ip." + testPeerID + "." + domainSuffix}, + {"leading hyphen", "-192-168-1-1." + testPeerID + "." + domainSuffix}, + {"too many parts", "extra.192-168-1-1." + testPeerID + "." + domainSuffix}, + {"wrong suffix", "192-168-1-1." + testPeerID + ".example.com"}, + } + + // Build fallback records from test cases + ipRecords := make(map[string][]net.IPAddr, len(tests)) + for _, tt := range tests { + ipRecords[tt.hostname] = fallbackIP + } + fallback := &mockResolver{ipRecords: ipRecords} + r := NewP2PForgeResolver([]string{domainSuffix}, fallback) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + addrs, err := r.LookupIPAddr(t.Context(), tt.hostname) + require.NoError(t, err) + require.Len(t, addrs, 1, "should fallback to network") + assert.Equal(t, "93.184.216.34", addrs[0].IP.String()) + }) + } +} + +func TestP2PForgeResolver_LookupIPAddr_FallbackError(t *testing.T) { + expectedErr := errors.New("network error") + r := NewP2PForgeResolver([]string{domainSuffix}, &mockResolver{ipErr: expectedErr}) + + // peerID-only triggers fallback, which returns error + _, err := r.LookupIPAddr(t.Context(), testPeerID+"."+domainSuffix) + require.ErrorIs(t, err, expectedErr) +} + +func TestP2PForgeResolver_LookupTXT(t *testing.T) { + t.Run("delegates to fallback for ACME DNS-01", func(t *testing.T) { + acmeHost := "_acme-challenge." + testPeerID + "." + domainSuffix + fallback := &mockResolver{ + txtRecords: map[string][]string{acmeHost: {"acme-token-value"}}, + } + r := NewP2PForgeResolver([]string{domainSuffix}, fallback) + + records, err := r.LookupTXT(t.Context(), acmeHost) + require.NoError(t, err) + assert.Equal(t, []string{"acme-token-value"}, records) + }) + + t.Run("returns empty when fallback has no records", func(t *testing.T) { + r := NewP2PForgeResolver([]string{domainSuffix}, &mockResolver{}) + + records, err := r.LookupTXT(t.Context(), "anything."+domainSuffix) + require.NoError(t, err) + assert.Empty(t, records) + }) +} diff --git a/docs/changelogs/v0.40.md b/docs/changelogs/v0.40.md index fd8c41b5b..bcdb6f0e2 100644 --- a/docs/changelogs/v0.40.md +++ b/docs/changelogs/v0.40.md @@ -19,6 +19,7 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team. 
- [Improved `ipfs dag stat` output](#improved-ipfs-dag-stat-output) - [Skip bad keys when listing](#skip_bad_keys_when_listing) - [Accelerated DHT Client and Provide Sweep now work together](#accelerated-dht-client-and-provide-sweep-now-work-together) + - [🌐 No unnecessary DNS lookups for AutoTLS addresses](#-no-unnecessary-dns-lookups-for-autotls-addresses) - [⏱️ Configurable gateway request duration limit](#️-configurable-gateway-request-duration-limit) - [🔧 Recovery from corrupted MFS root](#-recovery-from-corrupted-mfs-root) - [📋 Long listing format for `ipfs ls`](#-long-listing-format-for-ipfs-ls) @@ -118,6 +119,12 @@ Change the `ipfs key list` behavior to log an error and continue listing keys wh Previously, provide operations could start before the Accelerated DHT Client discovered enough peers, causing sweep mode to lose its efficiency benefits. Now, providing waits for the initial network crawl (about 10 minutes). Your content will be properly distributed across DHT regions after initial DHT map is created. Check `ipfs provide stat` to see when providing begins. +#### 🌐 No unnecessary DNS lookups for AutoTLS addresses + +Kubo no longer makes DNS queries for [AutoTLS](https://blog.libp2p.io/autotls/) addresses like `1-2-3-4.peerid.libp2p.direct`. Since the IP is encoded in the hostname (`1-2-3-4` means `1.2.3.4`), Kubo extracts it locally. This reduces load on the public good DNS servers at `libp2p.direct` run by [Shipyard](https://ipshipyard.com), reserving them for web browsers which lack direct DNS access and must rely on the browser's resolver. + +To disable, set [`AutoTLS.SkipDNSLookup`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotlsskipdnslookup) to `false`. + #### ⏱️ Configurable gateway request duration limit [`Gateway.MaxRequestDuration`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrequestduration) sets an absolute deadline for gateway requests. Unlike `RetrievalTimeout` (which resets on each data write and catches stalled transfers), this is a hard limit on the total time a request can take. diff --git a/docs/config.md b/docs/config.md index e6ab44d04..8c160f062 100644 --- a/docs/config.md +++ b/docs/config.md @@ -779,6 +779,22 @@ Default: `true` Type: `flag` +### `AutoTLS.SkipDNSLookup` + +Optional. Controls whether to skip network DNS lookups for [p2p-forge] domains like `*.libp2p.direct`. + +This applies to DNS resolution performed via [`DNS.Resolvers`](#dnsresolvers), including `/dns*` multiaddrs resolved by go-libp2p (e.g., peer addresses from DHT or delegated routing). + +When enabled (default), A/AAAA queries for hostnames matching [`AutoTLS.DomainSuffix`](#autotlsdomainsuffix) are resolved locally by parsing the IP address directly from the hostname (e.g., `1-2-3-4.peerID.libp2p.direct` resolves to `1.2.3.4` without network I/O). This avoids unnecessary DNS queries since the IP is already encoded in the hostname. + +If the hostname format is invalid (wrong peerID, malformed IP encoding), the resolver falls back to network DNS, ensuring forward compatibility with potential future DNS record types. + +Set to `false` to always use network DNS for these domains. This is primarily useful for debugging or if you need to override resolution behavior via [`DNS.Resolvers`](#dnsresolvers). + +Default: `true` + +Type: `flag` + ### `AutoTLS.DomainSuffix` Optional override of the parent domain suffix that will be used in DNS+TLS+WebSockets multiaddrs generated by [p2p-forge] client. 
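+For example, a deployment running its own p2p-forge instance under a hypothetical
+domain could pair this with [`AutoTLS.SkipDNSLookup`](#autotlsskipdnslookup); the
+values below are purely illustrative:
+
+```json
+{
+  "AutoTLS": {
+    "DomainSuffix": "libp2p.example.net",
+    "SkipDNSLookup": false
+  }
+}
+```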
@@ -3489,7 +3505,7 @@ Please remove this option from your config. ## `DNS` -Options for configuring DNS resolution for [DNSLink](https://docs.ipfs.tech/concepts/dnslink/) and `/dns*` [Multiaddrs][libp2p-multiaddrs]. +Options for configuring DNS resolution for [DNSLink](https://docs.ipfs.tech/concepts/dnslink/) and `/dns*` [Multiaddrs][libp2p-multiaddrs] (including peer addresses discovered via DHT or delegated routing). ### `DNS.Resolvers` @@ -3519,6 +3535,7 @@ Be mindful that: - The default catch-all resolver is the cleartext one provided by your operating system. It can be overridden by adding a DoH entry for the DNS root indicated by `.` as illustrated above. - Out-of-the-box support for selected non-ICANN TLDs relies on third-party centralized services provided by respective communities on best-effort basis. - The special value `"auto"` uses DNS resolvers from [AutoConf](#autoconf) when enabled. For example: `{".": "auto"}` uses any custom DoH resolver (global or per TLD) provided by AutoConf system. +- When [`AutoTLS.SkipDNSLookup`](#autotlsskipdnslookup) is enabled (default), domains matching [`AutoTLS.DomainSuffix`](#autotlsdomainsuffix) (default: `libp2p.direct`) are resolved locally by parsing the IP directly from the hostname. Set `AutoTLS.SkipDNSLookup=false` to force network DNS lookups for these domains. Default: `{".": "auto"}` diff --git a/test/cli/dns_resolvers_multiaddr_test.go b/test/cli/dns_resolvers_multiaddr_test.go new file mode 100644 index 000000000..b330004ea --- /dev/null +++ b/test/cli/dns_resolvers_multiaddr_test.go @@ -0,0 +1,143 @@ +package cli + +import ( + "strings" + "testing" + "time" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// testDomainSuffix is the default p2p-forge domain used in tests +const testDomainSuffix = config.DefaultDomainSuffix // libp2p.direct + +// TestDNSResolversApplyToMultiaddr is a regression test for: +// https://github.com/ipfs/kubo/issues/9199 +// +// It verifies that DNS.Resolvers config is used when resolving /dnsaddr, +// /dns, /dns4, /dns6 multiaddrs during peer connections, not just for +// DNSLink resolution. +func TestDNSResolversApplyToMultiaddr(t *testing.T) { + t.Parallel() + + t.Run("invalid DoH resolver causes multiaddr resolution to fail", func(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init("--profile=test") + + // Set an invalid DoH resolver that will fail when used. + // If DNS.Resolvers is properly wired to multiaddr resolution, + // swarm connect to a /dnsaddr will fail with an error mentioning + // the invalid resolver URL. 
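+		// (the .test TLD is reserved for testing per RFC 6761 and will never resolve)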
invalidResolver := "https://invalid.broken.resolver.test/dns-query"
+		node.SetIPFSConfig("DNS.Resolvers", map[string]string{
+			".": invalidResolver,
+		})
+
+		// Clear bootstrap peers to prevent background connection attempts
+		node.SetIPFSConfig("Bootstrap", []string{})
+
+		node.StartDaemon()
+		defer node.StopDaemon()
+
+		// Give daemon time to fully start
+		time.Sleep(2 * time.Second)
+
+		// Verify daemon is responsive
+		result := node.RunIPFS("id")
+		require.Equal(t, 0, result.ExitCode(), "daemon should be responsive")
+
+		// Try to connect to a /dnsaddr peer - this should fail because
+		// the DNS.Resolvers config points to an invalid DoH server
+		result = node.RunIPFS("swarm", "connect", "/dnsaddr/bootstrap.libp2p.io")
+
+		// The connection should fail
+		require.NotEqual(t, 0, result.ExitCode(),
+			"swarm connect should fail when DNS.Resolvers points to invalid DoH server")
+
+		// The error should mention the invalid resolver, proving DNS.Resolvers
+		// is being used for multiaddr resolution
+		stderr := result.Stderr.String()
+		assert.True(t,
+			strings.Contains(stderr, "invalid.broken.resolver.test") ||
+				strings.Contains(stderr, "no such host") ||
+				strings.Contains(stderr, "lookup") ||
+				strings.Contains(stderr, "dial"),
+			"error should indicate DNS resolution failure using custom resolver. got: %s", stderr)
+	})
+
+	t.Run("libp2p.direct resolves locally even with broken DNS.Resolvers", func(t *testing.T) {
+		t.Parallel()
+
+		h := harness.NewT(t)
+		nodes := h.NewNodes(2).Init("--profile=test")
+
+		// Configure node0 with a broken DNS resolver
+		// This would break all DNS resolution if libp2p.direct wasn't resolved locally
+		invalidResolver := "https://invalid.broken.resolver.test/dns-query"
+		nodes[0].SetIPFSConfig("DNS.Resolvers", map[string]string{
+			".": invalidResolver,
+		})
+
+		// Clear bootstrap peers on both nodes
+		for _, n := range nodes {
+			n.SetIPFSConfig("Bootstrap", []string{})
+		}
+
+		nodes.StartDaemons()
+		defer nodes.StopDaemons()
+
+		// Get node1's peer ID in base36 format (what p2p-forge uses in DNS hostnames)
+		// DNS is case-insensitive, and base36 is lowercase-only, making it ideal for DNS
+		idResult := nodes[1].RunIPFS("id", "--peerid-base", "base36", "-f", "<id>")
+		require.Equal(t, 0, idResult.ExitCode())
+		node1IDBase36 := strings.TrimSpace(idResult.Stdout.String())
+		node1ID := nodes[1].PeerID().String()
+		node1Addrs := nodes[1].SwarmAddrs()
+
+		// Find a TCP address we can use
+		var tcpAddr string
+		for _, addr := range node1Addrs {
+			addrStr := addr.String()
+			if strings.Contains(addrStr, "/tcp/") && strings.Contains(addrStr, "/ip4/127.0.0.1") {
+				tcpAddr = addrStr
+				break
+			}
+		}
+		require.NotEmpty(t, tcpAddr, "node1 should have a local TCP address")
+
+		// Extract port from address like /ip4/127.0.0.1/tcp/12345/...
+		parts := strings.Split(tcpAddr, "/")
+		var port string
+		for i, p := range parts {
+			if p == "tcp" && i+1 < len(parts) {
+				port = parts[i+1]
+				break
+			}
+		}
+		require.NotEmpty(t, port, "should find TCP port in address")
+
+		// Construct a libp2p.direct hostname that encodes 127.0.0.1
+		// Format: /dns4/<encoded-ip>.<peerID>.libp2p.direct/tcp/<port>/p2p/<peerID>
+		// p2p-forge uses base36 peerIDs in DNS hostnames (lowercase, DNS-safe)
+		libp2pDirectAddr := "/dns4/127-0-0-1." + node1IDBase36 + "." 
+ testDomainSuffix + "/tcp/" + port + "/p2p/" + node1ID + + // This connection should succeed because libp2p.direct is resolved locally + // even though DNS.Resolvers points to a broken server + result := nodes[0].RunIPFS("swarm", "connect", libp2pDirectAddr) + + // The connection should succeed - local resolution bypasses broken DNS + assert.Equal(t, 0, result.ExitCode(), + "swarm connect to libp2p.direct should succeed with local resolution. stderr: %s", + result.Stderr.String()) + + // Verify the connection was actually established + result = nodes[0].RunIPFS("swarm", "peers") + require.Equal(t, 0, result.ExitCode()) + assert.Contains(t, result.Stdout.String(), node1ID, + "node0 should be connected to node1") + }) +} From 9539b4d8b8509031a45c243fecbec6a4234aef17 Mon Sep 17 00:00:00 2001 From: Marcin Rataj Date: Fri, 30 Jan 2026 19:38:42 +0100 Subject: [PATCH 4/6] docs: cleanup broken links and outdated content (#11100) - docs/README.md: restructure to surface 20+ previously undiscoverable docs - docs/README.md: fix broken github-issue-guide.md link (file was removed) - docs/add-code-flow.md: rewrite with current code flow and mermaid diagrams - docs/customizing.md, docs/gateway.md: use specs.ipfs.tech URLs - README.md: fix orphan #nix anchor, use go.dev links, link to contributors graph - remove stale docs/AUTHORS and docs/generate-authors.sh (last updated 2016) --- docs/AUTHORS | 112 ------------------ docs/README.md | 63 +++++++---- docs/add-code-flow.md | 239 ++++++++++++++++++++++++++++----------- docs/customizing.md | 2 +- docs/generate-authors.sh | 12 -- 5 files changed, 215 insertions(+), 213 deletions(-) delete mode 100644 docs/AUTHORS delete mode 100755 docs/generate-authors.sh diff --git a/docs/AUTHORS b/docs/AUTHORS deleted file mode 100644 index 85a6e160c..000000000 --- a/docs/AUTHORS +++ /dev/null @@ -1,112 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `docs/generate-authors.sh`. - -Aaron Hill -Adam Gashlin -Adrian Ulrich -Alex -anarcat -Andres Buritica -Andrew Chin -Andy Leap -Artem Andreenko -Baptiste Jonglez -Brendan Benshoof -Brendan Mc -Brian Tiger Chow -Caio Alonso -Carlos Cobo -Cayman Nava -Chas Leichner -Chris Grimmett -Chris P -Chris Sasarak -Christian Couder -Christian Kniep -Christopher Sasarak -David -David Braun -David Dias -David Wagner -dignifiedquire -Dominic Della Valle -Dominic Tarr -drathir -Dylan Powers -Emery Hemingway -epitron -Ethan Buchman -Etienne Laurin -Forrest Weston -Francesco Canessa -gatesvp -Giuseppe Bertone -Harlan T Wood -Hector Sanjuan -Henry -Ho-Sheng Hsiao -Jakub Sztandera -Jason Carver -Jonathan Dahan -Juan Batiz-Benet -Karthik Bala -Kevin Atkinson -Kevin Wallace -klauspost -Knut Ahlers -Konstantin Koroviev -kpcyrd -Kristoffer Ström -Lars Gierth -llSourcell -Marcin Janczyk -Marcin Rataj -Markus Amalthea Magnuson -michael -Michael Lovci -Michael Muré -Michael Pfister -Mildred Ki'Lya -Muneeb Ali -Nick Hamann -palkeo -Patrick Connolly -Pavol Rusnak -Peter Borzov -Philip Nelson -Quinn Slack -ReadmeCritic -rht -Richard Littauer -Robert Carlsen -Roerick Sweeney -Sean Lang -SH -Shanti Bouchez-Mongardé -Shaun Bruce -Simon Kirkby -Siraj Ravel -Siva Chandran -slothbag -sroerick -Stephan Seidt -Stephen Sugden -Stephen Whitmore -Steven Allen -Tarnay Kálmán -theswitch -Thomas Gardner -Tim Groeneveld -Tommi Virtanen -Tonis Tiigi -Tor Arne Vestbø -Travis Person -verokarhu -Vijayee Kulkaa -Vitor Baptista -vitzli -W. 
Trevor King -Whyrusleeping -wzhd -Yuval Langer -ᴍᴀᴛᴛ ʙᴇʟʟ diff --git a/docs/README.md b/docs/README.md index 244aa4846..a3777546d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,39 +1,58 @@ # Developer Documentation and Guides -If you are looking for User Documentation & Guides, please visit [docs.ipfs.tech](https://docs.ipfs.tech/) or check [General Documentation](#general-documentation). +If you're looking for User Documentation & Guides, visit [docs.ipfs.tech](https://docs.ipfs.tech/). -If you’re experiencing an issue with IPFS, **please follow [our issue guide](github-issue-guide.md) when filing an issue!** +If you're experiencing an issue with IPFS, please [file an issue](https://github.com/ipfs/kubo/issues/new/choose) in this repository. -Otherwise, check out the following guides to using and developing IPFS: - -## General Documentation +## Configuration - [Configuration reference](config.md) - - [Datastore configuration](datastores.md) - - [Experimental features](experimental-features.md) + - [Datastore configuration](datastores.md) + - [Experimental features](experimental-features.md) +- [Environment variables](environment-variables.md) -## Developing `kubo` +## Running Kubo + +- [Gateway configuration](gateway.md) +- [Delegated routing](delegated-routing.md) +- [Content blocking](content-blocking.md) (for public node operators) +- [libp2p resource management](libp2p-resource-management.md) +- [Mounting IPFS with FUSE](fuse.md) + +## Metrics & Monitoring + +- [Prometheus metrics](metrics.md) +- [Telemetry plugin](telemetry.md) +- [Provider statistics](provide-stats.md) +- [Performance debugging](debug-guide.md) + +## Development - **[Developer Guide](developer-guide.md)** - prerequisites, build, test, and contribute - Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) -- Building on [Windows](windows.md) -- [Performance Debugging Guidelines](debug-guide.md) -- [Release Checklist](releases.md) +- [Building on Windows](windows.md) +- [Customizing Kubo](customizing.md) +- [Installing plugins](plugins.md) +- [Release checklist](releases.md) ## Guides -- [How to Implement an API Client](implement-api-bindings.md) -- [Connecting with Websockets](transports.md) — if you want `js-ipfs` nodes in web browsers to connect to your `kubo` node, you will need to turn on websocket support in your `kubo` node. +- [Transferring files over IPFS](file-transfer.md) +- [How to implement an API client](implement-api-bindings.md) +- [HTTP/RPC clients](http-rpc-clients.md) +- [Websocket transports](transports.md) +- [Command completion](command-completion.md) -## Advanced User Guides +## Production -- [Transferring a File Over IPFS](file-transfer.md) -- [Installing command completion](command-completion.md) -- [Mounting IPFS with FUSE](fuse.md) -- [Installing plugins](plugins.md) -- [Setting up an IPFS Gateway](https://github.com/ipfs/kubo/blob/master/docs/gateway.md) +- [Reverse proxy setup](production/reverse-proxy.md) -## Other +## Specifications -- [Thanks to all our contributors ❤️](AUTHORS) (We use the `generate-authors.sh` script to regenerate this list.) 
-- [How to file a GitHub Issue](github-issue-guide.md) +- [Repository structure](specifications/repository.md) +- [Filesystem datastore](specifications/repository_fs.md) +- [Keystore](specifications/keystore.md) + +## Examples + +- [Kubo as a library](examples/kubo-as-a-library/README.md) diff --git a/docs/add-code-flow.md b/docs/add-code-flow.md index 353d47166..0cdba3e8f 100644 --- a/docs/add-code-flow.md +++ b/docs/add-code-flow.md @@ -1,102 +1,209 @@ -# IPFS : The `Add` command demystified +# How `ipfs add` Works -The goal of this document is to capture the code flow for adding a file (see the `coreapi` package) using the IPFS CLI, in the process exploring some data structures and packages like `ipld.Node` (aka `dagnode`), `FSNode`, `MFS`, etc. +This document explains what happens when you run `ipfs add` to import files into IPFS. Understanding this flow helps when debugging, optimizing imports, or building applications on top of IPFS. -## Concepts -- [Files](https://github.com/ipfs/docs/issues/133) +- [The Big Picture](#the-big-picture) +- [Try It Yourself](#try-it-yourself) +- [Step by Step](#step-by-step) + - [Step 1: Chunking](#step-1-chunking) + - [Step 2: Building the DAG](#step-2-building-the-dag) + - [Step 3: Storing Blocks](#step-3-storing-blocks) + - [Step 4: Pinning](#step-4-pinning) + - [Alternative: Organizing with MFS](#alternative-organizing-with-mfs) +- [Options](#options) +- [UnixFS Format](#unixfs-format) +- [Code Architecture](#code-architecture) + - [Key Files](#key-files) + - [The Adder](#the-adder) +- [Further Reading](#further-reading) ---- +## The Big Picture -**Try this yourself** -> -> ``` -> # Convert a file to the IPFS format. -> echo "Hello World" > new-file -> ipfs add new-file -> added QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u new-file -> 12 B / 12 B [=========================================================] 100.00% -> -> # Add a file to the MFS. -> NEW_FILE_HASH=$(ipfs add new-file -Q) -> ipfs files cp /ipfs/$NEW_FILE_HASH /new-file -> -> # Get information from the file in MFS. -> ipfs files stat /new-file -> # QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u -> # Size: 12 -> # CumulativeSize: 20 -> # ChildBlocks: 0 -> # Type: file -> -> # Retrieve the contents. -> ipfs files read /new-file -> # Hello World -> ``` +When you add a file to IPFS, three main things happen: -## Code Flow +1. **Chunking** - The file is split into smaller pieces +2. **DAG Building** - Those pieces are organized into a tree structure (a [Merkle DAG](https://docs.ipfs.tech/concepts/merkle-dag/)) +3. **Pinning** - The root of the tree is pinned so it persists in your local node -**[`UnixfsAPI.Add()`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreapi/unixfs.go#L31)** - *Entrypoint into the `Unixfs` package* +The result is a Content Identifier (CID) - a hash that uniquely identifies your content and can be used to retrieve it from anywhere in the IPFS network. -The `UnixfsAPI.Add()` acts on the input data or files, to build a _merkledag_ node (in essence it is the entire tree represented by the root node) and adds it to the _blockstore_. -Within the function, a new `Adder` is created with the configured `Blockstore` and __DAG service__`. +```mermaid +flowchart LR + A["Your File
(bytes)"] --> B["Chunker
(split data)"] + B --> C["DAG Builder
(tree)"] + C --> D["CID
(hash)"] +``` -- **[`adder.AddAllAndPin(files)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L403)** - *Entrypoint to the `Add` logic* - encapsulates a lot of the underlying functionality that will be investigated in the following sections. +## Try It Yourself - Our focus will be on the simplest case, a single file, handled by `Adder.addFile(file files.File)`. +```bash +# Add a simple file +echo "Hello World" > hello.txt +ipfs add hello.txt +# added QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u hello.txt - - **[`adder.addFile(file files.File)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L450)** - *Create the _DAG_ and add to `MFS`* +# See what's inside +ipfs cat QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u +# Hello World - The `addFile(file)` method takes the data and converts it into a __DAG__ tree and adds the root of the tree into the `MFS`. +# View the DAG structure +ipfs dag get QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u +``` - https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L508-L521 +## Step by Step - There are two main methods to focus on - +### Step 1: Chunking - 1. **[`adder.add(io.Reader)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L115)** - *Create and return the **root** __DAG__ node* +Big files are split into chunks because: - This method converts the input data (`io.Reader`) to a __DAG__ tree, by splitting the data into _chunks_ using the `Chunker` and organizing them into a __DAG__ (with a *trickle* or *balanced* layout. See [balanced](https://github.com/ipfs/go-unixfs/blob/6b769632e7eb8fe8f302e3f96bf5569232e7a3ee/importer/balanced/builder.go) for more info). +- Large files need to be broken down for efficient transfer +- Identical chunks across files are stored only once (deduplication) +- You can fetch parts of a file without downloading the whole thing - The method returns the **root** `ipld.Node` of the __DAG__. +**Chunking strategies** (set with `--chunker`): - 2. **[`adder.addNode(ipld.Node, path)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L366)** - *Add **root** __DAG__ node to the `MFS`* +| Strategy | Description | Best For | +|----------|-------------|----------| +| `size-N` | Fixed size chunks | General use | +| `rabin` | Content-defined chunks using rolling hash | Deduplication across similar files | +| `buzhash` | Alternative content-defined chunking | Similar to rabin | - Now that we have the **root** node of the `DAG`, this needs to be added to the `MFS` file system. - Fetch (or create, if doesn't already exist) the `MFS` **root** using `mfsRoot()`. +See `ipfs add --help` for current defaults, or [Import](config.md#import) for making them permanent. - > NOTE: The `MFS` **root** is an ephemeral root, created and destroyed solely for the `add` functionality. +Content-defined chunking (rabin/buzhash) finds natural boundaries in the data. This means if you edit the middle of a file, only the changed chunks need to be re-stored - the rest can be deduplicated. - Assuming the directory already exists in the MFS file system, (if it doesn't exist it will be created using `mfs.Mkdir()`), the **root** __DAG__ node is added to the `MFS` File system using the `mfs.PutNode()` function. +### Step 2: Building the DAG - - **[MFS] [`PutNode(mfs.Root, path, ipld.Node)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/ops.go#L86)** - *Insert node at path into given `MFS`* +Each chunk becomes a leaf node in a tree. 
If a file has many chunks, intermediate nodes group them together. This creates a Merkle DAG (Directed Acyclic Graph) where: - The `path` param is used to determine the `MFS Directory`, which is first looked up in the `MFS` using `lookupDir()` function. This is followed by adding the **root** __DAG__ node (`ipld.Node`) into this `Directory` using `directory.AddChild()` method. +- Each node is identified by a hash of its contents +- Parent nodes contain links (hashes) to their children +- The root node's hash becomes the file's CID - - **[MFS] Add Child To `UnixFS`** - - **[`directory.AddChild(filename, ipld.Node)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/dir.go#L350)** - *Add **root** __DAG__ node under this directory* +**Layout strategies**: - Within this method the node is added to the `Directory`'s __DAG service__ using the `dserv.Add()` method, followed by adding the **root** __DAG__ node with the given name, in the `directory.addUnixFSChild(directory.child{name, ipld.Node})` method. +**Balanced layout** (default): - - **[MFS] [`directory.addUnixFSChild(child)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/dir.go#L375)** - *Add child to inner UnixFS Directory* +```mermaid +graph TD + Root --> Node1[Node] + Root --> Node2[Node] + Node1 --> Leaf1[Leaf] + Node1 --> Leaf2[Leaf] + Node2 --> Leaf3[Leaf] +``` - The node is then added as a child to the inner `UnixFS` directory using the `(BasicDirectory).AddChild()` method. +All leaves at similar depth. Good for random access - you can jump to any part of the file efficiently. - > NOTE: This is not to be confused with the `directory.AddChild(filename, ipld.Node)`, as this operates on the `UnixFS` `BasicDirectory` object. +**Trickle layout** (`--trickle`): - - **[UnixFS] [`(BasicDirectory).AddChild(ctx, name, ipld.Node)`](https://github.com/ipfs/go-unixfs/blob/v1.1.16/io/directory.go#L137)** - *Add child to `BasicDirectory`* +```mermaid +graph TD + Root --> Leaf1[Leaf] + Root --> Node1[Node] + Root --> Node2[Node] + Node1 --> Leaf2[Leaf] + Node2 --> Leaf3[Leaf] +``` - > IMPORTANT: It should be noted that the `BasicDirectory` object uses the `ProtoNode` type object which is an implementation of the `ipld.Node` interface, seen and used throughout this document. Ideally the `ipld.Node` should always be used, unless we need access to specific functions from `ProtoNode` (like `Copy()`) that are not available in the interface. +Leaves added progressively. Good for streaming - you can start reading before the whole file is added. - This method first attempts to remove any old links (`ProtoNode.RemoveNodeLink(name)`) to the `ProtoNode` prior to adding a link to the newly added `ipld.Node`, using `ProtoNode.AddNodeLink(name, ipld.Node)`. +### Step 3: Storing Blocks - - **[Merkledag] [`AddNodeLink()`](https://github.com/ipfs/go-merkledag/blob/v1.1.15/node.go#L99)** +As the DAG is built, each node is stored in the blockstore: - The `AddNodeLink()` method is where an `ipld.Link` is created with the `ipld.Node`'s `CID` and size in the `ipld.MakeLink(ipld.Node)` method, and is then appended to the `ProtoNode`'s links in the `ProtoNode.AddRawLink(name)` method. 
+- **Normal mode**: Data is copied into IPFS's internal storage (`~/.ipfs/blocks/`) +- **Filestore mode** (`--nocopy`): Only references to the original file are stored (saves disk space but the original file must remain in place) - - **[`adder.Finalize()`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L200)** - *Fetch and return the __DAG__ **root** from the `MFS` and `UnixFS` directory* +### Step 4: Pinning - The `Finalize` method returns the `ipld.Node` from the `UnixFS` `Directory`. +By default, added content is pinned (`ipfs add --pin=true`). This tells your IPFS node to keep this data - without pinning, content may eventually be removed to free up space. - - **[`adder.PinRoot()`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L171)** - *Pin all files under the `MFS` **root*** +### Alternative: Organizing with MFS - The whole process ends with `PinRoot` recursively pinning all the files under the `MFS` **root** +Instead of pinning, you can use the [Mutable File System (MFS)](https://docs.ipfs.tech/concepts/file-systems/#mutable-file-system-mfs) to organize content using familiar paths like `/photos/vacation.jpg` instead of raw CIDs: + +```bash +# Add directly to MFS path +ipfs add --to-files=/backups/ myfile.txt + +# Or copy an existing CID into MFS +ipfs files cp /ipfs/QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u /docs/hello.txt +``` + +Content in MFS is implicitly pinned and stays organized across node restarts. + +## Options + +Run `ipfs add --help` to see all available options for controlling chunking, DAG layout, CID format, pinning behavior, and more. + +## UnixFS Format + +IPFS uses [UnixFS](https://specs.ipfs.tech/unixfs/) to represent files and directories. UnixFS is an abstraction layer that: + +- Gives names to raw data blobs (so you can have `/foo/bar.txt` instead of just hashes) +- Represents directories as lists of named links to other nodes +- Organizes large files as trees of smaller chunks +- Makes these structures cryptographically verifiable - any tampering is detectable because it would change the hashes + +With `--raw-leaves`, leaf nodes store raw data without the UnixFS wrapper. This is more efficient and is the default when using CIDv1. + +## Code Architecture + +The add flow spans several layers: + +```mermaid +flowchart TD + subgraph CLI ["CLI Layer (kubo)"] + A["core/commands/add.go
<br>parses flags, shows progress"]
    end
    subgraph API ["CoreAPI Layer (kubo)"]
        B["core/coreapi/unixfs.go<br>UnixfsAPI.Add() entry point"]
    end
    subgraph Adder ["Adder (kubo)"]
        C["core/coreunix/add.go<br>
orchestrates chunking, DAG building, MFS, pinning"] + end + subgraph Boxo ["boxo libraries"] + D["chunker/ - splits data into chunks"] + E["ipld/unixfs/ - DAG layout and UnixFS format"] + F["mfs/ - mutable filesystem abstraction"] + G["pinning/ - pin management"] + H["blockstore/ - block storage"] + end + A --> B --> C --> Boxo +``` + +### Key Files + +| Component | Location | +|-----------|----------| +| CLI command | `core/commands/add.go` | +| API implementation | `core/coreapi/unixfs.go` | +| Adder logic | `core/coreunix/add.go` | +| Chunking | [boxo/chunker](https://github.com/ipfs/boxo/tree/main/chunker) | +| DAG layouts | [boxo/ipld/unixfs/importer](https://github.com/ipfs/boxo/tree/main/ipld/unixfs/importer) | +| MFS | [boxo/mfs](https://github.com/ipfs/boxo/tree/main/mfs) | +| Pinning | [boxo/pinning/pinner](https://github.com/ipfs/boxo/tree/main/pinning/pinner) | + +### The Adder + +The `Adder` type in `core/coreunix/add.go` is the workhorse. It: + +1. **Creates an MFS root** - temporary in-memory filesystem for building the DAG +2. **Processes files recursively** - chunks each file and builds DAG nodes +3. **Commits to blockstore** - persists all blocks +4. **Pins the result** - keeps content from being removed +5. **Returns the root CID** + +Key methods: + +- `AddAllAndPin()` - main entry point +- `addFileNode()` - handles a single file or directory +- `add()` - chunks data and builds the DAG using boxo's layout builders + +## Further Reading + +- [UnixFS specification](https://specs.ipfs.tech/unixfs/) +- [IPLD and Merkle DAGs](https://docs.ipfs.tech/concepts/merkle-dag/) +- [Pinning](https://docs.ipfs.tech/concepts/persistence/) +- [MFS (Mutable File System)](https://docs.ipfs.tech/concepts/file-systems/#mutable-file-system-mfs) diff --git a/docs/customizing.md b/docs/customizing.md index 0f078999f..f1e726763 100644 --- a/docs/customizing.md +++ b/docs/customizing.md @@ -45,7 +45,7 @@ This gives a more Go-centric dependency updating flow to building a new binary w ## Bespoke Extension Points Certain Kubo functionality may have their own extension points. For example: -* Kubo supports the [Routing v1](https://github.com/ipfs/specs/blob/main/routing/ROUTING_V1_HTTP.md) API for delegating content routing to external processes +* Kubo supports the [Routing v1](https://specs.ipfs.tech/routing/http-routing-v1/) API for delegating content routing to external processes * Kubo supports the [Pinning Service API](https://github.com/ipfs/pinning-services-api-spec) for delegating pinning to external processes * Kubo supports [DNSLink](https://dnslink.dev/) for delegating name->CID mappings to DNS diff --git a/docs/generate-authors.sh b/docs/generate-authors.sh deleted file mode 100755 index 75b33b7e0..000000000 --- a/docs/generate-authors.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -# see also ".mailmap" for how email addresses and names are deduplicated - - -cat >AUTHORS <<-'EOF' -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `docs/generate-authors.sh`. 
- -EOF -git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf >>AUTHORS From 59b5d6ab4da8d7a2bebaa2e1eb7b82cbce476503 Mon Sep 17 00:00:00 2001 From: Vedant Madane <6527493+VedantMadane@users.noreply.github.com> Date: Sat, 31 Jan 2026 00:09:26 +0530 Subject: [PATCH 5/6] feat(key): add 'ipfs key ls' as alias for 'ipfs key list' (#11147) * feat(key): add 'ipfs key ls' as alias for 'ipfs key list' Add 'ls' as an alias for the 'list' subcommand in 'ipfs key' to be consistent with other ipfs commands like 'ipfs repo ls' and 'ipfs pin ls' which use 'ls' instead of 'list'. Fixes #10976 Signed-off-by: Vedant Madane <6527493+VedantMadane@users.noreply.github.com> * feat(key): make 'ipfs key ls' canonical, deprecate 'list' aligns with other commands like 'ipfs pin ls' and 'ipfs files ls'. 'ipfs key list' still works but shows deprecation warning. * fix(key): correct --key option description in verify command was copy-pasted from sign command and said "signing" instead of "verifying" --------- Signed-off-by: Vedant Madane <6527493+VedantMadane@users.noreply.github.com> Co-authored-by: Marcin Rataj --- client/rpc/key.go | 2 +- core/commands/commands_test.go | 1 + core/commands/keystore.go | 20 ++++++++++++++++---- docs/changelogs/v0.40.md | 8 +++++--- 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/client/rpc/key.go b/client/rpc/key.go index 710d9fb06..a38c0962a 100644 --- a/client/rpc/key.go +++ b/client/rpc/key.go @@ -101,7 +101,7 @@ func (api *KeyAPI) List(ctx context.Context) ([]iface.Key, error) { var out struct { Keys []keyOutput } - if err := api.core().Request("key/list").Exec(ctx, &out); err != nil { + if err := api.core().Request("key/ls").Exec(ctx, &out); err != nil { return nil, err } diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 49e359e24..d4bad9601 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -106,6 +106,7 @@ func TestCommands(t *testing.T) { "/key/gen", "/key/import", "/key/list", + "/key/ls", "/key/rename", "/key/rm", "/key/rotate", diff --git a/core/commands/keystore.go b/core/commands/keystore.go index 6ce1b5a0d..afcdb62db 100644 --- a/core/commands/keystore.go +++ b/core/commands/keystore.go @@ -38,9 +38,9 @@ publish'. > ipfs key gen --type=rsa --size=2048 mykey > ipfs name publish --key=mykey QmSomeHash -'ipfs key list' lists the available keys. +'ipfs key ls' lists the available keys. - > ipfs key list + > ipfs key ls self mykey `, @@ -49,7 +49,8 @@ publish'. "gen": keyGenCmd, "export": keyExportCmd, "import": keyImportCmd, - "list": keyListCmd, + "list": keyListDeprecatedCmd, + "ls": keyListCmd, "rename": keyRenameCmd, "rm": keyRmCmd, "rotate": keyRotateCmd, @@ -488,6 +489,17 @@ var keyListCmd = &cmds.Command{ Type: KeyOutputList{}, } +var keyListDeprecatedCmd = &cmds.Command{ + Status: cmds.Deprecated, + Helptext: cmds.HelpText{ + Tagline: "Deprecated: use 'ipfs key ls' instead.", + }, + Options: keyListCmd.Options, + Run: keyListCmd.Run, + Encoders: keyListCmd.Encoders, + Type: keyListCmd.Type, +} + const ( keyStoreForceOptionName = "force" ) @@ -773,7 +785,7 @@ the signed payload is always prefixed with "libp2p-key signed message:". 
`,
 },
 Options: []cmds.Option{
- cmds.StringOption("key", "k", "The name of the key to use for signing."),
+ cmds.StringOption("key", "k", "The name of the key to use for verifying."),
 cmds.StringOption("signature", "s", "Multibase-encoded signature to verify."),
 ke.OptionIPNSBase,
 },
diff --git a/docs/changelogs/v0.40.md b/docs/changelogs/v0.40.md
index bcdb6f0e2..127e93c73 100644
--- a/docs/changelogs/v0.40.md
+++ b/docs/changelogs/v0.40.md
@@ -17,7 +17,7 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
 - [New `ipfs diag datastore` commands](#new-ipfs-diag-datastore-commands)
 - [🚇 Improved `ipfs p2p` tunnels with foreground mode](#-improved-ipfs-p2p-tunnels-with-foreground-mode)
 - [Improved `ipfs dag stat` output](#improved-ipfs-dag-stat-output)
-- [Skip bad keys when listing](#skip_bad_keys_when_listing)
+- [🔑 `ipfs key` improvements](#-ipfs-key-improvements)
 - [Accelerated DHT Client and Provide Sweep now work together](#accelerated-dht-client-and-provide-sweep-now-work-together)
 - [🌐 No unnecessary DNS lookups for AutoTLS addresses](#-no-unnecessary-dns-lookups-for-autotls-addresses)
 - [⏱️ Configurable gateway request duration limit](#️-configurable-gateway-request-duration-limit)
@@ -111,9 +111,11 @@ Ratio: 1.500000
 
 Use `--progress=true` to force progress even when piped, or `--progress=false` to disable it.
 
-#### Skip bad keys when listing
+#### 🔑 `ipfs key` improvements
 
-Change the `ipfs key list` behavior to log an error and continue listing keys when a key cannot be read from the keystore or decoded.
+`ipfs key ls` is now the canonical command for listing keys, matching `ipfs pin ls` and `ipfs files ls`. The old `ipfs key list` still works but is deprecated.
+
+Listing also became more resilient: bad keys are now skipped with an error log instead of failing the entire operation.
 
 #### Accelerated DHT Client and Provide Sweep now work together
 

From 77ed3dd0ef485bc421f6975c43f6d745717a48f1 Mon Sep 17 00:00:00 2001
From: Andrew Gillis <11790789+gammazero@users.noreply.github.com>
Date: Fri, 30 Jan 2026 14:41:55 -0800
Subject: [PATCH 6/6] feat(rpc): Content-Type headers and IPNS record get/put (#11067)

* fix http header when compress enabled for get command

Closes #2376

* fix(rpc): set Content-Type for ipfs get based on output format

- set application/x-tar when outputting tar (default and --archive)
- set application/gzip when compression is enabled (--compress)
- update go-ipfs-cmds with Tar encoding type and RFC 6713 compliant MIME types (application/gzip instead of application/x-gzip)

* test(rpc): add Content-Type header tests for ipfs get

* feat(rpc): add Content-Type headers for binary responses

set proper Content-Type headers for RPC endpoints that return binary data:

- `dag export`: application/vnd.ipld.car
- `block get`: application/vnd.ipld.raw
- `diag profile`: application/zip
- `get`: application/x-tar or application/gzip (already worked, migrated to new API)

uses the new OctetStream encoding type and SetContentType() method from go-ipfs-cmds to specify custom MIME types for binary responses.

refs: https://github.com/ipfs/kubo/issues/2376

* feat(rpc): add `ipfs name get` command for IPNS record retrieval

add dedicated command to retrieve raw signed IPNS records from the routing system. returns protobuf-encoded IPNS record with Content-Type `application/vnd.ipfs.ipns-record`.

this provides a more convenient alternative to `ipfs routing get /ipns/<name>` which returns JSON with base64-encoded data.
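
for illustration, the two output shapes differ roughly like this (the JSON field names match what the new tests decode; values abbreviated):

    $ ipfs routing get /ipns/<name>
    {"Extra":"<base64-encoded record>","Type":...}

    $ ipfs name get <name>
    <raw protobuf record bytes>
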
the raw output can be piped directly to `ipfs name inspect`:

    ipfs name get <name> | ipfs name inspect

spec: https://specs.ipfs.tech/ipns/ipns-record/

* feat(rpc): add `ipfs name put` command for IPNS record storage

adds `ipfs name put` to complement `ipfs name get`, allowing users to store IPNS records obtained from external sources without needing the private key. useful for backup, restore, and debugging workflows.

the command validates records by default (signature, sequence number). use `--force` to bypass validation for testing how routing handles malformed or outdated records.

also reorganizes test/cli files:
- rename http_rpc_* -> rpc_* to match existing convention
- merge name_get_put_test.go into name_test.go
- add file header comments documenting test purposes

* chore(deps): update go-ipfs-cmds to latest master

includes SetContentType() for dynamic Content-Type headers

---------

Co-authored-by: Marcin Rataj
---
 core/commands/block.go                 |   5 +
 core/commands/commands_test.go         |   2 +
 core/commands/dag/dag.go               |   3 +
 core/commands/dag/export.go            |   2 +
 core/commands/extra.go                 |   4 +-
 core/commands/get.go                   |  13 +
 core/commands/name/name.go             | 294 ++++++++++++-
 core/commands/profile.go               |   5 +
 docs/changelogs/v0.40.md               |  28 ++
 docs/examples/kubo-as-a-library/go.mod |   2 +-
 docs/examples/kubo-as-a-library/go.sum |   4 +-
 go.mod                                 |   2 +-
 go.sum                                 |   4 +-
 test/cli/name_test.go                  | 550 +++++++++++++++++++++++++
 test/cli/rpc_content_type_test.go      | 167 ++++++++
 test/cli/rpc_get_output_test.go        |  74 ++++
 test/dependencies/go.mod               |   2 +-
 test/dependencies/go.sum               |   4 +-
 18 files changed, 1144 insertions(+), 21 deletions(-)
 create mode 100644 test/cli/rpc_content_type_test.go
 create mode 100644 test/cli/rpc_get_output_test.go

diff --git a/core/commands/block.go b/core/commands/block.go
index b4b0fd204..1402a8531 100644
--- a/core/commands/block.go
+++ b/core/commands/block.go
@@ -98,6 +98,9 @@ var blockGetCmd = &cmds.Command{
 'ipfs block get' is a plumbing command for retrieving raw IPFS blocks.
 It takes a <key>, and outputs the block to stdout.
 `,
+ HTTP: &cmds.HTTPHelpText{
+ ResponseContentType: "application/vnd.ipld.raw",
+ },
 },
 
 Arguments: []cmds.Argument{
@@ -119,6 +122,8 @@ It takes a <key>, and outputs the block to stdout.
 return err
 }
 
+ res.SetEncodingType(cmds.OctetStream)
+ res.SetContentType("application/vnd.ipld.raw")
 return res.Emit(r)
 },
}
diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go
index d4bad9601..2573d4cd5 100644
--- a/core/commands/commands_test.go
+++ b/core/commands/commands_test.go
@@ -124,12 +124,14 @@ func TestCommands(t *testing.T) {
 "/multibase/transcode",
 "/multibase/list",
 "/name",
+ "/name/get",
 "/name/inspect",
 "/name/publish",
 "/name/pubsub",
 "/name/pubsub/cancel",
 "/name/pubsub/state",
 "/name/pubsub/subs",
+ "/name/put",
 "/name/resolve",
 "/object",
 "/object/data",
diff --git a/core/commands/dag/dag.go b/core/commands/dag/dag.go
index caf7a5474..73eeddd94 100644
--- a/core/commands/dag/dag.go
+++ b/core/commands/dag/dag.go
@@ -276,6 +276,9 @@ Note that at present only single root selections / .car files are supported.
 
 The output of blocks happens in strict DAG-traversal, first-seen, order.
CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/
 `,
+ HTTP: &cmds.HTTPHelpText{
+ ResponseContentType: "application/vnd.ipld.car",
+ },
 },
 Arguments: []cmds.Argument{
 cmds.StringArg("root", true, false, "CID of a root to recursively export").EnableStdin(),
diff --git a/core/commands/dag/export.go b/core/commands/dag/export.go
index 9f11c43de..48223f860 100644
--- a/core/commands/dag/export.go
+++ b/core/commands/dag/export.go
@@ -73,6 +73,8 @@ func dagExport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
 }()
 
+ res.SetEncodingType(cmds.OctetStream)
+ res.SetContentType("application/vnd.ipld.car")
 if err := res.Emit(pipeR); err != nil {
 pipeR.Close() // ignore the error if any
 return err
diff --git a/core/commands/extra.go b/core/commands/extra.go
index e70fb029a..12b02eabc 100644
--- a/core/commands/extra.go
+++ b/core/commands/extra.go
@@ -1,6 +1,8 @@
 package commands
 
-import cmds "github.com/ipfs/go-ipfs-cmds"
+import (
+ cmds "github.com/ipfs/go-ipfs-cmds"
+)
 
 func CreateCmdExtras(opts ...func(e *cmds.Extra)) *cmds.Extra {
 e := new(cmds.Extra)
diff --git a/core/commands/get.go b/core/commands/get.go
index 12a3ea8ca..804836b9a 100644
--- a/core/commands/get.go
+++ b/core/commands/get.go
@@ -45,6 +45,9 @@ To output a TAR archive instead of unpacked files, use '--archive' or '-a'.
 To compress the output with GZIP compression, use '--compress' or '-C'. You
 may also specify the level of compression by specifying '-l=<1-9>'.
 `,
+ HTTP: &cmds.HTTPHelpText{
+ ResponseContentType: "application/x-tar, or application/gzip when compress=true",
+ },
 },
 
 Arguments: []cmds.Argument{
@@ -103,6 +106,16 @@ may also specify the level of compression by specifying '-l=<1-9>'.
 reader.Close()
 }()
 
+ // Set Content-Type based on output format.
+ // When compression is enabled, output is gzip (or tar.gz for directories).
+ // Otherwise, tar is used as the transport format.
+ res.SetEncodingType(cmds.OctetStream)
+ if cmplvl != gzip.NoCompression {
+ res.SetContentType("application/gzip")
+ } else {
+ res.SetContentType("application/x-tar")
+ }
+
 return res.Emit(reader)
 },
 PostRun: cmds.PostRunMap{
diff --git a/core/commands/name/name.go b/core/commands/name/name.go
index 912629d68..73d540f62 100644
--- a/core/commands/name/name.go
+++ b/core/commands/name/name.go
@@ -3,15 +3,18 @@ package name
 import (
 "bytes"
 "encoding/hex"
+ "errors"
 "fmt"
 "io"
+ "strings"
 "text/tabwriter"
 "time"
 
 "github.com/ipfs/boxo/ipns"
 ipns_pb "github.com/ipfs/boxo/ipns/pb"
 cmds "github.com/ipfs/go-ipfs-cmds"
- cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
+ "github.com/ipfs/kubo/core/commands/cmdenv"
+ "github.com/ipfs/kubo/core/coreiface/options"
 "google.golang.org/protobuf/proto"
)

@@ -42,29 +45,30 @@

Examples:

Publish an <ipfs-path> with your default name:

-  > ipfs name publish /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
-  Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
+  > ipfs name publish /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
+  Published to k51qzi5uqu5dgklc20hksmmzhoy5lfrn5xcnryq6xp4r50b5yc0vnivpywfu9p: /ipfs/bafk...
Publish an <ipfs-path> with another name, added by an 'ipfs key' command:

-  > ipfs key gen --type=rsa --size=2048 mykey
-  > ipfs name publish --key=mykey /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
-  Published to QmSrPmbaUKA3ZodhzPWZnpFgcPMFWF4QsxXbkWfEptTBJd: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
+  > ipfs key gen --type=ed25519 mykey
+  k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
+  > ipfs name publish --key=mykey /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
+  Published to k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr: /ipfs/bafk...

Resolve the value of your name:

 > ipfs name resolve
-  /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
+  /ipfs/bafk...

Resolve the value of another name:

-  > ipfs name resolve QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
-  /ipfs/QmSiTko9JZyabH56y2fussEt1A5oDqsFXB3CkvAqraFryz
+  > ipfs name resolve k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
+  /ipfs/bafk...

Resolve the value of a dnslink:

-  > ipfs name resolve ipfs.io
-  /ipfs/QmaBvfZooxWkrv7D3r8LS9moNjzD2o525XMZze69hhoxf5
+  > ipfs name resolve specs.ipfs.tech
+  /ipfs/bafy...

 `,
 },

@@ -74,6 +78,8 @@ Resolve the value of a dnslink:
 "resolve": IpnsCmd,
 "pubsub": IpnsPubsubCmd,
 "inspect": IpnsInspectCmd,
+ "get": IpnsGetCmd,
+ "put": IpnsPutCmd,
 },
}

@@ -123,6 +129,9 @@ in Multibase. The Data field is DAG-CBOR represented as DAG-JSON.
 
 Passing --verify will verify signature against provided public key.
 `,
+ HTTP: &cmds.HTTPHelpText{
+ Description: "Request body should be `multipart/form-data` with the IPNS record bytes.",
+ },
 },
 Arguments: []cmds.Argument{
 cmds.FileArg("record", true, false, "The IPNS record payload to be verified.").EnableStdin(),
@@ -267,3 +276,266 @@ Passing --verify will verify signature against provided public key.
 }),
 },
}
+
+var IpnsGetCmd = &cmds.Command{
+ Status: cmds.Experimental,
+ Helptext: cmds.HelpText{
+ Tagline: "Retrieve a signed IPNS record.",
+ ShortDescription: `
+Retrieves the signed IPNS record for a given name from the routing system.
+
+The output is the raw IPNS record (protobuf) as defined in the IPNS spec:
+https://specs.ipfs.tech/ipns/ipns-record/
+
+The record can be inspected with 'ipfs name inspect':
+
+  ipfs name get <name> | ipfs name inspect
+
+This is equivalent to 'ipfs routing get /ipns/<name>' but only accepts
+IPNS names (not arbitrary routing keys).
+
+Note: The routing system returns the "best" IPNS record it knows about.
+For IPNS, "best" means the record with the highest sequence number.
+If multiple records exist (e.g., after using 'ipfs name put'), this command
+returns the one the routing system considers most current.
+`,
+ HTTP: &cmds.HTTPHelpText{
+ ResponseContentType: "application/vnd.ipfs.ipns-record",
+ },
+ },
+ Arguments: []cmds.Argument{
+ cmds.StringArg("name", true, false, "The IPNS name to look up."),
+ },
+ Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
+ api, err := cmdenv.GetApi(env, req)
+ if err != nil {
+ return err
+ }
+
+ // Normalize the argument: accept both "k51..." and "/ipns/k51..."
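+ // The routing layer keys IPNS records by the fully qualified
+ // "/ipns/<name>" path, so a bare name gets the prefix added here
+ // before the lookup; the returned bytes are the raw signed record.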
+ name := req.Arguments[0]
+ if !strings.HasPrefix(name, "/ipns/") {
+ name = "/ipns/" + name
+ }
+
+ data, err := api.Routing().Get(req.Context, name)
+ if err != nil {
+ return err
+ }
+
+ res.SetEncodingType(cmds.OctetStream)
+ res.SetContentType("application/vnd.ipfs.ipns-record")
+ return res.Emit(bytes.NewReader(data))
+ },
+}
+
+const (
+ forceOptionName = "force"
+ putAllowOfflineOption = "allow-offline"
+ allowDelegatedOption = "allow-delegated"
+ maxIPNSRecordSize = 10 << 10 // 10 KiB per IPNS spec
+)
+
+var errPutAllowOffline = errors.New("can't put while offline: pass `--allow-offline` to store locally or `--allow-delegated` if Ipns.DelegatedPublishers are set up")
+
+var IpnsPutCmd = &cmds.Command{
+ Status: cmds.Experimental,
+ Helptext: cmds.HelpText{
+ Tagline: "Store a pre-signed IPNS record in the routing system.",
+ ShortDescription: `
+Stores a pre-signed IPNS record in the routing system.
+
+This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
+https://specs.ipfs.tech/ipns/ipns-record/
+
+The record must be signed by the private key corresponding to the IPNS name.
+Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine them.
+`,
+ LongDescription: `
+Stores a pre-signed IPNS record in the routing system.
+
+This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
+https://specs.ipfs.tech/ipns/ipns-record/
+
+The record must be signed by the private key corresponding to the IPNS name.
+Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine them.
+
+Use Cases:
+
+  - Re-publishing third-party records: store someone else's signed record
+  - Cross-node sync: import records exported from another node
+  - Backup/restore: export with 'name get', restore with 'name put'
+
+Validation:
+
+By default, the command validates that:
+
+  - The record is a valid IPNS record (protobuf)
+  - The record size is within the 10 KiB limit
+  - The signature matches the provided IPNS name
+  - The record's sequence number is higher than any existing record
+
+The --force flag skips this command's validation and passes the record
+directly to the routing system. Note that --force only affects this command;
+it does not control how the routing system handles the record. The routing
+system may still reject invalid records or prefer records with higher sequence
+numbers. Use --force primarily for testing (e.g., to observe how the routing
+system reacts to incorrectly signed or malformed records).
+
+Important: Even after a successful 'name put', a subsequent 'name get' may
+return a different record if one with a higher sequence number exists.
+This is expected IPNS behavior, not a bug.
+
+Publishing Modes:
+
+By default, IPNS records are published to both the DHT and any configured
+HTTP delegated publishers. You can control this behavior with:
+
+  --allow-offline    Store locally without requiring network connectivity
+  --allow-delegated  Publish via HTTP delegated publishers only (no DHT)
+
+Examples:
+
+Export and re-import a record:
+
+  > ipfs name get k51... > record.bin
+  > ipfs name put k51... record.bin
+
+Store a record received from someone else:
+
+  > ipfs name put k51... third-party-record.bin
+
+Force store a record to test routing validation:
+
+  > ipfs name put --force k51...
possibly-invalid-record.bin +`, + HTTP: &cmds.HTTPHelpText{ + Description: "Request body should be `multipart/form-data` with the IPNS record bytes.", + }, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("name", true, false, "The IPNS name to store the record for (e.g., k51... or /ipns/k51...)."), + cmds.FileArg("record", true, false, "Path to file containing the signed IPNS record.").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.BoolOption(forceOptionName, "f", "Skip validation (signature, sequence, size)."), + cmds.BoolOption(putAllowOfflineOption, "Store locally without broadcasting to the network."), + cmds.BoolOption(allowDelegatedOption, "Publish via HTTP delegated publishers only (no DHT)."), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + api, err := cmdenv.GetApi(env, req) + if err != nil { + return err + } + + // Parse options + force, _ := req.Options[forceOptionName].(bool) + allowOffline, _ := req.Options[putAllowOfflineOption].(bool) + allowDelegated, _ := req.Options[allowDelegatedOption].(bool) + + // Validate flag combinations + if allowOffline && allowDelegated { + return errors.New("cannot use both --allow-offline and --allow-delegated flags") + } + + // Handle different publishing modes + if allowDelegated { + // AllowDelegated mode: check if delegated publishers are configured + cfg, err := nd.Repo.Config() + if err != nil { + return fmt.Errorf("failed to read config: %w", err) + } + delegatedPublishers := cfg.DelegatedPublishersWithAutoConf() + if len(delegatedPublishers) == 0 { + return errors.New("no delegated publishers configured: add Ipns.DelegatedPublishers or use --allow-offline for local-only publishing") + } + // For allow-delegated mode, we proceed even if offline + // since we're using HTTP publishing via delegated publishers + } + + // Parse the IPNS name argument + nameArg := req.Arguments[0] + if !strings.HasPrefix(nameArg, "/ipns/") { + nameArg = "/ipns/" + nameArg + } + // Extract the name part after /ipns/ + namePart := strings.TrimPrefix(nameArg, "/ipns/") + name, err := ipns.NameFromString(namePart) + if err != nil { + return fmt.Errorf("invalid IPNS name: %w", err) + } + + // Read raw record bytes from file/stdin + file, err := cmdenv.GetFileArg(req.Files.Entries()) + if err != nil { + return err + } + defer file.Close() + + // Read record data (limit to 1 MiB for memory safety) + data, err := io.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil { + return fmt.Errorf("failed to read record: %w", err) + } + if len(data) == 0 { + return errors.New("record is empty") + } + + // Validate unless --force + if !force { + // Check size limit per IPNS spec + if len(data) > maxIPNSRecordSize { + return fmt.Errorf("record exceeds maximum size of %d bytes, use --force to skip size check", maxIPNSRecordSize) + } + rec, err := ipns.UnmarshalRecord(data) + if err != nil { + return fmt.Errorf("invalid IPNS record: %w", err) + } + + // Validate signature against provided name + err = ipns.ValidateWithName(rec, name) + if err != nil { + return fmt.Errorf("record validation failed: %w", err) + } + + // Check for sequence conflicts with existing record + existingData, err := api.Routing().Get(req.Context, nameArg) + if err == nil { + // We have an existing record, check sequence + existingRec, parseErr := ipns.UnmarshalRecord(existingData) + if parseErr == nil { + existingSeq, seqErr := existingRec.Sequence() + newSeq, newSeqErr := 
rec.Sequence()
+ if seqErr == nil && newSeqErr == nil {
+ if existingSeq >= newSeq {
+ return fmt.Errorf("existing record has sequence %d >= new record sequence %d, use --force to overwrite", existingSeq, newSeq)
+ }
+ }
+ }
+ }
+ // If Get fails (no existing record), that's fine - proceed with put
+ }
+
+ // Publish the original bytes as-is
+ // When allowDelegated is true, we set allowOffline to allow the operation
+ // even without DHT connectivity (delegated publishers use HTTP)
+ opts := []options.RoutingPutOption{
+ options.Routing.AllowOffline(allowOffline || allowDelegated),
+ }
+
+ err = api.Routing().Put(req.Context, nameArg, data, opts...)
+ if err != nil {
+ if err.Error() == "can't put while offline" {
+ return errPutAllowOffline
+ }
+ return err
+ }
+
+ return nil
+ },
+}
diff --git a/core/commands/profile.go b/core/commands/profile.go
index 9f54e0612..b230c873e 100644
--- a/core/commands/profile.go
+++ b/core/commands/profile.go
@@ -70,6 +70,9 @@ However, it could reveal:
 - Memory offsets of various data structures.
 - Any modifications you've made to go-ipfs.
 `,
+ HTTP: &cmds.HTTPHelpText{
+ ResponseContentType: "application/zip",
+ },
 },
 NoLocal: true,
 Options: []cmds.Option{
@@ -121,6 +124,8 @@ However, it could reveal:
 archive.Close()
 _ = w.CloseWithError(err)
 }()
+ res.SetEncodingType(cmds.OctetStream)
+ res.SetContentType("application/zip")
 return res.Emit(r)
 },
 PostRun: cmds.PostRunMap{
diff --git a/docs/changelogs/v0.40.md b/docs/changelogs/v0.40.md
index 127e93c73..d3538aaac 100644
--- a/docs/changelogs/v0.40.md
+++ b/docs/changelogs/v0.40.md
@@ -22,6 +22,8 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
 - [🌐 No unnecessary DNS lookups for AutoTLS addresses](#-no-unnecessary-dns-lookups-for-autotls-addresses)
 - [⏱️ Configurable gateway request duration limit](#️-configurable-gateway-request-duration-limit)
 - [🔧 Recovery from corrupted MFS root](#-recovery-from-corrupted-mfs-root)
+ - [📡 RPC `Content-Type` headers for binary responses](#-rpc-content-type-headers-for-binary-responses)
+ - [🔖 New `ipfs name get|put` commands](#-new-ipfs-name-getput-commands)
 - [📋 Long listing format for `ipfs ls`](#-long-listing-format-for-ipfs-ls)
 - [📦️ Dependency updates](#-dependency-updates)
 - [📝 Changelog](#-changelog)
@@ -149,6 +151,32 @@ $ ipfs files chroot --confirm QmYourBackupCID
 
 See `ipfs files chroot --help` for details.
 
+#### 📡 RPC `Content-Type` headers for binary responses
+
+HTTP RPC endpoints that return binary data now set appropriate `Content-Type` headers, making it easier to integrate with HTTP clients and tooling that rely on MIME types. On the CLI these commands behave the same as before, but over HTTP RPC you now get proper headers:
+
+| Endpoint               | Content-Type                              |
+|------------------------|-------------------------------------------|
+| `/api/v0/get`          | `application/x-tar` or `application/gzip` |
+| `/api/v0/dag/export`   | `application/vnd.ipld.car`                |
+| `/api/v0/block/get`    | `application/vnd.ipld.raw`                |
+| `/api/v0/name/get`     | `application/vnd.ipfs.ipns-record`        |
+| `/api/v0/diag/profile` | `application/zip`                         |
+
+#### 🔖 New `ipfs name get|put` commands
+
+You can now back up, restore, and share IPNS records without needing the private key.
+
+```console
+$ ipfs name get /ipns/k51... > record.bin
+$ ipfs name get /ipns/k51... | ipfs name inspect
+$ ipfs name put k51... record.bin
+```
+
+These are low-level tools primarily for debugging and testing IPNS.
+
+The `put` command validates records by default.
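+
+For example, putting a record that is older than one the node already knows fails the sequence check (the name and sequence numbers below are illustrative):
+
+```console
+$ ipfs name put k51... stale-record.bin
+Error: existing record has sequence 200 >= new record sequence 100, use --force to overwrite
+```
+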
Use `--force` to skip validation and test how routing systems handle malformed or outdated records. Note that `--force` only bypasses this command's checks; the routing system may still reject invalid records. + #### 📋 Long listing format for `ipfs ls` The `ipfs ls` command now supports `--long` (`-l`) flag for displaying Unix-style file permissions and modification times. This works with files added using `--preserve-mode` and `--preserve-mtime`. See `ipfs ls --help` for format details and examples. diff --git a/docs/examples/kubo-as-a-library/go.mod b/docs/examples/kubo-as-a-library/go.mod index c109c37f6..1a3d9aeb5 100644 --- a/docs/examples/kubo-as-a-library/go.mod +++ b/docs/examples/kubo-as-a-library/go.mod @@ -85,7 +85,7 @@ require ( github.com/ipfs/go-ds-pebble v0.5.9 // indirect github.com/ipfs/go-dsqueue v0.1.2 // indirect github.com/ipfs/go-fs-lock v0.1.1 // indirect - github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect + github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.4 // indirect github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect diff --git a/docs/examples/kubo-as-a-library/go.sum b/docs/examples/kubo-as-a-library/go.sum index 1a50e42b4..ca60866cc 100644 --- a/docs/examples/kubo-as-a-library/go.sum +++ b/docs/examples/kubo-as-a-library/go.sum @@ -303,8 +303,8 @@ github.com/ipfs/go-dsqueue v0.1.2 h1:jBMsgvT9Pj9l3cqI0m5jYpW/aWDYkW4Us6EuzrcSGbs github.com/ipfs/go-dsqueue v0.1.2/go.mod h1:OU94YuMVUIF/ctR7Ysov9PI4gOa2XjPGN9nd8imSv78= github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw= github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0= -github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ= -github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk= +github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1 h1:l1DaJI5/+uOKdmvYrXwN3j/zOApLr8EBB0IGMTB7UaM= +github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1/go.mod h1:YmhRbpaLKg40i9Ogj2+L41tJ+8x50fF8u1FJJD/WNhc= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= diff --git a/go.mod b/go.mod index a4a5c4ecb..cb38a63ce 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/ipfs/go-ds-measure v0.2.2 github.com/ipfs/go-ds-pebble v0.5.9 github.com/ipfs/go-fs-lock v0.1.1 - github.com/ipfs/go-ipfs-cmds v0.15.0 + github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1 github.com/ipfs/go-ipld-cbor v0.2.1 github.com/ipfs/go-ipld-format v0.6.3 github.com/ipfs/go-ipld-git v0.1.1 diff --git a/go.sum b/go.sum index 475c95728..562bd85a6 100644 --- a/go.sum +++ b/go.sum @@ -374,8 +374,8 @@ github.com/ipfs/go-dsqueue v0.1.2 h1:jBMsgvT9Pj9l3cqI0m5jYpW/aWDYkW4Us6EuzrcSGbs github.com/ipfs/go-dsqueue v0.1.2/go.mod h1:OU94YuMVUIF/ctR7Ysov9PI4gOa2XjPGN9nd8imSv78= github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw= github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0= -github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ= -github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk= 
+github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1 h1:l1DaJI5/+uOKdmvYrXwN3j/zOApLr8EBB0IGMTB7UaM= +github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1/go.mod h1:YmhRbpaLKg40i9Ogj2+L41tJ+8x50fF8u1FJJD/WNhc= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= diff --git a/test/cli/name_test.go b/test/cli/name_test.go index cf5df2bb0..c9ce0ac26 100644 --- a/test/cli/name_test.go +++ b/test/cli/name_test.go @@ -1,3 +1,7 @@ +// Tests for `ipfs name` CLI commands. +// - TestName: tests name publish, resolve, and inspect +// - TestNameGetPut: tests name get and put for raw IPNS record handling + package cli import ( @@ -5,6 +9,7 @@ import ( "encoding/json" "fmt" "os" + "path/filepath" "strings" "testing" @@ -337,3 +342,548 @@ func TestName(t *testing.T) { require.Contains(t, res.Stdout.String(), publishPath2, "New content should now be published") }) } + +func TestNameGetPut(t *testing.T) { + t.Parallel() + + const ( + fixturePath = "fixtures/TestName.car" + fixtureCid = "bafybeidg3uxibfrt7uqh7zd5yaodetik7wjwi4u7rwv2ndbgj6ec7lsv2a" + ) + + makeDaemon := func(t *testing.T, daemonArgs ...string) *harness.Node { + node := harness.NewT(t).NewNode().Init("--profile=test") + r, err := os.Open(fixturePath) + require.NoError(t, err) + defer r.Close() + err = node.IPFSDagImport(r, fixtureCid) + require.NoError(t, err) + return node.StartDaemon(daemonArgs...) + } + + // makeKey creates a unique IPNS key for a test and returns the IPNS name + makeKey := func(t *testing.T, node *harness.Node, keyName string) ipns.Name { + res := node.IPFS("key", "gen", "--type=ed25519", keyName) + keyID := strings.TrimSpace(res.Stdout.String()) + name, err := ipns.NameFromString(keyID) + require.NoError(t, err) + return name + } + + // makeExternalRecord creates an IPNS record on an ephemeral node that is + // shut down before returning. This ensures the test node has no local + // knowledge of the record, properly testing put/get functionality. + // We use short --lifetime so if IPNS records from tests get published on + // the public DHT, they won't waste storage for long. + makeExternalRecord := func(t *testing.T, h *harness.Harness, publishPath string, publishArgs ...string) (ipns.Name, []byte) { + node := h.NewNode().Init("--profile=test") + + r, err := os.Open(fixturePath) + require.NoError(t, err) + defer r.Close() + err = node.IPFSDagImport(r, fixtureCid) + require.NoError(t, err) + + node.StartDaemon() + + res := node.IPFS("key", "gen", "--type=ed25519", "ephemeral-key") + keyID := strings.TrimSpace(res.Stdout.String()) + ipnsName, err := ipns.NameFromString(keyID) + require.NoError(t, err) + + args := []string{"name", "publish", "--key=ephemeral-key", "--lifetime=5m"} + args = append(args, publishArgs...) + args = append(args, publishPath) + node.IPFS(args...) 
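+ // The publish above created a record signed by the ephemeral key; it is
+ // retrieved below as raw bytes so callers can replay it via `name put`.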
+ + res = node.IPFS("name", "get", ipnsName.String()) + record := res.Stdout.Bytes() + require.NotEmpty(t, record) + + node.StopDaemon() + + return ipnsName, record + } + + t.Run("name get retrieves IPNS record", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + publishPath := "/ipfs/" + fixtureCid + ipnsName := makeKey(t, node, "testkey") + + // publish a record first + node.IPFS("name", "publish", "--key=testkey", "--lifetime=5m", publishPath) + + // retrieve the record using name get + res := node.IPFS("name", "get", ipnsName.String()) + record := res.Stdout.Bytes() + require.NotEmpty(t, record, "expected non-empty IPNS record") + + // verify the record is valid by inspecting it + res = node.PipeToIPFS(bytes.NewReader(record), "name", "inspect", "--verify="+ipnsName.String()) + require.Contains(t, res.Stdout.String(), "Valid: true") + require.Contains(t, res.Stdout.String(), publishPath) + }) + + t.Run("name get accepts /ipns/ prefix", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + publishPath := "/ipfs/" + fixtureCid + ipnsName := makeKey(t, node, "testkey") + + node.IPFS("name", "publish", "--key=testkey", "--lifetime=5m", publishPath) + + // retrieve with /ipns/ prefix + res := node.IPFS("name", "get", "/ipns/"+ipnsName.String()) + record := res.Stdout.Bytes() + require.NotEmpty(t, record) + + // verify the record + res = node.PipeToIPFS(bytes.NewReader(record), "name", "inspect", "--verify="+ipnsName.String()) + require.Contains(t, res.Stdout.String(), "Valid: true") + }) + + t.Run("name get fails for non-existent name", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + // try to get a record for a random peer ID that doesn't exist + res := node.RunIPFS("name", "get", "12D3KooWRirYjmmQATx2kgHBfky6DADsLP7ex1t7BRxJ6nqLs9WH") + require.Error(t, res.Err) + require.NotEqual(t, 0, res.ExitCode()) + }) + + t.Run("name get fails for invalid name format", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + res := node.RunIPFS("name", "get", "not-a-valid-ipns-name") + require.Error(t, res.Err) + require.NotEqual(t, 0, res.ExitCode()) + }) + + t.Run("name put accepts /ipns/ prefix", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + publishPath := "/ipfs/" + fixtureCid + ipnsName := makeKey(t, node, "testkey") + + node.IPFS("name", "publish", "--key=testkey", "--lifetime=5m", publishPath) + + res := node.IPFS("name", "get", ipnsName.String()) + record := res.Stdout.Bytes() + + // put with /ipns/ prefix + res = node.PipeToIPFS(bytes.NewReader(record), "name", "put", "--force", "/ipns/"+ipnsName.String()) + require.NoError(t, res.Err) + }) + + t.Run("name put fails for invalid name format", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + // create a dummy file + recordFile := filepath.Join(node.Dir, "dummy.bin") + err := os.WriteFile(recordFile, []byte("dummy"), 0644) + require.NoError(t, err) + + res := node.RunIPFS("name", "put", "not-a-valid-ipns-name", recordFile) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "invalid IPNS name") + }) + + t.Run("name put rejects oversized record", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + ipnsName := makeKey(t, node, "testkey") + + // create a file larger than 10 KiB + oversizedRecord := make([]byte, 11*1024) + recordFile := filepath.Join(node.Dir, 
"oversized.bin") + err := os.WriteFile(recordFile, oversizedRecord, 0644) + require.NoError(t, err) + + res := node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "exceeds maximum size") + }) + + t.Run("name put --force skips size check", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + ipnsName := makeKey(t, node, "testkey") + + // create a file larger than 10 KiB + oversizedRecord := make([]byte, 11*1024) + recordFile := filepath.Join(node.Dir, "oversized.bin") + err := os.WriteFile(recordFile, oversizedRecord, 0644) + require.NoError(t, err) + + // with --force, size check is skipped (but routing will likely reject it) + res := node.RunIPFS("name", "put", "--force", ipnsName.String(), recordFile) + // the command itself should not fail on size, but routing may reject + // we just verify it doesn't fail with "exceeds maximum size" + if res.Err != nil { + require.NotContains(t, res.Stderr.String(), "exceeds maximum size") + } + }) + + t.Run("name put stores IPNS record", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral node (shut down before test node starts) + ipnsName, record := makeExternalRecord(t, h, publishPath) + + // start test node (has no local knowledge of the record) + node := makeDaemon(t) + defer node.StopDaemon() + + // put the record (should succeed since no existing record) + recordFile := filepath.Join(node.Dir, "record.bin") + err := os.WriteFile(recordFile, record, 0644) + require.NoError(t, err) + + res := node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.NoError(t, res.Err) + + // verify the record was stored by getting it back + res = node.IPFS("name", "get", ipnsName.String()) + retrievedRecord := res.Stdout.Bytes() + require.Equal(t, record, retrievedRecord, "stored record should match original") + }) + + t.Run("name put with --force overwrites existing record", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral node + ipnsName, record := makeExternalRecord(t, h, publishPath) + + // start test node + node := makeDaemon(t) + defer node.StopDaemon() + + // first put the record normally + recordFile := filepath.Join(node.Dir, "record.bin") + err := os.WriteFile(recordFile, record, 0644) + require.NoError(t, err) + + res := node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.NoError(t, res.Err) + + // now try to put the same record again (should fail - same sequence) + res = node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "existing record has sequence") + + // put the record with --force (should succeed) + res = node.RunIPFS("name", "put", "--force", ipnsName.String(), recordFile) + require.NoError(t, res.Err) + }) + + t.Run("name put validates signature against name", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral node + _, record := makeExternalRecord(t, h, publishPath) + + // start test node + node := makeDaemon(t) + defer node.StopDaemon() + + // write the record to a file + recordFile := filepath.Join(node.Dir, "record.bin") + err := os.WriteFile(recordFile, record, 0644) + require.NoError(t, err) + + // try to put with a wrong name (should fail validation) + wrongName := 
"12D3KooWRirYjmmQATx2kgHBfky6DADsLP7ex1t7BRxJ6nqLs9WH" + res := node.RunIPFS("name", "put", wrongName, recordFile) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "record validation failed") + }) + + t.Run("name put with --force skips command validation", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral node + ipnsName, record := makeExternalRecord(t, h, publishPath) + + // start test node + node := makeDaemon(t) + defer node.StopDaemon() + + // with --force the command skips its own validation (signature, sequence check) + // and passes the record directly to the routing layer + res := node.PipeToIPFS(bytes.NewReader(record), "name", "put", "--force", ipnsName.String()) + require.NoError(t, res.Err) + }) + + t.Run("name put rejects empty record", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + ipnsName := makeKey(t, node, "testkey") + + // create an empty file + recordFile := filepath.Join(node.Dir, "empty.bin") + err := os.WriteFile(recordFile, []byte{}, 0644) + require.NoError(t, err) + + res := node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "record is empty") + }) + + t.Run("name put rejects invalid record", func(t *testing.T) { + t.Parallel() + node := makeDaemon(t) + defer node.StopDaemon() + + ipnsName := makeKey(t, node, "testkey") + + // create a file with garbage data + recordFile := filepath.Join(node.Dir, "garbage.bin") + err := os.WriteFile(recordFile, []byte("not a valid ipns record"), 0644) + require.NoError(t, err) + + res := node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "invalid IPNS record") + }) + + t.Run("name put accepts stdin", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral node + ipnsName, record := makeExternalRecord(t, h, publishPath) + + // start test node (has no local knowledge of the record) + node := makeDaemon(t) + defer node.StopDaemon() + + // put via stdin (no --force needed since no existing record) + res := node.PipeToIPFS(bytes.NewReader(record), "name", "put", ipnsName.String()) + require.NoError(t, res.Err) + }) + + t.Run("name put fails when offline without --allow-offline", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral node + ipnsName, record := makeExternalRecord(t, h, publishPath) + + // write the record to a file + recordFile := filepath.Join(h.Dir, "record.bin") + err := os.WriteFile(recordFile, record, 0644) + require.NoError(t, err) + + // start test node in offline mode + node := h.NewNode().Init("--profile=test") + node.StartDaemon("--offline") + defer node.StopDaemon() + + // try to put without --allow-offline (should fail) + res := node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.Error(t, res.Err) + // error can come from our command or from the routing layer + stderr := res.Stderr.String() + require.True(t, strings.Contains(stderr, "offline") || strings.Contains(stderr, "online mode"), + "expected offline-related error, got: %s", stderr) + }) + + t.Run("name put succeeds with --allow-offline", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral 
node + ipnsName, record := makeExternalRecord(t, h, publishPath) + + // write the record to a file + recordFile := filepath.Join(h.Dir, "record.bin") + err := os.WriteFile(recordFile, record, 0644) + require.NoError(t, err) + + // start test node in offline mode + node := h.NewNode().Init("--profile=test") + node.StartDaemon("--offline") + defer node.StopDaemon() + + // put with --allow-offline (should succeed, no --force needed since no existing record) + res := node.RunIPFS("name", "put", "--allow-offline", ipnsName.String(), recordFile) + require.NoError(t, res.Err) + }) + + t.Run("name get/put round trip preserves record bytes", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create a record on an ephemeral node + ipnsName, originalRecord := makeExternalRecord(t, h, publishPath) + + // start test node (has no local knowledge of the record) + node := makeDaemon(t) + defer node.StopDaemon() + + // put the record + res := node.PipeToIPFS(bytes.NewReader(originalRecord), "name", "put", ipnsName.String()) + require.NoError(t, res.Err) + + // get the record back + res = node.IPFS("name", "get", ipnsName.String()) + retrievedRecord := res.Stdout.Bytes() + + // the records should be byte-for-byte identical + require.Equal(t, originalRecord, retrievedRecord, "record bytes should be preserved after get/put round trip") + }) + + t.Run("name put --force allows storing lower sequence record", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create an ephemeral node to generate two records with different sequences + ephNode := h.NewNode().Init("--profile=test") + + r, err := os.Open(fixturePath) + require.NoError(t, err) + err = ephNode.IPFSDagImport(r, fixtureCid) + r.Close() + require.NoError(t, err) + + ephNode.StartDaemon() + + res := ephNode.IPFS("key", "gen", "--type=ed25519", "ephemeral-key") + keyID := strings.TrimSpace(res.Stdout.String()) + ipnsName, err := ipns.NameFromString(keyID) + require.NoError(t, err) + + // publish record with sequence 100 + ephNode.IPFS("name", "publish", "--key=ephemeral-key", "--lifetime=5m", "--sequence=100", publishPath) + res = ephNode.IPFS("name", "get", ipnsName.String()) + record100 := res.Stdout.Bytes() + + // publish record with sequence 200 + ephNode.IPFS("name", "publish", "--key=ephemeral-key", "--lifetime=5m", "--sequence=200", publishPath) + res = ephNode.IPFS("name", "get", ipnsName.String()) + record200 := res.Stdout.Bytes() + + ephNode.StopDaemon() + + // start test node (has no local knowledge of the records) + node := makeDaemon(t) + defer node.StopDaemon() + + // helper to get sequence from record + getSequence := func(record []byte) uint64 { + res := node.PipeToIPFS(bytes.NewReader(record), "name", "inspect", "--enc=json") + var result name.IpnsInspectResult + err := json.Unmarshal(res.Stdout.Bytes(), &result) + require.NoError(t, err) + require.NotNil(t, result.Entry.Sequence) + return *result.Entry.Sequence + } + + // verify we have the right records + require.Equal(t, uint64(100), getSequence(record100)) + require.Equal(t, uint64(200), getSequence(record200)) + + // put record with sequence 200 first + res = node.PipeToIPFS(bytes.NewReader(record200), "name", "put", ipnsName.String()) + require.NoError(t, res.Err) + + // verify current record has sequence 200 + res = node.IPFS("name", "get", ipnsName.String()) + require.Equal(t, uint64(200), getSequence(res.Stdout.Bytes())) + + // now put the lower sequence record (100) with 
--force + // this should succeed (--force bypasses our sequence check) + res = node.PipeToIPFS(bytes.NewReader(record100), "name", "put", "--force", ipnsName.String()) + require.NoError(t, res.Err, "putting lower sequence record with --force should succeed") + + // note: when we get the record, IPNS resolution returns the "best" record + // (highest sequence), so we'll get the sequence 200 record back + // this is expected IPNS behavior - the put succeeded, but get returns the best record + res = node.IPFS("name", "get", ipnsName.String()) + retrievedSeq := getSequence(res.Stdout.Bytes()) + require.Equal(t, uint64(200), retrievedSeq, "IPNS get returns the best (highest sequence) record") + }) + + t.Run("name put sequence conflict detection", func(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + publishPath := "/ipfs/" + fixtureCid + + // create an ephemeral node to generate two records with different sequences + ephNode := h.NewNode().Init("--profile=test") + + r, err := os.Open(fixturePath) + require.NoError(t, err) + err = ephNode.IPFSDagImport(r, fixtureCid) + r.Close() + require.NoError(t, err) + + ephNode.StartDaemon() + + res := ephNode.IPFS("key", "gen", "--type=ed25519", "ephemeral-key") + keyID := strings.TrimSpace(res.Stdout.String()) + ipnsName, err := ipns.NameFromString(keyID) + require.NoError(t, err) + + // publish record with sequence 100 + ephNode.IPFS("name", "publish", "--key=ephemeral-key", "--lifetime=5m", "--sequence=100", publishPath) + res = ephNode.IPFS("name", "get", ipnsName.String()) + record100 := res.Stdout.Bytes() + + // publish record with sequence 200 + ephNode.IPFS("name", "publish", "--key=ephemeral-key", "--lifetime=5m", "--sequence=200", publishPath) + res = ephNode.IPFS("name", "get", ipnsName.String()) + record200 := res.Stdout.Bytes() + + ephNode.StopDaemon() + + // start test node (has no local knowledge of the records) + node := makeDaemon(t) + defer node.StopDaemon() + + // put record with sequence 200 first + res = node.PipeToIPFS(bytes.NewReader(record200), "name", "put", ipnsName.String()) + require.NoError(t, res.Err) + + // try to put record with sequence 100 (lower than current 200) + recordFile := filepath.Join(node.Dir, "record100.bin") + err = os.WriteFile(recordFile, record100, 0644) + require.NoError(t, err) + + res = node.RunIPFS("name", "put", ipnsName.String(), recordFile) + require.Error(t, res.Err) + require.Contains(t, res.Stderr.String(), "existing record has sequence 200 >= new record sequence 100") + }) +} diff --git a/test/cli/rpc_content_type_test.go b/test/cli/rpc_content_type_test.go new file mode 100644 index 000000000..9124cfaac --- /dev/null +++ b/test/cli/rpc_content_type_test.go @@ -0,0 +1,167 @@ +// Tests HTTP RPC Content-Type headers. +// These tests verify that RPC endpoints return correct Content-Type headers +// for binary responses (CAR, tar, gzip, raw blocks, IPNS records). + +package cli + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestRPCDagExportContentType verifies that the RPC endpoint for `ipfs dag export` +// returns the correct Content-Type header for CAR output. 
+func TestRPCDagExportContentType(t *testing.T) {
+ t.Parallel()
+
+ node := harness.NewT(t).NewNode().Init()
+ node.StartDaemon("--offline")
+
+ // add test content
+ cid := node.IPFSAddStr("test content for dag export")
+
+ url := node.APIURL() + "/api/v0/dag/export?arg=" + cid
+
+ req, err := http.NewRequest(http.MethodPost, url, nil)
+ require.NoError(t, err)
+
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.Equal(t, "application/vnd.ipld.car", resp.Header.Get("Content-Type"),
+ "dag export should return application/vnd.ipld.car")
+}
+
+// TestRPCBlockGetContentType verifies that the RPC endpoint for `ipfs block get`
+// returns the correct Content-Type header for raw block data.
+func TestRPCBlockGetContentType(t *testing.T) {
+ t.Parallel()
+
+ node := harness.NewT(t).NewNode().Init()
+ node.StartDaemon("--offline")
+
+ // add test content
+ cid := node.IPFSAddStr("test content for block get")
+
+ url := node.APIURL() + "/api/v0/block/get?arg=" + cid
+
+ req, err := http.NewRequest(http.MethodPost, url, nil)
+ require.NoError(t, err)
+
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.Equal(t, "application/vnd.ipld.raw", resp.Header.Get("Content-Type"),
+ "block get should return application/vnd.ipld.raw")
+}
+
+// TestRPCProfileContentType verifies that the RPC endpoint for `ipfs diag profile`
+// returns the correct Content-Type header for ZIP output.
+func TestRPCProfileContentType(t *testing.T) {
+ t.Parallel()
+
+ node := harness.NewT(t).NewNode().Init()
+ node.StartDaemon("--offline")
+
+ // use profile-time=0 to skip sampling profiles and return quickly
+ url := node.APIURL() + "/api/v0/diag/profile?profile-time=0"
+
+ req, err := http.NewRequest(http.MethodPost, url, nil)
+ require.NoError(t, err)
+
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.Equal(t, "application/zip", resp.Header.Get("Content-Type"),
+ "diag profile should return application/zip")
+}
+
+// TestHTTPRPCNameGet verifies the behavior of `ipfs name get` vs `ipfs routing get`:
+//
+// `ipfs name get <name>`:
+//   - Purpose: dedicated command for retrieving IPNS records
+//   - Returns: raw IPNS record bytes (protobuf)
+//   - Content-Type: application/vnd.ipfs.ipns-record
+//
+// `ipfs routing get /ipns/<name>`:
+//   - Purpose: generic routing get for any key type
+//   - Returns: JSON with base64-encoded record in "Extra" field
+//   - Content-Type: application/json
+//
+// Both commands retrieve the same underlying IPNS record data.
+func TestHTTPRPCNameGet(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon() // must be online to use routing + + // add test content and publish IPNS record + cid := node.IPFSAddStr("test content for name get") + node.IPFS("name", "publish", cid) + + // get the node's peer ID (which is also the IPNS name) + peerID := node.PeerID().String() + + // Test ipfs name get - returns raw IPNS record bytes with specific Content-Type + nameGetURL := node.APIURL() + "/api/v0/name/get?arg=" + peerID + nameGetReq, err := http.NewRequest(http.MethodPost, nameGetURL, nil) + require.NoError(t, err) + + nameGetResp, err := http.DefaultClient.Do(nameGetReq) + require.NoError(t, err) + defer nameGetResp.Body.Close() + + assert.Equal(t, http.StatusOK, nameGetResp.StatusCode) + assert.Equal(t, "application/vnd.ipfs.ipns-record", nameGetResp.Header.Get("Content-Type"), + "name get should return application/vnd.ipfs.ipns-record") + + nameGetBytes, err := io.ReadAll(nameGetResp.Body) + require.NoError(t, err) + + // Test ipfs routing get /ipns/... - returns JSON with base64-encoded record + routingGetURL := node.APIURL() + "/api/v0/routing/get?arg=/ipns/" + peerID + routingGetReq, err := http.NewRequest(http.MethodPost, routingGetURL, nil) + require.NoError(t, err) + + routingGetResp, err := http.DefaultClient.Do(routingGetReq) + require.NoError(t, err) + defer routingGetResp.Body.Close() + + assert.Equal(t, http.StatusOK, routingGetResp.StatusCode) + assert.Equal(t, "application/json", routingGetResp.Header.Get("Content-Type"), + "routing get should return application/json") + + // Parse JSON response and decode base64 record from "Extra" field + var routingResp struct { + Extra string `json:"Extra"` + Type int `json:"Type"` + } + err = json.NewDecoder(routingGetResp.Body).Decode(&routingResp) + require.NoError(t, err) + + routingGetBytes, err := base64.StdEncoding.DecodeString(routingResp.Extra) + require.NoError(t, err) + + // Verify both commands return identical IPNS record bytes + assert.Equal(t, nameGetBytes, routingGetBytes, + "name get and routing get should return identical IPNS record bytes") + + // Verify the record can be inspected and contains the published CID + inspectOutput := node.PipeToIPFS(bytes.NewReader(nameGetBytes), "name", "inspect") + assert.Contains(t, inspectOutput.Stdout.String(), cid, + "ipfs name inspect should show the published CID") +} diff --git a/test/cli/rpc_get_output_test.go b/test/cli/rpc_get_output_test.go new file mode 100644 index 000000000..ded237958 --- /dev/null +++ b/test/cli/rpc_get_output_test.go @@ -0,0 +1,74 @@ +package cli + +import ( + "net/http" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestRPCGetContentType verifies that the RPC endpoint for `ipfs get` returns +// the correct Content-Type header based on output format options. 
+// +// Output formats and expected Content-Type: +// - default (no flags): tar (transport format) -> application/x-tar +// - --archive: tar archive -> application/x-tar +// - --compress: gzip -> application/gzip +// - --archive --compress: tar.gz -> application/gzip +// +// Fixes: https://github.com/ipfs/kubo/issues/2376 +func TestRPCGetContentType(t *testing.T) { + t.Parallel() + + node := harness.NewT(t).NewNode().Init() + node.StartDaemon("--offline") + + // add test content + cid := node.IPFSAddStr("test content for Content-Type header verification") + + tests := []struct { + name string + query string + expectedContentType string + }{ + { + name: "default returns application/x-tar", + query: "?arg=" + cid, + expectedContentType: "application/x-tar", + }, + { + name: "archive=true returns application/x-tar", + query: "?arg=" + cid + "&archive=true", + expectedContentType: "application/x-tar", + }, + { + name: "compress=true returns application/gzip", + query: "?arg=" + cid + "&compress=true", + expectedContentType: "application/gzip", + }, + { + name: "archive=true&compress=true returns application/gzip", + query: "?arg=" + cid + "&archive=true&compress=true", + expectedContentType: "application/gzip", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + url := node.APIURL() + "/api/v0/get" + tt.query + + req, err := http.NewRequest(http.MethodPost, url, nil) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, tt.expectedContentType, resp.Header.Get("Content-Type"), + "Content-Type header mismatch for %s", tt.name) + }) + } +} diff --git a/test/dependencies/go.mod b/test/dependencies/go.mod index 1326c72b4..a37c8035b 100644 --- a/test/dependencies/go.mod +++ b/test/dependencies/go.mod @@ -141,7 +141,7 @@ require ( github.com/ipfs/go-cid v0.6.0 // indirect github.com/ipfs/go-datastore v0.9.0 // indirect github.com/ipfs/go-dsqueue v0.1.2 // indirect - github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect + github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1 // indirect github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect github.com/ipfs/go-ipld-cbor v0.2.1 // indirect github.com/ipfs/go-ipld-format v0.6.3 // indirect diff --git a/test/dependencies/go.sum b/test/dependencies/go.sum index 8351caa9d..36876ffd0 100644 --- a/test/dependencies/go.sum +++ b/test/dependencies/go.sum @@ -314,8 +314,8 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo= github.com/ipfs/go-dsqueue v0.1.2 h1:jBMsgvT9Pj9l3cqI0m5jYpW/aWDYkW4Us6EuzrcSGbs= github.com/ipfs/go-dsqueue v0.1.2/go.mod h1:OU94YuMVUIF/ctR7Ysov9PI4gOa2XjPGN9nd8imSv78= -github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ= -github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk= +github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1 h1:l1DaJI5/+uOKdmvYrXwN3j/zOApLr8EBB0IGMTB7UaM= +github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260130221847-44581e1f62e1/go.mod h1:YmhRbpaLKg40i9Ogj2+L41tJ+8x50fF8u1FJJD/WNhc= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-pq v0.0.4 h1:U7jjENWJd1jhcrR8X/xHTaph14PTAK9O+yaLJbjqgOw=