This commit is contained in:
Marcin Rataj 2026-01-19 05:14:47 +00:00 committed by GitHub
commit f682a471a1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
20 changed files with 1440 additions and 332 deletions

View File

@ -29,6 +29,19 @@ const (
// write-batch. The total size of the batch is limited by
// BatchMaxNodes and BatchMaxSize.
DefaultBatchMaxSize = 100 << 20 // 100MiB
// HAMTSizeEstimation values for Import.UnixFSHAMTDirectorySizeEstimation
HAMTSizeEstimationLinks = "links" // legacy: estimate using link names + CID byte lengths
HAMTSizeEstimationBlock = "block" // full serialized dag-pb block size
HAMTSizeEstimationDisabled = "disabled" // disable HAMT sharding entirely
// DAGLayout values for Import.UnixFSDAGLayout
DAGLayoutBalanced = "balanced" // balanced DAG layout (default)
DAGLayoutTrickle = "trickle" // trickle DAG layout
DefaultUnixFSHAMTDirectorySizeEstimation = HAMTSizeEstimationLinks // legacy behavior
DefaultUnixFSDAGLayout = DAGLayoutBalanced // balanced DAG layout
DefaultUnixFSIncludeEmptyDirs = true // include empty directories
)
var (
@ -40,18 +53,20 @@ var (
// Import configures the default options for ingesting data. This affects commands
// that ingest data, such as 'ipfs add', 'ipfs dag put, 'ipfs block put', 'ipfs files write'.
type Import struct {
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalBytes
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
FastProvideRoot Flag
FastProvideWait Flag
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalBytes
UnixFSHAMTDirectorySizeEstimation OptionalString // "links", "block", or "disabled"
UnixFSDAGLayout OptionalString // "balanced" or "trickle"
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
FastProvideRoot Flag
FastProvideWait Flag
}
// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.
@ -129,6 +144,30 @@ func ValidateImportConfig(cfg *Import) error {
}
}
// Validate UnixFSHAMTDirectorySizeEstimation
if !cfg.UnixFSHAMTDirectorySizeEstimation.IsDefault() {
est := cfg.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation)
switch est {
case HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled:
// valid
default:
return fmt.Errorf("Import.UnixFSHAMTDirectorySizeEstimation must be %q, %q, or %q, got %q",
HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled, est)
}
}
// Validate UnixFSDAGLayout
if !cfg.UnixFSDAGLayout.IsDefault() {
layout := cfg.UnixFSDAGLayout.WithDefault(DefaultUnixFSDAGLayout)
switch layout {
case DAGLayoutBalanced, DAGLayoutTrickle:
// valid
default:
return fmt.Errorf("Import.UnixFSDAGLayout must be %q or %q, got %q",
DAGLayoutBalanced, DAGLayoutTrickle, layout)
}
}
return nil
}
@ -144,8 +183,7 @@ func isValidChunker(chunker string) bool {
}
// Check for size-<bytes> format
if strings.HasPrefix(chunker, "size-") {
sizeStr := strings.TrimPrefix(chunker, "size-")
if sizeStr, ok := strings.CutPrefix(chunker, "size-"); ok {
if sizeStr == "" {
return false
}
@ -167,7 +205,7 @@ func isValidChunker(chunker string) bool {
// Parse and validate min, avg, max values
values := make([]int, 3)
for i := 0; i < 3; i++ {
for i := range 3 {
val, err := strconv.Atoi(parts[i+1])
if err != nil {
return false
@ -182,3 +220,17 @@ func isValidChunker(chunker string) bool {
return false
}
// HAMTSizeEstimationMode returns the boxo SizeEstimationMode based on the config value.
func (i *Import) HAMTSizeEstimationMode() io.SizeEstimationMode {
	est := i.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation)
	if est == HAMTSizeEstimationBlock {
		return io.SizeEstimationBlock
	}
	if est == HAMTSizeEstimationDisabled {
		return io.SizeEstimationDisabled
	}
	// "links" and any unrecognized value fall back to the legacy estimation,
	// matching the configured default.
	return io.SizeEstimationLinks
}

View File

@ -4,6 +4,7 @@ import (
"strings"
"testing"
"github.com/ipfs/boxo/ipld/unixfs/io"
mh "github.com/multiformats/go-multihash"
)
@ -406,3 +407,104 @@ func TestIsPowerOfTwo(t *testing.T) {
})
}
}
func TestValidateImportConfig_HAMTSizeEstimation(t *testing.T) {
	cases := []struct {
		name    string
		value   string
		wantErr bool
		errMsg  string
	}{
		{name: "valid links", value: HAMTSizeEstimationLinks},
		{name: "valid block", value: HAMTSizeEstimationBlock},
		{name: "valid disabled", value: HAMTSizeEstimationDisabled},
		{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
		{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
		{name: "invalid typo", value: "link", wantErr: true, errMsg: "must be"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ValidateImportConfig(&Import{
				UnixFSHAMTDirectorySizeEstimation: *NewOptionalString(tc.value),
			})
			switch {
			case tc.wantErr && err == nil:
				t.Errorf("expected error for value=%q, got nil", tc.value)
			case tc.wantErr && tc.errMsg != "" && !strings.Contains(err.Error(), tc.errMsg):
				t.Errorf("error = %v, want error containing %q", err, tc.errMsg)
			case !tc.wantErr && err != nil:
				t.Errorf("unexpected error for value=%q: %v", tc.value, err)
			}
		})
	}
}
func TestValidateImportConfig_DAGLayout(t *testing.T) {
	cases := []struct {
		name    string
		value   string
		wantErr bool
		errMsg  string
	}{
		{name: "valid balanced", value: DAGLayoutBalanced},
		{name: "valid trickle", value: DAGLayoutTrickle},
		{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
		{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
		{name: "invalid flat", value: "flat", wantErr: true, errMsg: "must be"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ValidateImportConfig(&Import{
				UnixFSDAGLayout: *NewOptionalString(tc.value),
			})
			switch {
			case tc.wantErr && err == nil:
				t.Errorf("expected error for value=%q, got nil", tc.value)
			case tc.wantErr && tc.errMsg != "" && !strings.Contains(err.Error(), tc.errMsg):
				t.Errorf("error = %v, want error containing %q", err, tc.errMsg)
			case !tc.wantErr && err != nil:
				t.Errorf("unexpected error for value=%q: %v", tc.value, err)
			}
		})
	}
}
func TestImport_HAMTSizeEstimationMode(t *testing.T) {
	cases := []struct {
		cfg  string
		want io.SizeEstimationMode
	}{
		{HAMTSizeEstimationLinks, io.SizeEstimationLinks},
		{HAMTSizeEstimationBlock, io.SizeEstimationBlock},
		{HAMTSizeEstimationDisabled, io.SizeEstimationDisabled},
		{"", io.SizeEstimationLinks},        // default (unset returns default)
		{"unknown", io.SizeEstimationLinks}, // fallback to default
	}
	for _, tc := range cases {
		t.Run(tc.cfg, func(t *testing.T) {
			imp := Import{}
			if tc.cfg != "" {
				imp.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(tc.cfg)
			}
			if got := imp.HAMTSizeEstimationMode(); got != tc.want {
				t.Errorf("Import.HAMTSizeEstimationMode() with %q = %v, want %v", tc.cfg, got, tc.want)
			}
		})
	}
}

View File

@ -312,45 +312,33 @@ fetching may be degraded.
return nil
},
},
"unixfs-v0-2015": {
Description: `Legacy UnixFS import profile for backward-compatible CID generation.
Produces CIDv0 with no raw leaves, sha2-256, 256 KiB chunks, and
link-based HAMT size estimation. Use only when legacy CIDs are required.
See https://github.com/ipfs/specs/pull/499. Alias: legacy-cid-v0`,
Transform: applyUnixFSv02015,
},
"legacy-cid-v0": {
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks. This is likely the least optimal preset, use only if legacy behavior is required.`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(0)
c.Import.UnixFSRawLeaves = False
c.Import.UnixFSChunker = *NewOptionalString("size-262144")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
return nil
},
Description: `Alias for unixfs-v0-2015 profile.`,
Transform: applyUnixFSv02015,
},
"test-cid-v1": {
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT above 256KiB).`,
"unixfs-v1-2025": {
Description: `Recommended UnixFS import profile for cross-implementation CID determinism.
Uses CIDv1, raw leaves, sha2-256, 1 MiB chunks, 1024 links per file node,
256 HAMT fanout, and block-based size estimation for HAMT threshold.
See https://github.com/ipfs/specs/pull/499`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
return nil
},
},
"test-cid-v1-wide": {
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1MiB chunks and wider file DAGs (max 1024 links per every node type, switch dir to HAMT above 1MiB).`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1MiB
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1 MiB
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based Import.UnixFSHAMTDirectorySizeThreshold instead
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("1MiB") // 1MiB
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
c.Import.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationBlock)
c.Import.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
return nil
},
},
@ -435,3 +423,18 @@ func mapKeys(m map[string]struct{}) []string {
}
return out
}
// applyUnixFSv02015 applies the legacy UnixFS v0 (2015) import settings.
func applyUnixFSv02015(c *Config) error {
	imp := &c.Import
	imp.CidVersion = *NewOptionalInteger(0)
	imp.UnixFSRawLeaves = False
	imp.UnixFSChunker = *NewOptionalString("size-262144") // 256 KiB
	imp.HashFunction = *NewOptionalString("sha2-256")
	imp.UnixFSFileMaxLinks = *NewOptionalInteger(174)
	imp.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
	imp.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
	imp.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
	imp.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationLinks)
	imp.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
	return nil
}

View File

@ -15,6 +15,7 @@ import (
"github.com/cheggaaa/pb"
"github.com/ipfs/boxo/files"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
"github.com/ipfs/boxo/verifcid"
@ -68,6 +69,7 @@ const (
mtimeNsecsOptionName = "mtime-nsecs"
fastProvideRootOptionName = "fast-provide-root"
fastProvideWaitOptionName = "fast-provide-wait"
emptyDirsOptionName = "empty-dirs"
)
const (
@ -147,6 +149,18 @@ to find it in the future:
See 'ipfs files --help' to learn more about using MFS
for keeping track of added files and directories.
SYMLINK HANDLING:
By default, symbolic links are preserved as UnixFS symlink nodes that store
the target path. Use --dereference-symlinks to resolve symlinks to their
target content instead:
> ipfs add -r --dereference-symlinks ./mydir
This recursively resolves all symlinks encountered during directory traversal.
Symlinks to files become regular file content, symlinks to directories are
traversed and their contents are added.
CHUNKING EXAMPLES:
The chunker option, '-s', specifies the chunking strategy that dictates
@ -200,11 +214,13 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
Options: []cmds.Option{
// Input Processing
cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
cmds.OptionDerefArgs, // a builtin option that resolves passed in filesystem links (--dereference-args)
cmds.OptionDerefArgs, // DEPRECATED: use --dereference-symlinks instead
cmds.OptionStdinName, // a builtin option that optionally allows wrapping stdin into a named file
cmds.OptionHidden,
cmds.OptionIgnore,
cmds.OptionIgnoreRules,
cmds.BoolOption(emptyDirsOptionName, "E", "Include empty directories in the import.").WithDefault(config.DefaultUnixFSIncludeEmptyDirs),
cmds.OptionDerefSymlinks, // resolve symlinks to their target content
// Output Control
cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),
@ -274,7 +290,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
}
progress, _ := req.Options[progressOptionName].(bool)
trickle, _ := req.Options[trickleOptionName].(bool)
trickle, trickleSet := req.Options[trickleOptionName].(bool)
wrap, _ := req.Options[wrapOptionName].(bool)
onlyHash, _ := req.Options[onlyHashOptionName].(bool)
silent, _ := req.Options[silentOptionName].(bool)
@ -285,6 +301,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int)
maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int)
maxHAMTFanout, maxHAMTFanoutSet := req.Options[maxHAMTFanoutOptionName].(int)
var sizeEstimationMode uio.SizeEstimationMode
nocopy, _ := req.Options[noCopyOptionName].(bool)
fscache, _ := req.Options[fstoreCacheOptionName].(bool)
cidVer, cidVerSet := req.Options[cidVersionOptionName].(int)
@ -312,6 +329,19 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint)
fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)
emptyDirs, _ := req.Options[emptyDirsOptionName].(bool)
// Handle --dereference-args deprecation
derefArgs, derefArgsSet := req.Options[cmds.DerefLong].(bool)
if derefArgsSet && derefArgs {
return fmt.Errorf("--dereference-args is deprecated: use --dereference-symlinks instead")
}
// Wire --trickle from config
if !trickleSet && !cfg.Import.UnixFSDAGLayout.IsDefault() {
layout := cfg.Import.UnixFSDAGLayout.WithDefault(config.DefaultUnixFSDAGLayout)
trickle = layout == config.DAGLayoutTrickle
}
if chunker == "" {
chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)
@ -348,6 +378,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
}
// SizeEstimationMode is always set from config (no CLI flag)
sizeEstimationMode = cfg.Import.HAMTSizeEstimationMode()
fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)
@ -409,6 +442,8 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
options.Unixfs.PreserveMode(preserveMode),
options.Unixfs.PreserveMtime(preserveMtime),
options.Unixfs.IncludeEmptyDirs(emptyDirs),
}
if mode != 0 {
@ -441,6 +476,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
opts = append(opts, options.Unixfs.MaxHAMTFanout(maxHAMTFanout))
}
// SizeEstimationMode is always set from config
opts = append(opts, options.Unixfs.SizeEstimationMode(sizeEstimationMode))
if trickle {
opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
}

View File

@ -177,12 +177,18 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
if settings.MaxHAMTFanoutSet {
fileAdder.MaxHAMTFanout = settings.MaxHAMTFanout
}
if settings.SizeEstimationModeSet {
fileAdder.SizeEstimationMode = settings.SizeEstimationMode
}
fileAdder.NoCopy = settings.NoCopy
fileAdder.CidBuilder = prefix
fileAdder.PreserveMode = settings.PreserveMode
fileAdder.PreserveMtime = settings.PreserveMtime
fileAdder.FileMode = settings.Mode
fileAdder.FileMtime = settings.Mtime
if settings.IncludeEmptyDirsSet {
fileAdder.IncludeEmptyDirs = settings.IncludeEmptyDirs
}
switch settings.Layout {
case options.BalancedLayout:

View File

@ -24,16 +24,18 @@ type UnixfsAddSettings struct {
CidVersion int
MhType uint64
Inline bool
InlineLimit int
RawLeaves bool
RawLeavesSet bool
MaxFileLinks int
MaxFileLinksSet bool
MaxDirectoryLinks int
MaxDirectoryLinksSet bool
MaxHAMTFanout int
MaxHAMTFanoutSet bool
Inline bool
InlineLimit int
RawLeaves bool
RawLeavesSet bool
MaxFileLinks int
MaxFileLinksSet bool
MaxDirectoryLinks int
MaxDirectoryLinksSet bool
MaxHAMTFanout int
MaxHAMTFanoutSet bool
SizeEstimationMode *io.SizeEstimationMode
SizeEstimationModeSet bool
Chunker string
Layout Layout
@ -48,10 +50,12 @@ type UnixfsAddSettings struct {
Silent bool
Progress bool
PreserveMode bool
PreserveMtime bool
Mode os.FileMode
Mtime time.Time
PreserveMode bool
PreserveMtime bool
Mode os.FileMode
Mtime time.Time
IncludeEmptyDirs bool
IncludeEmptyDirsSet bool
}
type UnixfsLsSettings struct {
@ -93,10 +97,12 @@ func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix,
Silent: false,
Progress: false,
PreserveMode: false,
PreserveMtime: false,
Mode: 0,
Mtime: time.Time{},
PreserveMode: false,
PreserveMtime: false,
Mode: 0,
Mtime: time.Time{},
IncludeEmptyDirs: true, // default: include empty directories
IncludeEmptyDirsSet: false,
}
for _, opt := range opts {
@ -235,6 +241,15 @@ func (unixfsOpts) MaxHAMTFanout(n int) UnixfsAddOption {
}
}
// SizeEstimationMode specifies how directory size is estimated for HAMT sharding decisions.
func (unixfsOpts) SizeEstimationMode(mode io.SizeEstimationMode) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.SizeEstimationMode = &mode
settings.SizeEstimationModeSet = true
return nil
}
}
// Inline tells the adder to inline small blocks into CIDs
func (unixfsOpts) Inline(enable bool) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
@ -396,3 +411,12 @@ func (unixfsOpts) Mtime(seconds int64, nsecs uint32) UnixfsAddOption {
return nil
}
}
// IncludeEmptyDirs tells the adder to include empty directories in the DAG
func (unixfsOpts) IncludeEmptyDirs(include bool) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.IncludeEmptyDirs = include
settings.IncludeEmptyDirsSet = true
return nil
}
}

View File

@ -26,6 +26,7 @@ import (
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/tracing"
@ -52,49 +53,52 @@ func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCLocker, ds ipld.DAG
bufferedDS := ipld.NewBufferedDAG(ctx, ds)
return &Adder{
ctx: ctx,
pinning: p,
gcLocker: bs,
dagService: ds,
bufferedDS: bufferedDS,
Progress: false,
Pin: true,
Trickle: false,
MaxLinks: ihelper.DefaultLinksPerBlock,
MaxHAMTFanout: uio.DefaultShardWidth,
Chunker: "",
ctx: ctx,
pinning: p,
gcLocker: bs,
dagService: ds,
bufferedDS: bufferedDS,
Progress: false,
Pin: true,
Trickle: false,
MaxLinks: ihelper.DefaultLinksPerBlock,
MaxHAMTFanout: uio.DefaultShardWidth,
Chunker: "",
IncludeEmptyDirs: config.DefaultUnixFSIncludeEmptyDirs,
}, nil
}
// Adder holds the switches passed to the `add` command.
type Adder struct {
ctx context.Context
pinning pin.Pinner
gcLocker bstore.GCLocker
dagService ipld.DAGService
bufferedDS *ipld.BufferedDAG
Out chan<- interface{}
Progress bool
Pin bool
PinName string
Trickle bool
RawLeaves bool
MaxLinks int
MaxDirectoryLinks int
MaxHAMTFanout int
Silent bool
NoCopy bool
Chunker string
mroot *mfs.Root
unlocker bstore.Unlocker
tempRoot cid.Cid
CidBuilder cid.Builder
liveNodes uint64
ctx context.Context
pinning pin.Pinner
gcLocker bstore.GCLocker
dagService ipld.DAGService
bufferedDS *ipld.BufferedDAG
Out chan<- interface{}
Progress bool
Pin bool
PinName string
Trickle bool
RawLeaves bool
MaxLinks int
MaxDirectoryLinks int
MaxHAMTFanout int
SizeEstimationMode *uio.SizeEstimationMode
Silent bool
NoCopy bool
Chunker string
mroot *mfs.Root
unlocker bstore.Unlocker
tempRoot cid.Cid
CidBuilder cid.Builder
liveNodes uint64
PreserveMode bool
PreserveMtime bool
FileMode os.FileMode
FileMtime time.Time
PreserveMode bool
PreserveMtime bool
FileMode os.FileMode
FileMtime time.Time
IncludeEmptyDirs bool
}
func (adder *Adder) mfsRoot() (*mfs.Root, error) {
@ -104,9 +108,10 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) {
// Note, this adds it to DAGService already.
mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, nil, mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
SizeEstimationMode: adder.SizeEstimationMode,
})
if err != nil {
return nil, err
@ -270,11 +275,12 @@ func (adder *Adder) addNode(node ipld.Node, path string) error {
dir := gopath.Dir(path)
if dir != "." {
opts := mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
SizeEstimationMode: adder.SizeEstimationMode,
}
if err := mfs.Mkdir(mr, dir, opts); err != nil {
return err
@ -480,15 +486,34 @@ func (adder *Adder) addFile(path string, file files.File) error {
func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory, toplevel bool) error {
log.Infof("adding directory: %s", path)
// Peek at first entry to check if directory is empty.
// We advance the iterator once here and continue from this position
// in the processing loop below. This avoids allocating a slice to
// collect all entries just to check for emptiness.
it := dir.Entries()
hasEntry := it.Next()
if !hasEntry {
if err := it.Err(); err != nil {
return err
}
// Directory is empty. Skip it unless IncludeEmptyDirs is set or
// this is the toplevel directory (we always include the root).
if !adder.IncludeEmptyDirs && !toplevel {
log.Debugf("skipping empty directory: %s", path)
return nil
}
}
// if we need to store mode or modification time then create a new root which includes that data
if toplevel && (adder.FileMode != 0 || !adder.FileMtime.IsZero()) {
mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, nil,
mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
ModTime: adder.FileMtime,
Mode: adder.FileMode,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
ModTime: adder.FileMtime,
Mode: adder.FileMode,
SizeEstimationMode: adder.SizeEstimationMode,
})
if err != nil {
return err
@ -502,26 +527,28 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory
return err
}
err = mfs.Mkdir(mr, path, mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
Mode: adder.FileMode,
ModTime: adder.FileMtime,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
Mode: adder.FileMode,
ModTime: adder.FileMtime,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
SizeEstimationMode: adder.SizeEstimationMode,
})
if err != nil {
return err
}
}
it := dir.Entries()
for it.Next() {
// Process directory entries. The iterator was already advanced once above
// to peek for emptiness, so we start from that position.
for hasEntry {
fpath := gopath.Join(path, it.Name())
err := adder.addFileNode(ctx, fpath, it.Node(), false)
if err != nil {
if err := adder.addFileNode(ctx, fpath, it.Node(), false); err != nil {
return err
}
hasEntry = it.Next()
}
return it.Err()

View File

@ -439,11 +439,12 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
}
// Auto-sharding settings
shardSingThresholdInt := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardSizeThreshold := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardMaxFanout := cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout)
// TODO: avoid overriding this globally, see if we can extend Directory interface like Get/SetMaxLinks from https://github.com/ipfs/boxo/pull/906
uio.HAMTShardingSize = int(shardSingThresholdInt)
uio.HAMTShardingSize = int(shardSizeThreshold)
uio.DefaultShardWidth = int(shardMaxFanout)
uio.HAMTSizeEstimation = cfg.Import.HAMTSizeEstimationMode()
providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)

View File

@ -10,6 +10,7 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
- [🔢 UnixFS CID Profiles (IPIP-499)](#-unixfs-cid-profiles-ipip-499)
- [🧹 Automatic cleanup of interrupted imports](#-automatic-cleanup-of-interrupted-imports)
- [Routing V1 HTTP API now exposed by default](#routing-v1-http-api-now-exposed-by-default)
- [Track total size when adding pins](#track-total-size-when-adding-pins)
@ -30,6 +31,47 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
### 🔦 Highlights
#### 🔢 UnixFS CID Profiles (IPIP-499)
This release introduces [IPIP-499](https://github.com/ipfs/specs/pull/499) UnixFS CID Profiles for reproducible CID generation across IPFS implementations.
**New Profiles**
- `unixfs-v1-2025`: the recommended profile for CIDv1 imports with deterministic HAMT directory sharding based on block size estimation
- `unixfs-v0-2015` (alias `legacy-cid-v0`): preserves legacy CIDv0 behavior for backward compatibility
Apply a profile with: `ipfs config profile apply unixfs-v1-2025`
**New `Import.*` Configuration Options**
New [`Import.*`](https://github.com/ipfs/kubo/blob/master/docs/config.md#import) options allow fine-grained control over import parameters:
- `Import.CidVersion`: CID version (0 or 1)
- `Import.HashFunction`: hash algorithm
- `Import.UnixFSChunker`: chunking strategy
- `Import.UnixFSRawLeaves`: raw leaf blocks
- `Import.UnixFSFileMaxLinks`: max children per file node
- `Import.UnixFSDirectoryMaxLinks`: max children per basic directory
- `Import.UnixFSHAMTDirectoryMaxFanout`: HAMT shard width
- `Import.UnixFSHAMTDirectorySizeThreshold`: threshold for HAMT sharding
- `Import.UnixFSHAMTDirectorySizeEstimation`: estimation mode (`links`, `block`, or `disabled`)
- `Import.UnixFSDAGLayout`: DAG layout (`balanced` or `trickle`)
**Deprecated Profiles**
The `test-cid-v1` and `test-cid-v1-wide` profiles have been removed. Use `unixfs-v1-2025` for CIDv1 imports.
**CLI Changes**
- New `--dereference-symlinks` flag for `ipfs add` recursively resolves symlinks to their target content (replaces deprecated `--dereference-args` which only worked on CLI arguments)
- New `--empty-dirs` / `-E` flag for `ipfs add` controls inclusion of empty directories (default: true)
- New `--hidden` / `-H` flag for `ipfs add` includes hidden files (default: false)
- The `--trickle` flag in `ipfs add` now respects `Import.UnixFSDAGLayout` config default
**HAMT Threshold Fix**
The HAMT directory sharding threshold comparison was aligned with the JS implementation ([ipfs/boxo@6707376](https://github.com/ipfs/boxo/commit/6707376002a3d4ba64895749ce9be2e00d265ed5)). The comparison changed from `>=` to `>`, meaning a directory exactly at the 256 KiB threshold now stays as a basic (flat) directory instead of converting to HAMT. This is a subtle 1-byte boundary change that improves CID determinism across implementations.
#### 🧹 Automatic cleanup of interrupted imports
If you cancel `ipfs add` or `ipfs dag import` mid-operation, Kubo now automatically cleans up incomplete data on the next daemon start. Previously, interrupted imports would leave orphan blocks in your repository that were difficult to identify and remove without pins and running explicit garbage collection.

View File

@ -242,6 +242,8 @@ config file at runtime.
- [`Import.UnixFSDirectoryMaxLinks`](#importunixfsdirectorymaxlinks)
- [`Import.UnixFSHAMTDirectoryMaxFanout`](#importunixfshamtdirectorymaxfanout)
- [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold)
- [`Import.UnixFSHAMTDirectorySizeEstimation`](#importunixfshamtdirectorysizeestimation)
- [`Import.UnixFSDAGLayout`](#importunixfsdaglayout)
- [`Version`](#version)
- [`Version.AgentSuffix`](#versionagentsuffix)
- [`Version.SwarmCheckEnabled`](#versionswarmcheckenabled)
@ -263,9 +265,9 @@ config file at runtime.
- [`lowpower` profile](#lowpower-profile)
- [`announce-off` profile](#announce-off-profile)
- [`announce-on` profile](#announce-on-profile)
- [`unixfs-v0-2015` profile](#unixfs-v0-2015-profile)
- [`legacy-cid-v0` profile](#legacy-cid-v0-profile)
- [`test-cid-v1` profile](#test-cid-v1-profile)
- [`test-cid-v1-wide` profile](#test-cid-v1-wide-profile)
- [`unixfs-v1-2025` profile](#unixfs-v1-2025-profile)
- [Security](#security)
- [Port and Network Exposure](#port-and-network-exposure)
- [Security Best Practices](#security-best-practices)
@ -3639,9 +3641,11 @@ Type: `flag`
## `Import`
Options to configure the default options used for ingesting data, in commands such as `ipfs add` or `ipfs block put`. All affected commands are detailed per option.
Options to configure the default parameters used for ingesting data, in commands such as `ipfs add` or `ipfs block put`. All affected commands are detailed per option.
Note that using flags will override the options defined here.
These options implement [IPIP-499: UnixFS CID Profiles](https://github.com/ipfs/specs/pull/499) for reproducible CID generation across IPFS implementations. Instead of configuring individual options, you can apply a predefined profile with `ipfs config profile apply <profile-name>`. See [Profiles](#profiles) for available options like `unixfs-v1-2025`.
Note that using CLI flags will override the options defined here.
### `Import.CidVersion`
@ -3821,6 +3825,42 @@ Default: `256KiB` (may change, inspect `DefaultUnixFSHAMTDirectorySizeThreshold`
Type: [`optionalBytes`](#optionalbytes)
### `Import.UnixFSHAMTDirectorySizeEstimation`
Controls how directory size is estimated when deciding whether to switch
from a basic UnixFS directory to HAMT sharding.
Accepted values:
- `links` (default): Legacy estimation using sum of link names and CID byte lengths.
- `block`: Full serialized dag-pb block size for accurate threshold decisions.
- `disabled`: Disable HAMT sharding entirely (directories always remain basic).
The `block` estimation is recommended for new profiles as it provides more
accurate threshold decisions and better cross-implementation consistency.
See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details.
Commands affected: `ipfs add`
Default: `links`
Type: [`optionalString`](#optionalstring)
### `Import.UnixFSDAGLayout`
Controls the DAG layout used when chunking files.
Accepted values:
- `balanced` (default): Balanced DAG layout with uniform leaf depth.
- `trickle`: Trickle DAG layout optimized for streaming.
Commands affected: `ipfs add`
Default: `balanced`
Type: [`optionalString`](#optionalstring)
## `Version`
Options to configure agent version announced to the swarm, and leveraging
@ -3864,7 +3904,7 @@ applied with the `--profile` flag to `ipfs init` or with the `ipfs config profil
apply` command. When a profile is applied a backup of the configuration file
will be created in `$IPFS_PATH`.
Configuration profiles can be applied additively. For example, both the `test-cid-v1` and `lowpower` profiles can be applied one after the other.
Configuration profiles can be applied additively. For example, both the `unixfs-v1-2025` and `lowpower` profiles can be applied one after the other.
The available configuration profiles are listed below. You can also find them
documented in `ipfs config profile --help`.
@ -4021,42 +4061,35 @@ Disables [Provide](#provide) system (and announcing to Amino DHT).
(Re-)enables [Provide](#provide) system (reverts [`announce-off` profile](#announce-off-profile)).
### `unixfs-v0-2015` profile
Legacy UnixFS import profile for backward-compatible CID generation.
Produces CIDv0 with no raw leaves, sha2-256, 256 KiB chunks, and
link-based HAMT size estimation.
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> Use only when legacy CIDs are required. For new projects, use [`unixfs-v1-2025`](#unixfs-v1-2025-profile).
>
> See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details.
### `legacy-cid-v0` profile
Makes UnixFS import (`ipfs add`) produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks.
Alias for [`unixfs-v0-2015`](#unixfs-v0-2015-profile) profile.
### `unixfs-v1-2025` profile
Recommended UnixFS import profile for cross-implementation CID determinism.
Uses CIDv1, raw leaves, sha2-256, 1 MiB chunks, 1024 links per file node,
256 HAMT fanout, and block-based size estimation for HAMT threshold.
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> This profile is recommended for new projects; it produces deterministic CIDs that are consistent across IPFS implementations, per [IPIP-499](https://github.com/ipfs/specs/pull/499).
### `test-cid-v1` profile
Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT
above 256KiB).
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> [`Import.*`](#import) settings applied by this profile MAY change in future release. Provided for testing purposes.
> This profile ensures CID consistency across different IPFS implementations.
>
> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
### `test-cid-v1-wide` profile
Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
and 1 MiB chunks and wider file DAGs (max 1024 links per every node type,
switch dir to HAMT above 1MiB).
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> [`Import.*`](#import) settings applied by this profile MAY change in future release. Provided for testing purposes.
>
> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
> See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details.
## Security

View File

@ -7,7 +7,7 @@ go 1.25
replace github.com/ipfs/kubo => ./../../..
require (
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.46.0
github.com/multiformats/go-multiaddr v0.16.1
@ -84,7 +84,7 @@ require (
github.com/ipfs/go-ds-pebble v0.5.8 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-fs-lock v0.1.1 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect

View File

@ -265,8 +265,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2 h1:pRQYSSGnGQa921d8v0uhXg2BGzoSf9ndTWTlR7ImVoo=
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3 h1:X6iiSyBUwhKgQMzM57wSXVUZfivm5nWm5S/Y2SrSjhA=
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
@ -301,8 +301,8 @@ github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294 h1:pQ6LhtU+nEBajAgFz3uU7ta6JN4KY0W5T7JxuaRQJVE=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294/go.mod h1:WG//DD2nimQcQ/+MTqB8mSeZQZBZC8KLZ+OeVGk9We0=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=

4
go.mod
View File

@ -21,7 +21,7 @@ require (
github.com/hashicorp/go-version v1.7.0
github.com/ipfs-shipyard/nopfs v0.0.14
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3
github.com/ipfs/go-block-format v0.2.3
github.com/ipfs/go-cid v0.6.0
github.com/ipfs/go-cidutil v0.1.0
@ -33,7 +33,7 @@ require (
github.com/ipfs/go-ds-measure v0.2.2
github.com/ipfs/go-ds-pebble v0.5.8
github.com/ipfs/go-fs-lock v0.1.1
github.com/ipfs/go-ipfs-cmds v0.15.0
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294
github.com/ipfs/go-ipld-cbor v0.2.1
github.com/ipfs/go-ipld-format v0.6.3
github.com/ipfs/go-ipld-git v0.1.1

8
go.sum
View File

@ -336,8 +336,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2 h1:pRQYSSGnGQa921d8v0uhXg2BGzoSf9ndTWTlR7ImVoo=
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3 h1:X6iiSyBUwhKgQMzM57wSXVUZfivm5nWm5S/Y2SrSjhA=
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
@ -372,8 +372,8 @@ github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294 h1:pQ6LhtU+nEBajAgFz3uU7ta6JN4KY0W5T7JxuaRQJVE=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294/go.mod h1:WG//DD2nimQcQ/+MTqB8mSeZQZBZC8KLZ+OeVGk9We0=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=

View File

@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
@ -166,7 +165,7 @@ func TestAdd(t *testing.T) {
//
// UnixFSChunker=size-262144 (256KiB)
// Import.UnixFSFileMaxLinks=174
node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0") // legacy-cid-v0 for determinism across all params
node := harness.NewT(t).NewNode().Init("--profile=unixfs-v0-2015") // unixfs-v0-2015 for determinism across all params
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.UnixFSChunker = *config.NewOptionalString("size-262144") // 256 KiB chunks
cfg.Import.UnixFSFileMaxLinks = *config.NewOptionalInteger(174) // max 174 per level
@ -187,9 +186,9 @@ func TestAdd(t *testing.T) {
require.Equal(t, "QmbBftNHWmjSWKLC49dMVrfnY8pjrJYntiAXirFJ7oJrNk", cidStr)
})
t.Run("ipfs init --profile=legacy-cid-v0 sets config that produces legacy CIDv0", func(t *testing.T) {
t.Run("ipfs init --profile=unixfs-v0-2015 sets config that produces legacy CIDv0", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0")
node := harness.NewT(t).NewNode().Init("--profile=unixfs-v0-2015")
node.StartDaemon()
defer node.StopDaemon()
@ -197,10 +196,10 @@ func TestAdd(t *testing.T) {
require.Equal(t, shortStringCidV0, cidStr)
})
t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSChunker=size-262144 and UnixFSFileMaxLinks", func(t *testing.T) {
t.Run("ipfs init --profile=unixfs-v0-2015 applies UnixFSChunker=size-262144 and UnixFSFileMaxLinks", func(t *testing.T) {
t.Parallel()
seed := "v0-seed"
profile := "--profile=legacy-cid-v0"
profile := "--profile=unixfs-v0-2015"
t.Run("under UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Parallel()
@ -232,12 +231,15 @@ func TestAdd(t *testing.T) {
})
})
t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Run("ipfs init --profile=unixfs-v0-2015 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-legacy-cid-v0"
profile := "--profile=legacy-cid-v0"
seed := "hamt-unixfs-v0-2015"
profile := "--profile=unixfs-v0-2015"
t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
// unixfs-v0-2015 uses links-based estimation: size = sum(nameLen + cidLen)
// Threshold is 256KiB = 262144 bytes
t.Run("at UnixFSHAMTDirectorySizeThreshold=256KiB (links estimation)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
@ -246,18 +248,24 @@ func TestAdd(t *testing.T) {
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV0Length, "255KiB", seed)
// Create directory exactly at the 256KiB threshold using links estimation.
// Links estimation: size = numFiles * (nameLen + cidLen)
// 4096 * (30 + 34) = 4096 * 64 = 262144 = threshold exactly
// With > comparison: stays as basic directory
// With >= comparison: converts to HAMT
const numFiles, nameLen = 4096, 30
err = createDirectoryForHAMTLinksEstimation(randDir, cidV0Length, numFiles, nameLen, nameLen, seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectorySizeThreshold (indicating regular "basic" directory"
// Should remain a basic directory (threshold uses > not >=)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 903, len(root.Links))
require.Equal(t, numFiles, len(root.Links), "expected basic directory at exact threshold")
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Run("over UnixFSHAMTDirectorySizeThreshold=256KiB (links estimation)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
@ -266,21 +274,25 @@ func TestAdd(t *testing.T) {
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV0Length, "257KiB", seed)
// Create directory just over the 256KiB threshold using links estimation.
// Links estimation: size = numFiles * (nameLen + cidLen)
// 4097 * (30 + 34) = 4097 * 64 = 262208 > 262144, exceeds threshold
const numFiles, nameLen = 4097, 30
err = createDirectoryForHAMTLinksEstimation(randDir, cidV0Length, numFiles, nameLen, nameLen, seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm this time, the number of links is less than UnixFSHAMTDirectorySizeThreshold
// Should be HAMT sharded (root links <= fanout of 256)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 252, len(root.Links))
require.LessOrEqual(t, len(root.Links), 256, "expected HAMT directory when over threshold")
})
})
t.Run("ipfs init --profile=test-cid-v1 produces CIDv1 with raw leaves", func(t *testing.T) {
t.Run("ipfs init --profile=unixfs-v1-2025 produces CIDv1 with raw leaves", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init("--profile=test-cid-v1")
node := harness.NewT(t).NewNode().Init("--profile=unixfs-v1-2025")
node.StartDaemon()
defer node.StopDaemon()
@ -288,105 +300,21 @@ func TestAdd(t *testing.T) {
require.Equal(t, shortStringCidV1, cidStr) // raw leaf
})
t.Run("ipfs init --profile=test-cid-v1 applies UnixFSChunker=size-1048576", func(t *testing.T) {
t.Run("ipfs init --profile=unixfs-v1-2025 applies UnixFSChunker=size-1048576 and UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
seed := "v1-seed"
profile := "--profile=test-cid-v1"
t.Run("under UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// Add 174MiB file:
// 174 * 1MiB should fit in single layer
cidStr := node.IPFSAddDeterministic("174MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 174, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeigwduxcf2aawppv3isnfeshnimkyplvw3hthxjhr2bdeje4tdaicu", cidStr)
})
t.Run("above UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// add +1MiB (one more block), it should force rebalancing DAG and moving most to second layer
cidStr := node.IPFSAddDeterministic("175MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeidhd7lo2n2v7lta5yamob3xwhbxcczmmtmhquwhjesi35jntf7mpu", cidStr)
})
})
t.Run("ipfs init --profile=test-cid-v1 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-cid-v1"
profile := "--profile=test-cid-v1"
t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "255KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating regular "basic" directory"
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 897, len(root.Links))
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "257KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm this time, the number of links is less than UnixFSHAMTDirectoryMaxFanout
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 252, len(root.Links))
})
})
t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSChunker=size-1048576 and UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
seed := "v1-seed-1024"
profile := "--profile=test-cid-v1-wide"
seed := "v1-2025-seed"
profile := "--profile=unixfs-v1-2025"
t.Run("under UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// Add 1024MiB file:
// 1024 * 1MiB should fit in single layer
cidStr := node.IPFSAddDeterministic("1024MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 1024, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeiej5w63ir64oxgkr5htqmlerh5k2rqflurn2howimexrlkae64xru", cidStr)
})
t.Run("above UnixFSFileMaxLinks=1024", func(t *testing.T) {
@ -399,17 +327,19 @@ func TestAdd(t *testing.T) {
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeieilp2qx24pe76hxrxe6bpef5meuxto3kj5dd6mhb5kplfeglskdm", cidStr)
})
})
t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Run("ipfs init --profile=unixfs-v1-2025 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-cid-v1"
profile := "--profile=test-cid-v1-wide"
seed := "hamt-unixfs-v1-2025"
profile := "--profile=unixfs-v1-2025"
t.Run("under UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
// unixfs-v1-2025 uses block-based size estimation: size = sum(LinkSerializedSize)
// where LinkSerializedSize includes protobuf overhead (tags, varints, wrappers).
// Threshold is 256KiB = 262144 bytes
t.Run("at UnixFSHAMTDirectorySizeThreshold=256KiB (block estimation)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
@ -418,18 +348,25 @@ func TestAdd(t *testing.T) {
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "1023KiB", seed)
// Create directory exactly at the 256KiB threshold using block estimation.
// Block estimation: size = baseOverhead + numFiles * LinkSerializedSize
// LinkSerializedSize(11, 36, 0) = 55 bytes per link
// 4766 * 55 + 14 = 262130 + 14 = 262144 = threshold exactly
// With > comparison: stays as basic directory
// With >= comparison: converts to HAMT
const numFiles, nameLen = 4766, 11
err = createDirectoryForHAMTBlockEstimation(randDir, cidV1Length, numFiles, nameLen, nameLen, seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating regular "basic" directory"
// Should remain a basic directory (threshold uses > not >=)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 3599, len(root.Links))
require.Equal(t, numFiles, len(root.Links), "expected basic directory at exact threshold")
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Run("over UnixFSHAMTDirectorySizeThreshold=256KiB (block estimation)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
@ -438,15 +375,199 @@ func TestAdd(t *testing.T) {
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "1025KiB", seed)
// Create directory just over the 256KiB threshold using block estimation.
// Block estimation: size = baseOverhead + numFiles * LinkSerializedSize
// 4767 * 55 + 14 = 262185 + 14 = 262199 > 262144, exceeds threshold
const numFiles, nameLen = 4767, 11
err = createDirectoryForHAMTBlockEstimation(randDir, cidV1Length, numFiles, nameLen, nameLen, seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm this time, the number of links is less than UnixFSHAMTDirectoryMaxFanout
// Should be HAMT sharded (root links <= fanout of 256)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 992, len(root.Links))
require.LessOrEqual(t, len(root.Links), 256, "expected HAMT directory when over threshold")
})
})
t.Run("ipfs add --hidden", func(t *testing.T) {
t.Parallel()
// Helper to create test directory with hidden file
setupTestDir := func(t *testing.T, node *harness.Node) string {
testDir, err := os.MkdirTemp(node.Dir, "hidden-test")
require.NoError(t, err)
require.NoError(t, os.WriteFile(filepath.Join(testDir, "visible.txt"), []byte("visible"), 0644))
require.NoError(t, os.WriteFile(filepath.Join(testDir, ".hidden"), []byte("hidden"), 0644))
return testDir
}
t.Run("default excludes hidden files", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.Contains(t, lsOutput, "visible.txt")
require.NotContains(t, lsOutput, ".hidden")
})
t.Run("--hidden includes hidden files", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "--hidden", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.Contains(t, lsOutput, "visible.txt")
require.Contains(t, lsOutput, ".hidden")
})
t.Run("-H includes hidden files", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "-H", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.Contains(t, lsOutput, "visible.txt")
require.Contains(t, lsOutput, ".hidden")
})
})
t.Run("ipfs add --empty-dirs", func(t *testing.T) {
t.Parallel()
// Helper to create test directory with empty subdirectory
setupTestDir := func(t *testing.T, node *harness.Node) string {
testDir, err := os.MkdirTemp(node.Dir, "empty-dirs-test")
require.NoError(t, err)
require.NoError(t, os.Mkdir(filepath.Join(testDir, "empty-subdir"), 0755))
require.NoError(t, os.WriteFile(filepath.Join(testDir, "file.txt"), []byte("content"), 0644))
return testDir
}
t.Run("default includes empty directories", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", testDir).Stdout.Trimmed()
require.Contains(t, node.IPFS("ls", cidStr).Stdout.Trimmed(), "empty-subdir")
})
t.Run("--empty-dirs=true includes empty directories", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "--empty-dirs=true", testDir).Stdout.Trimmed()
require.Contains(t, node.IPFS("ls", cidStr).Stdout.Trimmed(), "empty-subdir")
})
t.Run("--empty-dirs=false excludes empty directories", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "--empty-dirs=false", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.NotContains(t, lsOutput, "empty-subdir")
require.Contains(t, lsOutput, "file.txt")
})
})
t.Run("ipfs add --dereference-symlinks", func(t *testing.T) {
t.Parallel()
// Helper to create test directory with a file and symlink to it
setupTestDir := func(t *testing.T, node *harness.Node) string {
testDir, err := os.MkdirTemp(node.Dir, "deref-symlinks-test")
require.NoError(t, err)
targetFile := filepath.Join(testDir, "target.txt")
require.NoError(t, os.WriteFile(targetFile, []byte("target content"), 0644))
// Create symlink pointing to target
require.NoError(t, os.Symlink("target.txt", filepath.Join(testDir, "link.txt")))
return testDir
}
t.Run("default preserves symlinks", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
// Add directory with symlink (default: preserve)
dirCID := node.IPFS("add", "-r", "-Q", testDir).Stdout.Trimmed()
// Get to a new directory and verify symlink is preserved
outDir, err := os.MkdirTemp(node.Dir, "symlink-get-out")
require.NoError(t, err)
node.IPFS("get", "-o", outDir, dirCID)
// Check that link.txt is a symlink (ipfs get -o puts files directly in outDir)
linkPath := filepath.Join(outDir, "link.txt")
fi, err := os.Lstat(linkPath)
require.NoError(t, err)
require.True(t, fi.Mode()&os.ModeSymlink != 0, "link.txt should be a symlink")
// Verify symlink target
target, err := os.Readlink(linkPath)
require.NoError(t, err)
require.Equal(t, "target.txt", target)
})
t.Run("--dereference-symlinks resolves nested symlinks", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
// Add directory with dereference flag - nested symlinks should be resolved
dirCID := node.IPFS("add", "-r", "-Q", "--dereference-symlinks", testDir).Stdout.Trimmed()
// Get and verify symlink was dereferenced to regular file
outDir, err := os.MkdirTemp(node.Dir, "symlink-get-out")
require.NoError(t, err)
node.IPFS("get", "-o", outDir, dirCID)
linkPath := filepath.Join(outDir, "link.txt")
fi, err := os.Lstat(linkPath)
require.NoError(t, err)
// Should be a regular file, not a symlink
require.False(t, fi.Mode()&os.ModeSymlink != 0,
"link.txt should be dereferenced to regular file, not preserved as symlink")
// Content should match the target file
content, err := os.ReadFile(linkPath)
require.NoError(t, err)
require.Equal(t, "target content", string(content))
})
t.Run("--dereference-args is deprecated", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
res := node.RunIPFS("add", "-Q", "--dereference-args", filepath.Join(testDir, "link.txt"))
require.Error(t, res.Err)
require.Contains(t, res.Stderr.String(), "--dereference-args is deprecated")
})
})
}
@ -627,30 +748,56 @@ func TestAddFastProvide(t *testing.T) {
})
}
// createDirectoryForHAMT aims to create enough files with long names for the directory block to be close to the UnixFSHAMTDirectorySizeThreshold.
// The calculation is based on boxo's HAMTShardingSize and sizeBelowThreshold which calculates ballpark size of the block
// by adding length of link names and the binary cid length.
// See https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L491
func createDirectoryForHAMT(dirPath string, cidLength int, unixfsNodeSizeTarget, seed string) error {
hamtThreshold, err := humanize.ParseBytes(unixfsNodeSizeTarget)
if err != nil {
return err
}
// createDirectoryForHAMTLinksEstimation creates a directory with numFiles files
// sized for the links-based HAMT size estimation formula:
//
//	size = numFiles * (nameLen + cidLen)
//
// Used by legacy profiles (unixfs-v0-2015).
//
// Threshold behavior: boxo uses a strict > comparison, so a directory sized
// exactly at the threshold stays a basic directory; callers choose numFiles and
// nameLen to land exactly at the threshold (basic case) or one link over it
// (HAMT case).
//
// lastNameLen lets the final file use a different name length than the rest,
// enabling exact +1 byte threshold tests.
//
// cidLength does not affect file creation; it is kept in the signature so call
// sites document the cidLen term of the estimation formula above.
//
// See boxo/ipld/unixfs/io/directory.go sizeBelowThreshold() for the links estimation.
func createDirectoryForHAMTLinksEstimation(dirPath string, cidLength, numFiles, nameLen, lastNameLen int, seed string) error {
	return createDeterministicFiles(dirPath, numFiles, nameLen, lastNameLen, seed)
}
// Calculate how many files with long filenames are needed to hit UnixFSHAMTDirectorySizeThreshold
nameLen := 255 // max that works across windows/macos/linux
// createDirectoryForHAMTBlockEstimation creates a directory with numFiles files
// sized for the block-based HAMT size estimation, which counts the full
// serialized dag-pb link size (name + CID + protobuf tag/varint/wrapper
// overhead) rather than just name and CID byte lengths.
// Used by modern profiles (unixfs-v1-2025).
//
// Threshold behavior: boxo uses a strict > comparison, so a directory sized
// exactly at the threshold stays a basic directory; callers choose numFiles and
// nameLen to land exactly at the threshold (basic case) or one link over it
// (HAMT case).
//
// lastNameLen lets the final file use a different name length than the rest,
// enabling exact +1 byte threshold tests.
//
// cidLength does not affect file creation; it is kept in the signature so call
// sites document the per-link CID contribution to the serialized size.
//
// See boxo/ipld/unixfs/io/directory.go estimatedBlockSize() for the block estimation.
func createDirectoryForHAMTBlockEstimation(dirPath string, cidLength, numFiles, nameLen, lastNameLen int, seed string) error {
	return createDeterministicFiles(dirPath, numFiles, nameLen, lastNameLen, seed)
}
// createDeterministicFiles creates numFiles files with deterministic names.
// Files 0 to numFiles-2 have nameLen characters, and the last file has lastNameLen characters.
// Each file contains "x" (1 byte) for non-zero tsize in directory links.
func createDeterministicFiles(dirPath string, numFiles, nameLen, lastNameLen int, seed string) error {
alphabetLen := len(testutils.AlphabetEasy)
numFiles := int(hamtThreshold) / (nameLen + cidLength)
// Deterministic pseudo-random bytes for static CID
drand, err := testutils.DeterministicRandomReader(unixfsNodeSizeTarget, seed)
// Deterministic pseudo-random bytes for static filenames
drand, err := testutils.DeterministicRandomReader("1MiB", seed)
if err != nil {
return err
}
// Create necessary files in a single, flat directory
for i := 0; i < numFiles; i++ {
buf := make([]byte, nameLen)
// Use lastNameLen for the final file
currentNameLen := nameLen
if i == numFiles-1 {
currentNameLen = lastNameLen
}
buf := make([]byte, currentNameLen)
_, err := io.ReadFull(drand, buf)
if err != nil {
return err
@ -658,21 +805,17 @@ func createDirectoryForHAMT(dirPath string, cidLength int, unixfsNodeSizeTarget,
// Convert deterministic pseudo-random bytes to ASCII
var sb strings.Builder
for _, b := range buf {
// Map byte to printable ASCII range (33-126)
char := testutils.AlphabetEasy[int(b)%alphabetLen]
sb.WriteRune(char)
}
filename := sb.String()[:nameLen]
filename := sb.String()[:currentNameLen]
filePath := filepath.Join(dirPath, filename)
// Create empty file
f, err := os.Create(filePath)
if err != nil {
// Create file with 1-byte content for non-zero tsize
if err := os.WriteFile(filePath, []byte("x"), 0644); err != nil {
return err
}
f.Close()
}
return nil
}

View File

@ -0,0 +1,592 @@
package cli
import (
	"encoding/json"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"testing"

	"github.com/ipfs/kubo/test/cli/harness"
	"github.com/ipfs/kubo/test/cli/testutils"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
// cidProfileExpectations defines expected behaviors for a UnixFS import profile.
// This allows DRY testing of multiple profiles with the same test logic.
//
// Each field is either a knob the profile is expected to set (chunker, fanout,
// thresholds) or a precomputed expectation (deterministic CIDs) used to pin CID
// stability across releases. Empty expected-CID fields skip the exact-CID check.
type cidProfileExpectations struct {
	// Profile identification
	Name        string   // canonical profile name from IPIP-499
	ProfileArgs []string // args to pass to ipfs init (empty for default behavior)

	// CID format expectations
	CIDVersion int    // 0 or 1
	HashFunc   string // e.g., "sha2-256"
	RawLeaves  bool   // true = raw codec for small files, false = dag-pb wrapped

	// File chunking expectations
	ChunkSize    string // e.g., "1MiB" or "256KiB"
	FileMaxLinks int    // max links before DAG rebalancing

	// HAMT directory sharding expectations.
	// Threshold behavior: boxo converts to HAMT when size > HAMTThreshold (not >=).
	// This means a directory exactly at the threshold stays as a basic (flat) directory.
	HAMTFanout         int    // max links per HAMT shard bucket (256)
	HAMTThreshold      int    // sharding threshold in bytes (262144 = 256 KiB)
	HAMTSizeEstimation string // "block" (protobuf size) or "links" (legacy name+cid)

	// Test vector parameters for threshold boundary tests.
	// - DirBasic: size == threshold (stays basic)
	// - DirHAMT: size > threshold (converts to HAMT)
	// For block estimation, last filename length is adjusted to hit exact thresholds.
	DirBasicNameLen     int // filename length for basic directory (files 0 to N-2)
	DirBasicLastNameLen int // filename length for last file (0 = same as DirBasicNameLen)
	DirBasicFiles       int // file count for basic directory (at exact threshold)
	DirHAMTNameLen      int // filename length for HAMT directory (files 0 to N-2)
	DirHAMTLastNameLen  int // filename length for last file (0 = same as DirHAMTNameLen)
	DirHAMTFiles        int // total file count for HAMT directory (over threshold)

	// Expected deterministic CIDs for test vectors
	SmallFileCID        string // CID for the small-file vector ("hello world")
	FileAtMaxLinksCID   string // CID for file at max links
	FileOverMaxLinksCID string // CID for file triggering rebalance
	DirBasicCID         string // CID for basic directory (at exact threshold, stays flat)
	DirHAMTCID          string // CID for HAMT directory (over threshold, sharded)
}
// unixfsV02015 is the legacy profile for backward-compatible CID generation.
// Alias: legacy-cid-v0
//
// CIDv0, sha2-256, dag-pb-wrapped leaves, 256 KiB chunks, and the legacy
// links-based HAMT size estimation (name length + CID byte length per entry).
var unixfsV02015 = cidProfileExpectations{
	Name:               "unixfs-v0-2015",
	ProfileArgs:        []string{"--profile=unixfs-v0-2015"},
	CIDVersion:         0,
	HashFunc:           "sha2-256",
	RawLeaves:          false,
	ChunkSize:          "256KiB",
	FileMaxLinks:       174,
	HAMTFanout:         256,
	HAMTThreshold:      262144, // 256 KiB
	HAMTSizeEstimation: "links",
	// Links estimation: size = files * (nameLen + 34-byte CIDv0)
	DirBasicNameLen:     30,   // 4096 * (30 + 34) = 262144 exactly at threshold
	DirBasicFiles:       4096, // 4096 * 64 = 262144 (stays basic with >)
	DirHAMTNameLen:      31,   // 4033 * (31 + 34) = 262145 exactly +1 over threshold
	DirHAMTLastNameLen:  0,    // 0 = same as DirHAMTNameLen (uniform filenames)
	DirHAMTFiles:        4033, // 4033 * 65 = 262145 (becomes HAMT)
	SmallFileCID:        "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", // "hello world" dag-pb wrapped
	FileAtMaxLinksCID:   "QmUbBALi174SnogsUzLpYbD4xPiBSFANF4iztWCsHbMKh2", // 44544KiB with seed "v0-seed"
	FileOverMaxLinksCID: "QmepeWtdmS1hHXx1oZXsPUv6bMrfRRKfZcoPPU4eEfjnbf", // 44800KiB with seed "v0-seed"
	DirBasicCID:         "QmX5GtRk3TSSEHtdrykgqm4eqMEn3n2XhfkFAis5fjyZmN", // 4096 files at threshold
	DirHAMTCID:          "QmeMiJzmhpJAUgynAcxTQYek5PPKgdv3qEvFsdV3XpVnvP", // 4033 files +1 over threshold
}
// unixfsV12025 is the recommended profile for cross-implementation CID determinism.
//
// CIDv1, sha2-256, raw leaves, 1 MiB chunks, and the block-based HAMT size
// estimation (actual serialized dag-pb block size).
var unixfsV12025 = cidProfileExpectations{
	Name:               "unixfs-v1-2025",
	ProfileArgs:        []string{"--profile=unixfs-v1-2025"},
	CIDVersion:         1,
	HashFunc:           "sha2-256",
	RawLeaves:          true,
	ChunkSize:          "1MiB",
	FileMaxLinks:       1024,
	HAMTFanout:         256,
	HAMTThreshold:      262144, // 256 KiB
	HAMTSizeEstimation: "block",
	// Block size = numFiles * linkSize + 4 bytes overhead
	// LinkSerializedSize(11, 36, 1) = 55, LinkSerializedSize(21, 36, 1) = 65, LinkSerializedSize(22, 36, 1) = 66
	DirBasicNameLen:     11,   // 4765 files * 55 bytes
	DirBasicLastNameLen: 21,   // last file: 65 bytes; total: 4765*55 + 65 + 4 = 262144 (at threshold)
	DirBasicFiles:       4766, // stays basic with > comparison
	DirHAMTNameLen:      11,   // 4765 files * 55 bytes
	DirHAMTLastNameLen:  22,   // last file: 66 bytes; total: 4765*55 + 66 + 4 = 262145 (+1 over threshold)
	DirHAMTFiles:        4766, // becomes HAMT
	SmallFileCID:        "bafkreifzjut3te2nhyekklss27nh3k72ysco7y32koao5eei66wof36n5e", // "hello world" raw leaf
	FileAtMaxLinksCID:   "bafybeihmf37wcuvtx4hpu7he5zl5qaf2ineo2lqlfrapokkm5zzw7zyhvm", // 1024MiB with seed "v1-2025-seed"
	FileOverMaxLinksCID: "bafybeihmzokxxjqwxjcryerhp5ezpcog2wcawfryb2xm64xiakgm4a5jue", // 1025MiB with seed "v1-2025-seed"
	DirBasicCID:         "bafybeic3h7rwruealwxkacabdy45jivq2crwz6bufb5ljwupn36gicplx4", // 4766 files at 262144 bytes (threshold)
	DirHAMTCID:          "bafybeiegvuterwurhdtkikfhbxcldohmxp566vpjdofhzmnhv6o4freidu", // 4766 files at 262145 bytes (+1 over)
}
// defaultProfile points to the profile that matches Kubo's implicit default behavior.
// Today this is unixfs-v0-2015. When Kubo changes defaults, update this pointer.
var defaultProfile = unixfsV02015

// CID byte lengths fed into the links-based directory size estimation.
const (
	cidV0Length = 34 // CIDv0 sha2-256
	cidV1Length = 36 // CIDv1 sha2-256
)
// TestCIDProfiles generates deterministic test vectors for CID profile verification.
// Set CID_PROFILES_CAR_OUTPUT environment variable to export CAR files.
// Example: CID_PROFILES_CAR_OUTPUT=/tmp/cid-profiles go test -run TestCIDProfiles -v
func TestCIDProfiles(t *testing.T) {
	t.Parallel()
	carDir := os.Getenv("CID_PROFILES_CAR_OUTPUT")
	wantCARs := carDir != ""
	if wantCARs {
		if err := os.MkdirAll(carDir, 0755); err != nil {
			t.Fatalf("failed to create CAR output directory: %v", err)
		}
		t.Logf("CAR export enabled, writing to: %s", carDir)
	}
	// Exercise both IPIP-499 profiles with the shared test logic.
	for _, exp := range []cidProfileExpectations{unixfsV02015, unixfsV12025} {
		t.Run(exp.Name, func(t *testing.T) {
			t.Parallel()
			runProfileTests(t, exp, carDir, wantCARs)
		})
	}
	// The implicit default (no profile flag) must behave like defaultProfile
	// (currently unixfs-v0-2015).
	t.Run("default", func(t *testing.T) {
		t.Parallel()
		exp := defaultProfile
		exp.Name = "default"
		exp.ProfileArgs = nil // no profile args = default behavior
		runProfileTests(t, exp, carDir, wantCARs)
	})
}
// runProfileTests runs all test vectors for a given profile.
//
// Each subtest spins up a fresh node initialized with exp.ProfileArgs, builds a
// deterministic input (string, seeded file, or seeded directory), and verifies
// structural properties (CID version, hash function, codec, link counts) plus —
// when the profile supplies one — the exact expected CID. When exportCARs is
// true, each vector's DAG is additionally exported as a CAR file into
// carOutputDir so the vectors can be reused as external fixtures.
func runProfileTests(t *testing.T, exp cidProfileExpectations, carOutputDir string, exportCARs bool) {
	// CID byte length feeds the links-based directory size verification below.
	cidLen := cidV0Length
	if exp.CIDVersion == 1 {
		cidLen = cidV1Length
	}
	t.Run("small-file", func(t *testing.T) {
		t.Parallel()
		node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
		node.StartDaemon()
		defer node.StopDaemon()
		// Use "hello world" for determinism - matches CIDs in add_test.go
		cidStr := node.IPFSAddStr("hello world")
		// Verify CID version
		verifyCIDVersion(t, node, cidStr, exp.CIDVersion)
		// Verify hash function
		verifyHashFunction(t, node, cidStr, exp.HashFunc)
		// Verify raw leaves vs wrapped
		verifyRawLeaves(t, node, cidStr, exp.RawLeaves)
		// Verify deterministic CID if expected
		if exp.SmallFileCID != "" {
			require.Equal(t, exp.SmallFileCID, cidStr, "expected deterministic CID for small file")
		}
		if exportCARs {
			carPath := filepath.Join(carOutputDir, exp.Name+"_small-file.car")
			require.NoError(t, node.IPFSDagExport(cidStr, carPath))
			t.Logf("exported: %s -> %s", cidStr, carPath)
		}
	})
	t.Run("file-at-max-links", func(t *testing.T) {
		t.Parallel()
		node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
		node.StartDaemon()
		defer node.StopDaemon()
		// Calculate file size: maxLinks * chunkSize
		fileSize := fileAtMaxLinksSize(exp)
		// Seed matches add_test.go for deterministic CIDs
		seed := seedForProfile(exp)
		cidStr := node.IPFSAddDeterministic(fileSize, seed)
		root, err := node.InspectPBNode(cidStr)
		assert.NoError(t, err)
		// Exactly at the limit: the root must hold FileMaxLinks direct links.
		require.Equal(t, exp.FileMaxLinks, len(root.Links),
			"expected exactly %d links at max", exp.FileMaxLinks)
		// Verify hash function on root
		verifyHashFunction(t, node, cidStr, exp.HashFunc)
		// Verify deterministic CID if expected
		if exp.FileAtMaxLinksCID != "" {
			require.Equal(t, exp.FileAtMaxLinksCID, cidStr, "expected deterministic CID for file at max links")
		}
		if exportCARs {
			carPath := filepath.Join(carOutputDir, exp.Name+"_file-at-max-links.car")
			require.NoError(t, node.IPFSDagExport(cidStr, carPath))
			t.Logf("exported: %s -> %s", cidStr, carPath)
		}
	})
	t.Run("file-over-max-links-rebalanced", func(t *testing.T) {
		t.Parallel()
		node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
		node.StartDaemon()
		defer node.StopDaemon()
		// One more chunk triggers rebalancing
		fileSize := fileOverMaxLinksSize(exp)
		// Seed matches add_test.go for deterministic CIDs
		seed := seedForProfile(exp)
		cidStr := node.IPFSAddDeterministic(fileSize, seed)
		root, err := node.InspectPBNode(cidStr)
		assert.NoError(t, err)
		// After rebalancing the root points at two intermediate nodes.
		require.Equal(t, 2, len(root.Links), "expected 2 links after DAG rebalancing")
		// Verify hash function on root
		verifyHashFunction(t, node, cidStr, exp.HashFunc)
		// Verify deterministic CID if expected
		if exp.FileOverMaxLinksCID != "" {
			require.Equal(t, exp.FileOverMaxLinksCID, cidStr, "expected deterministic CID for rebalanced file")
		}
		if exportCARs {
			carPath := filepath.Join(carOutputDir, exp.Name+"_file-over-max-links.car")
			require.NoError(t, node.IPFSDagExport(cidStr, carPath))
			t.Logf("exported: %s -> %s", cidStr, carPath)
		}
	})
	t.Run("dir-basic", func(t *testing.T) {
		t.Parallel()
		node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
		node.StartDaemon()
		defer node.StopDaemon()
		// Use consistent seed for deterministic CIDs
		seed := hamtSeedForProfile(exp)
		randDir, err := os.MkdirTemp(node.Dir, seed)
		require.NoError(t, err)
		// Create basic (flat) directory exactly at threshold.
		// With > comparison, directory at exact threshold stays basic.
		basicLastNameLen := exp.DirBasicLastNameLen
		if basicLastNameLen == 0 {
			basicLastNameLen = exp.DirBasicNameLen
		}
		if exp.HAMTSizeEstimation == "block" {
			err = createDirectoryForHAMTBlockEstimation(randDir, cidLen, exp.DirBasicFiles, exp.DirBasicNameLen, basicLastNameLen, seed)
		} else {
			err = createDirectoryForHAMTLinksEstimation(randDir, cidLen, exp.DirBasicFiles, exp.DirBasicNameLen, basicLastNameLen, seed)
		}
		require.NoError(t, err)
		cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
		root, err := node.InspectPBNode(cidStr)
		assert.NoError(t, err)
		// A flat directory lists every file as a direct link.
		require.Equal(t, exp.DirBasicFiles, len(root.Links),
			"expected basic directory with %d links", exp.DirBasicFiles)
		// Verify hash function
		verifyHashFunction(t, node, cidStr, exp.HashFunc)
		// Verify size is exactly at threshold
		if exp.HAMTSizeEstimation == "block" {
			// Block estimation: verify actual serialized block size
			blockSize := getBlockSize(t, node, cidStr)
			require.Equal(t, exp.HAMTThreshold, blockSize,
				"expected basic directory block size to be exactly at threshold (%d), got %d", exp.HAMTThreshold, blockSize)
		}
		if exp.HAMTSizeEstimation == "links" {
			// Links estimation: verify sum of (name_len + cid_len) for all links
			linksSize := 0
			for _, link := range root.Links {
				linksSize += len(link.Name) + cidLen
			}
			require.Equal(t, exp.HAMTThreshold, linksSize,
				"expected basic directory links size to be exactly at threshold (%d), got %d", exp.HAMTThreshold, linksSize)
		}
		// Verify deterministic CID
		if exp.DirBasicCID != "" {
			require.Equal(t, exp.DirBasicCID, cidStr, "expected deterministic CID for basic directory")
		}
		if exportCARs {
			carPath := filepath.Join(carOutputDir, exp.Name+"_dir-basic.car")
			require.NoError(t, node.IPFSDagExport(cidStr, carPath))
			t.Logf("exported: %s (%d files) -> %s", cidStr, exp.DirBasicFiles, carPath)
		}
	})
	t.Run("dir-hamt", func(t *testing.T) {
		t.Parallel()
		node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
		node.StartDaemon()
		defer node.StopDaemon()
		// Use consistent seed for deterministic CIDs
		seed := hamtSeedForProfile(exp)
		randDir, err := os.MkdirTemp(node.Dir, seed)
		require.NoError(t, err)
		// Create HAMT (sharded) directory exactly +1 byte over threshold.
		// With > comparison, directory over threshold becomes HAMT.
		lastNameLen := exp.DirHAMTLastNameLen
		if lastNameLen == 0 {
			lastNameLen = exp.DirHAMTNameLen
		}
		if exp.HAMTSizeEstimation == "block" {
			err = createDirectoryForHAMTBlockEstimation(randDir, cidLen, exp.DirHAMTFiles, exp.DirHAMTNameLen, lastNameLen, seed)
		} else {
			err = createDirectoryForHAMTLinksEstimation(randDir, cidLen, exp.DirHAMTFiles, exp.DirHAMTNameLen, lastNameLen, seed)
		}
		require.NoError(t, err)
		cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
		root, err := node.InspectPBNode(cidStr)
		assert.NoError(t, err)
		// A sharded root has at most HAMTFanout bucket links, far fewer than
		// the file count — this distinguishes HAMT from a flat directory.
		require.LessOrEqual(t, len(root.Links), exp.HAMTFanout,
			"expected HAMT directory with <=%d links", exp.HAMTFanout)
		// Verify hash function
		verifyHashFunction(t, node, cidStr, exp.HashFunc)
		// Verify deterministic CID
		if exp.DirHAMTCID != "" {
			require.Equal(t, exp.DirHAMTCID, cidStr, "expected deterministic CID for HAMT directory")
		}
		if exportCARs {
			carPath := filepath.Join(carOutputDir, exp.Name+"_dir-hamt.car")
			require.NoError(t, node.IPFSDagExport(cidStr, carPath))
			t.Logf("exported: %s (%d files, HAMT root links: %d) -> %s",
				cidStr, exp.DirHAMTFiles, len(root.Links), carPath)
		}
	})
}
// verifyCIDVersion checks that the CID string encodes the expected CID version.
// CIDv0 (base58btc, sha2-256) always starts with "Qm"; CIDv1 in its default
// lowercase base32 encoding always starts with "b".
func verifyCIDVersion(t *testing.T, _ *harness.Node, cidStr string, expectedVersion int) {
	t.Helper()
	switch expectedVersion {
	case 0:
		require.True(t, strings.HasPrefix(cidStr, "Qm"),
			"expected CIDv0 (starts with Qm), got: %s", cidStr)
	default:
		require.True(t, strings.HasPrefix(cidStr, "b"),
			"expected CIDv1 (base32, starts with b), got: %s", cidStr)
	}
}
// verifyHashFunction checks that the CID uses the expected multihash function.
func verifyHashFunction(t *testing.T, node *harness.Node, cidStr, expectedHash string) {
	t.Helper()
	// "ipfs cid format -f %h" prints the hash function name for the CID.
	got := strings.TrimSpace(node.IPFS("cid", "format", "-f", "%h", cidStr).Stdout.String())
	require.Equal(t, expectedHash, got,
		"expected hash function %s, got %s for CID %s", expectedHash, got, cidStr)
}
// verifyRawLeaves checks whether the CID represents a raw leaf or dag-pb wrapped block.
// For CIDv1: raw leaves have codec 0x55 (raw), wrapped have codec 0x70 (dag-pb).
// For CIDv0: always dag-pb (no raw leaves possible).
func verifyRawLeaves(t *testing.T, node *harness.Node, cidStr string, expectRaw bool) {
	t.Helper()
	// "ipfs cid format -f %c" prints the codec name for the CID.
	codec := strings.TrimSpace(node.IPFS("cid", "format", "-f", "%c", cidStr).Stdout.String())
	wantCodec := "dag-pb"
	failMsg := "expected dag-pb codec for wrapped leaves, got %s for CID %s"
	if expectRaw {
		wantCodec = "raw"
		failMsg = "expected raw codec for raw leaves, got %s for CID %s"
	}
	require.Equal(t, wantCodec, codec, failMsg, codec, cidStr)
}
// getBlockSize returns the serialized size of a block in bytes via "ipfs block stat".
func getBlockSize(t *testing.T, node *harness.Node, cidStr string) int {
	t.Helper()
	out := node.IPFS("block", "stat", "--enc=json", cidStr).Stdout.Bytes()
	var stat struct {
		Size int `json:"Size"`
	}
	require.NoError(t, json.Unmarshal(out, &stat))
	return stat.Size
}
// fileAtMaxLinksSize returns the human-readable file size that produces exactly
// FileMaxLinks chunks for the profile's chunker (e.g. 1024 * 1MiB -> "1024MiB",
// 174 * 256KiB -> "44544KiB").
//
// Fix: the previous "1MiB" branch hand-built a zero-padded 4-digit number via
// rune arithmetic (after a no-op strings.Replace), which produced strings like
// "0174MiB" for FileMaxLinks < 1000 and broke for values >= 10000. Both branches
// now use intToStr, matching the "256KiB" branch.
func fileAtMaxLinksSize(exp cidProfileExpectations) string {
	switch exp.ChunkSize {
	case "1MiB":
		// FileMaxLinks chunks of 1 MiB each.
		return intToStr(exp.FileMaxLinks) + "MiB"
	case "256KiB":
		// e.g. 174 * 256 KiB = 44544 KiB
		return intToStr(exp.FileMaxLinks*256) + "KiB"
	default:
		panic("unknown chunk size: " + exp.ChunkSize)
	}
}
// fileOverMaxLinksSize returns the file size that triggers DAG rebalancing:
// one chunk more than the profile's FileMaxLinks limit.
func fileOverMaxLinksSize(exp cidProfileExpectations) string {
	chunks := exp.FileMaxLinks + 1
	switch exp.ChunkSize {
	case "1MiB":
		return intToStr(chunks) + "MiB"
	case "256KiB":
		// e.g. (174 + 1) * 256 KiB = 44800 KiB
		return intToStr(chunks*256) + "KiB"
	default:
		panic("unknown chunk size: " + exp.ChunkSize)
	}
}
// intToStr converts an integer to its decimal string representation.
// Replaces a hand-rolled digit loop with the standard library (strconv.Itoa);
// behavior is identical for the non-negative values this file passes, and
// negative inputs now render correctly instead of returning "".
func intToStr(n int) string {
	return strconv.Itoa(n)
}
// seedForProfile returns the deterministic seed used in add_test.go for file tests.
func seedForProfile(exp cidProfileExpectations) string {
	name := exp.Name
	if name == "unixfs-v0-2015" || name == "default" {
		return "v0-seed"
	}
	if name == "unixfs-v1-2025" {
		return "v1-2025-seed"
	}
	return name + "-seed"
}
// hamtSeedForProfile returns the deterministic seed for HAMT directory tests.
// Uses the same seed for both under/at threshold tests to ensure consistency.
func hamtSeedForProfile(exp cidProfileExpectations) string {
	name := exp.Name
	if name == "unixfs-v0-2015" || name == "default" {
		return "hamt-unixfs-v0-2015"
	}
	if name == "unixfs-v1-2025" {
		return "hamt-unixfs-v1-2025"
	}
	return "hamt-" + name
}
// TestDefaultMatchesExpectedProfile verifies that default ipfs add behavior
// matches the expected profile (currently unixfs-v0-2015).
func TestDefaultMatchesExpectedProfile(t *testing.T) {
	t.Parallel()
	defaultNode := harness.NewT(t).NewNode().Init()
	defaultNode.StartDaemon()
	defer defaultNode.StopDaemon()
	profileNode := harness.NewT(t).NewNode().Init(defaultProfile.ProfileArgs...)
	profileNode.StartDaemon()
	defer profileNode.StopDaemon()
	// Add the same 1-byte file on both nodes; the CIDs must be identical.
	gotDefault := defaultNode.IPFSAddStr("x")
	gotProfile := profileNode.IPFSAddStr("x")
	require.Equal(t, gotProfile, gotDefault,
		"default behavior should match %s profile", defaultProfile.Name)
}
// TestProtobufHelpers verifies the protobuf size calculation helpers in testutils.
// These helpers back the block-based HAMT directory test vectors, so their exact
// byte counts must stay stable.
func TestProtobufHelpers(t *testing.T) {
	t.Parallel()
	t.Run("VarintLen", func(t *testing.T) {
		// Varint encoding: 7 bits per byte, MSB indicates continuation
		cases := []struct {
			value    uint64
			expected int
		}{
			{0, 1},
			{127, 1},         // 0x7F - max 1-byte varint
			{128, 2},         // 0x80 - min 2-byte varint
			{16383, 2},       // 0x3FFF - max 2-byte varint
			{16384, 3},       // 0x4000 - min 3-byte varint
			{2097151, 3},     // 0x1FFFFF - max 3-byte varint
			{2097152, 4},     // 0x200000 - min 4-byte varint
			{268435455, 4},   // 0xFFFFFFF - max 4-byte varint
			{268435456, 5},   // 0x10000000 - min 5-byte varint
			{34359738367, 5}, // 0x7FFFFFFFF - max 5-byte varint
		}
		for _, tc := range cases {
			got := testutils.VarintLen(tc.value)
			require.Equal(t, tc.expected, got, "VarintLen(%d)", tc.value)
		}
	})
	t.Run("LinkSerializedSize", func(t *testing.T) {
		// Test typical cases for directory links.
		// Expected values are derived by hand from the dag-pb wire format:
		// inner = Hash + Name + Tsize fields, outer = wrapper tag + len + inner.
		cases := []struct {
			nameLen  int
			cidLen   int
			tsize    uint64
			expected int
		}{
			// 255-char name, CIDv0 (34 bytes), tsize=0
			// Inner: 1+1+34 + 1+2+255 + 1+1 = 296
			// Outer: 1 + 2 + 296 = 299
			{255, 34, 0, 299},
			// 255-char name, CIDv1 (36 bytes), tsize=0
			// Inner: 1+1+36 + 1+2+255 + 1+1 = 298
			// Outer: 1 + 2 + 298 = 301
			{255, 36, 0, 301},
			// Short name (10 chars), CIDv1, tsize=0
			// Inner: 1+1+36 + 1+1+10 + 1+1 = 52
			// Outer: 1 + 1 + 52 = 54
			{10, 36, 0, 54},
			// 255-char name, CIDv1, large tsize
			// Inner: 1+1+36 + 1+2+255 + 1+5 = 302 (tsize uses 5-byte varint)
			// Outer: 1 + 2 + 302 = 305
			{255, 36, 34359738367, 305},
		}
		for _, tc := range cases {
			got := testutils.LinkSerializedSize(tc.nameLen, tc.cidLen, tc.tsize)
			require.Equal(t, tc.expected, got, "LinkSerializedSize(%d, %d, %d)", tc.nameLen, tc.cidLen, tc.tsize)
		}
	})
	t.Run("EstimateFilesForBlockThreshold", func(t *testing.T) {
		// With 255-char names and CIDv1, each link is 301 bytes (see above),
		// so (262144 - 4 base overhead) / 301 = 870 files.
		threshold := 262144
		nameLen := 255
		cidLen := 36
		var tsize uint64 = 0
		numFiles := testutils.EstimateFilesForBlockThreshold(threshold, nameLen, cidLen, tsize)
		require.Equal(t, 870, numFiles, "expected 870 files for threshold 262144")
		numFilesUnder := testutils.EstimateFilesForBlockThreshold(threshold-1, nameLen, cidLen, tsize)
		require.Equal(t, 870, numFilesUnder, "expected 870 files for threshold 262143")
		numFilesOver := testutils.EstimateFilesForBlockThreshold(262185, nameLen, cidLen, tsize)
		require.Equal(t, 871, numFilesOver, "expected 871 files for threshold 262185")
	})
}

View File

@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"io"
"os"
"reflect"
"strings"
@ -148,9 +149,15 @@ func (n *Node) IPFSDagImport(content io.Reader, cid string, args ...string) erro
return res.Err
}
/*
func (n *Node) IPFSDagExport(cid string, car *os.File) error {
log.Debugf("node %d dag export of %s to %q with args: %v", n.ID, cid, car.Name())
// IPFSDagExport exports a DAG rooted at cid to a CAR file at carPath.
func (n *Node) IPFSDagExport(cid string, carPath string) error {
log.Debugf("node %d dag export of %s to %q", n.ID, cid, carPath)
car, err := os.Create(carPath)
if err != nil {
return err
}
defer car.Close()
res := n.Runner.MustRun(RunRequest{
Path: n.IPFSBin,
Args: []string{"dag", "export", cid},
@ -158,4 +165,3 @@ func (n *Node) IPFSDagExport(cid string, car *os.File) error {
})
return res.Err
}
*/

View File

@ -0,0 +1,39 @@
package testutils
import "math/bits"
// VarintLen returns the number of bytes needed to encode v as a protobuf varint.
// A varint carries 7 payload bits per byte; a zero value still occupies one byte.
func VarintLen(v uint64) int {
	bitLen := bits.Len64(v)
	if bitLen == 0 {
		return 1
	}
	return (bitLen + 6) / 7 // ceil(bitLen / 7)
}

// LinkSerializedSize calculates the serialized size of a single PBLink in a dag-pb block.
// This matches the calculation in boxo/ipld/unixfs/io/directory.go estimatedBlockSize().
//
// The protobuf wire format for a PBLink is:
//
//	PBNode.Links wrapper tag (1 byte)
//	+ varint length of inner message
//	+ Hash field: tag (1) + varint(cidLen) + cidLen
//	+ Name field: tag (1) + varint(nameLen) + nameLen
//	+ Tsize field: tag (1) + varint(tsize)
func LinkSerializedSize(nameLen, cidLen int, tsize uint64) int {
	hashField := 1 + VarintLen(uint64(cidLen)) + cidLen
	nameField := 1 + VarintLen(uint64(nameLen)) + nameLen
	tsizeField := 1 + VarintLen(tsize)
	inner := hashField + nameField + tsizeField
	// Outer wrapper: tag (1 byte) + varint(inner) + inner
	return 1 + VarintLen(uint64(inner)) + inner
}

// EstimateFilesForBlockThreshold estimates how many files with given name/cid lengths
// will fit under the block size threshold.
// Returns the number of files that keeps the block size just under the threshold.
func EstimateFilesForBlockThreshold(threshold, nameLen, cidLen int, tsize uint64) int {
	// Base overhead for empty directory node (Data field + minimal structure).
	// Empirically determined to be 4 bytes for dag-pb directories.
	const baseOverhead = 4
	perLink := LinkSerializedSize(nameLen, cidLen, tsize)
	return (threshold - baseOverhead) / perLink
}

View File

@ -135,13 +135,13 @@ require (
github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2 // indirect
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-block-format v0.2.3 // indirect
github.com/ipfs/go-cid v0.6.0 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
github.com/ipfs/go-ipld-cbor v0.2.1 // indirect
github.com/ipfs/go-ipld-format v0.6.3 // indirect

View File

@ -294,8 +294,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2 h1:pRQYSSGnGQa921d8v0uhXg2BGzoSf9ndTWTlR7ImVoo=
github.com/ipfs/boxo v0.35.3-0.20260109213916-89dc184784f2/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3 h1:X6iiSyBUwhKgQMzM57wSXVUZfivm5nWm5S/Y2SrSjhA=
github.com/ipfs/boxo v0.35.3-0.20260119043727-6707376002a3/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
@ -312,8 +312,8 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294 h1:pQ6LhtU+nEBajAgFz3uU7ta6JN4KY0W5T7JxuaRQJVE=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260117043932-17687e216294/go.mod h1:WG//DD2nimQcQ/+MTqB8mSeZQZBZC8KLZ+OeVGk9We0=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=