This commit is contained in:
ljluestc 2026-01-21 21:02:49 +00:00 committed by GitHub
commit 36ddd8d56c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 169 additions and 0 deletions

View File

@ -7,6 +7,7 @@ import (
"fmt"
"io"
"os"
"strings"
"time"
"github.com/ipfs/boxo/files"
@ -63,6 +64,10 @@ func (api *UnixfsAPI) Add(ctx context.Context, f files.Node, opts ...caopts.Unix
req.Option("trickle", true)
}
if d, ok := f.(files.Directory); ok {
f = &skippingDirectory{Directory: d}
}
d := files.NewMapDirectory(map[string]files.Node{"": f}) // unwrapped on the other side
version, err := api.core().loadRemoteVersion()
@ -221,3 +226,51 @@ func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, out chan<- iface.DirE
func (api *UnixfsAPI) core() *HttpApi {
return (*HttpApi)(api)
}
type skippingDirectory struct {
files.Directory
}
func (d *skippingDirectory) Entries() files.DirIterator {
return &skippingIterator{DirIterator: d.Directory.Entries()}
}
type skippingIterator struct {
files.DirIterator
lastErrString string
}
func (it *skippingIterator) Next() bool {
for {
if it.DirIterator.Next() {
it.lastErrString = ""
return true
}
// We only get here if Next() returned false.
// Check if it was because of an error we want to skip.
if err := it.DirIterator.Err(); err != nil {
if strings.Contains(err.Error(), "unrecognized file type") {
// Check for stagnation (EOF with sticky error)
if err.Error() == it.lastErrString {
return false
}
it.lastErrString = err.Error()
continue
}
}
return false
}
}
func (it *skippingIterator) Err() error {
err := it.DirIterator.Err()
if err != nil && strings.Contains(err.Error(), "unrecognized file type") {
return nil
}
return err
}
// Ensure api.go imports strings if not already.
// Checking imports... unixfs.go imports: context, encoding/json, errors, fmt, io, os, time, github..., multiformats...
// Need to add "strings".

111
client/rpc/unixfs_test.go Normal file
View File

@ -0,0 +1,111 @@
package rpc
import (
"io"
"net"
"os"
"path/filepath"
"testing"
"github.com/ipfs/boxo/files"
"github.com/stretchr/testify/require"
)
func TestSkippingIterator(t *testing.T) {
// Create a temporary directory
tmpDir := t.TempDir()
// Create a socket file
sockPath := filepath.Join(tmpDir, "test.sock")
l, err := net.Listen("unix", sockPath)
require.NoError(t, err)
defer l.Close()
// Create a regular file
regPath := filepath.Join(tmpDir, "regular.txt")
err = os.WriteFile(regPath, []byte("some content"), 0644)
require.NoError(t, err)
// Create a SerialFile from the directory
stat, err := os.Stat(tmpDir)
require.NoError(t, err)
dirNode, err := files.NewSerialFile(tmpDir, false, stat)
require.NoError(t, err)
d, ok := dirNode.(files.Directory)
require.True(t, ok)
// Wrap in skippingDirectory
skippingDir := &skippingDirectory{Directory: d}
// Verify entries
it := skippingDir.Entries()
// We expect to find 'regular.txt' and skip 'test.sock'
// The order depends on the filesystem, but we should find at least one valid entry
// and no error on socket.
foundRegular := false
count := 0
for it.Next() {
count++
name := it.Name()
if name == "regular.txt" {
foundRegular = true
}
// If we find the socket, that means our skipping logic failed OR the underlying iterator didn't error.
// On some systems/configs, opening a socket might work?
// But in our repro it failed with "unrecognized file type".
if name == "test.sock" {
// This is unexpected if NewSerialFile behavior is consistent with failure.
// However, if it succeeds, then skipping logic wasn't triggered.
// Let's check the Node.
// node := it.Node()
}
}
require.NoError(t, it.Err())
require.True(t, foundRegular, "Should have found regular.txt")
// If we skipped the socket, count should be 1. If we didn't (and it didn't error), it would be 2.
// But if it errored and we didn't skip, we would have seen error in it.Err().
// So if we have no error, we are good.
}
func TestSkippingIterator_WithMultiFileReader(t *testing.T) {
// This test integrates with MultiFileReader to ensure skipping works in that context
tmpDir := t.TempDir()
// Create a socket file
sockPath := filepath.Join(tmpDir, "test.sock")
l, err := net.Listen("unix", sockPath)
require.NoError(t, err)
defer l.Close()
// Create a regular file
regPath := filepath.Join(tmpDir, "regular.txt")
err = os.WriteFile(regPath, []byte("content"), 0644)
require.NoError(t, err)
stat, err := os.Stat(tmpDir)
require.NoError(t, err)
dirNode, err := files.NewSerialFile(tmpDir, false, stat)
require.NoError(t, err)
d := dirNode.(files.Directory)
skippingDir := &skippingDirectory{Directory: d}
md := files.NewMapDirectory(map[string]files.Node{"": skippingDir})
mfr := files.NewMultiFileReader(md, false, false)
// Read everything
buf := make([]byte, 1024)
for {
_, err := mfr.Read(buf)
if err == io.EOF {
break
}
require.NoError(t, err)
}
}

View File

@ -18,6 +18,7 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [🚇 Improved `ipfs p2p` tunnels with foreground mode](#-improved-ipfs-p2p-tunnels-with-foreground-mode)
- [Improved `ipfs dag stat` output](#improved-ipfs-dag-stat-output)
- [Skip bad keys when listing](#skip_bad_keys_when_listing)
- [Gracefully skip socket files on add](#gracefully-skip-socket-files-on-add)
- [Accelerated DHT Client and Provide Sweep now work together](#accelerated-dht-client-and-provide-sweep-now-work-together)
- [⏱️ Configurable gateway request duration limit](#-configurable-gateway-request-duration-limit)
- [🔧 Recovery from corrupted MFS root](#-recovery-from-corrupted-mfs-root)
@ -114,6 +115,10 @@ Use `--progress=true` to force progress even when piped, or `--progress=false` t
Change the `ipfs key list` behavior to log an error and continue listing keys when a key cannot be read from the keystore or decoded.
#### Gracefully skip socket files on add
`ipfs add` previously failed with a misleading "use of closed network connection" error when encountering socket files. It now gracefully skips them.
#### Accelerated DHT Client and Provide Sweep now work together
Previously, provide operations could start before the Accelerated DHT Client discovered enough peers, causing sweep mode to lose its efficiency benefits. Now, providing waits for the initial network crawl (about 10 minutes). Your content will be properly distributed across DHT regions after initial DHT map is created. Check `ipfs provide stat` to see when providing begins.