kubo/test/cli/routing_dht_test.go

package cli

import (
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
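
// waitUntilProvidesComplete polls `ipfs provide stat -a` on the given node
// until both the provide queue and the set of ongoing provides are empty.
// The line shapes parsed below are inferred from the command's human-readable
// output (e.g. "Provide queue: 3 CIDs"); they are not a stable contract.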
func waitUntilProvidesComplete(t *testing.T, n *harness.Node) {
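	// getCidsCount extracts the leading integer from the remainder of a
	// stat line once its prefix has been cut, e.g. " 3 CIDs" -> 3.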
getCidsCount := func(line string) int {
trimmed := strings.TrimSpace(line)
countStr := strings.SplitN(trimmed, " ", 2)[0]
count, err := strconv.Atoi(countStr)
require.NoError(t, err)
return count
}
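	// Keep polling until a single pass over the stat output reports both
	// counters at zero; the short sleep avoids hammering the daemon.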
queuedProvides, ongoingProvides := true, true
for queuedProvides || ongoingProvides {
res := n.IPFS("provide", "stat", "-a")
require.NoError(t, res.Err)
for _, line := range res.Stdout.Lines() {
if trimmed, ok := strings.CutPrefix(line, " Provide queue:"); ok {
provideQueueSize := getCidsCount(trimmed)
queuedProvides = provideQueueSize > 0
}
if trimmed, ok := strings.CutPrefix(line, " Ongoing provides:"); ok {
ongoingProvideCount := getCidsCount(trimmed)
ongoingProvides = ongoingProvideCount > 0
}
}
time.Sleep(10 * time.Millisecond)
}
}
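
// testRoutingDHT spins up a five-node cluster with Routing.Type=dht
// (optionally with the pubsub experiments enabled) and exercises the
// `ipfs routing` subcommands against it.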
func testRoutingDHT(t *testing.T, enablePubsub bool) {
t.Run(fmt.Sprintf("enablePubSub=%v", enablePubsub), func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(5).Init()
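		// Force pure DHT routing so the subtests below exercise the DHT
		// rather than the default auto routing.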
nodes.ForEachPar(func(node *harness.Node) {
node.IPFS("config", "Routing.Type", "dht")
})
var daemonArgs []string
if enablePubsub {
daemonArgs = []string{
"--enable-pubsub-experiment",
"--enable-namesys-pubsub",
}
}
nodes.StartDaemons(daemonArgs...).Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("ipfs routing findpeer", func(t *testing.T) {
t.Parallel()
res := nodes[1].RunIPFS("routing", "findpeer", nodes[0].PeerID().String())
assert.Equal(t, 0, res.ExitCode())
swarmAddr := nodes[0].SwarmAddrsWithoutPeerIDs()[0]
require.Equal(t, swarmAddr.String(), res.Stdout.Trimmed())
})
t.Run("ipfs routing get <key>", func(t *testing.T) {
t.Parallel()
hash := nodes[2].IPFSAddStr("hello world")
nodes[2].IPFS("name", "publish", "/ipfs/"+hash)
res := nodes[1].IPFS("routing", "get", "/ipns/"+nodes[2].PeerID().String())
assert.Contains(t, res.Stdout.String(), "/ipfs/"+hash)
t.Run("put round trips (#3124)", func(t *testing.T) {
t.Parallel()
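			// Putting back the exact record we just fetched should
			// succeed (regression test for #3124).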
nodes[0].WriteBytes("get_result", res.Stdout.Bytes())
res := nodes[0].IPFS("routing", "put", "/ipns/"+nodes[2].PeerID().String(), "get_result")
assert.Greater(t, len(res.Stdout.Lines()), 0, "should put to at least one node")
})
t.Run("put with bad keys fails (issue #5113, #4611)", func(t *testing.T) {
t.Parallel()
keys := []string{"foo", "/pk/foo", "/ipns/foo"}
for _, key := range keys {
t.Run(key, func(t *testing.T) {
t.Parallel()
res := nodes[0].RunIPFS("routing", "put", key)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "invalid")
assert.Empty(t, res.Stdout.String())
})
}
})
t.Run("get with bad keys (issue #4611)", func(t *testing.T) {
for _, key := range []string{"foo", "/pk/foo"} {
t.Run(key, func(t *testing.T) {
t.Parallel()
res := nodes[0].RunIPFS("routing", "get", key)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "invalid")
assert.Empty(t, res.Stdout.String())
})
}
})
})
t.Run("ipfs routing findprovs", func(t *testing.T) {
t.Parallel()
hash := nodes[3].IPFSAddStr("some stuff")
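			// Block until nodes[3] has finished announcing, otherwise
			// findprovs on another node can race the provide and miss it.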
waitUntilProvidesComplete(t, nodes[3])
res := nodes[4].IPFS("routing", "findprovs", hash)
assert.Equal(t, nodes[3].PeerID().String(), res.Stdout.Trimmed())
})
t.Run("routing commands fail when offline", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
			// These subtests cannot run in parallel due to repo locking.
			// This seems like a bug; we should be able to run them without
			// locking the repo.
t.Run("routing findprovs", func(t *testing.T) {
res := node.RunIPFS("routing", "findprovs", testutils.CIDEmptyDir)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "this command must be run in online mode")
})
t.Run("routing findpeer", func(t *testing.T) {
res := node.RunIPFS("routing", "findpeer", testutils.CIDEmptyDir)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "this command must be run in online mode")
})
t.Run("routing put", func(t *testing.T) {
node.WriteBytes("foo", []byte("foo"))
res := node.RunIPFS("routing", "put", "/ipns/"+node.PeerID().String(), "foo")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "can't put while offline: pass `--allow-offline` to override")
})
})
})
}
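
// testSelfFindDHT checks that `findpeer` on a node's own peer ID exits
// non-zero: a node should not return itself as a routing result.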
func testSelfFindDHT(t *testing.T) {
t.Run("ipfs routing findpeer fails for self", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(1).Init()
nodes.ForEachPar(func(node *harness.Node) {
node.IPFS("config", "Routing.Type", "dht")
})
nodes.StartDaemons()
defer nodes.StopDaemons()
res := nodes[0].RunIPFS("dht", "findpeer", nodes[0].PeerID().String())
assert.Equal(t, 1, res.ExitCode())
})
}
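
// TestRoutingDHT runs the routing suite with pubsub disabled and enabled,
// plus the self-findpeer regression check.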
func TestRoutingDHT(t *testing.T) {
testRoutingDHT(t, false)
testRoutingDHT(t, true)
testSelfFindDHT(t)
}