test: port legacy DHT tests to Go

Gus Eggert 2023-03-08 15:48:56 -05:00 committed by galargh
parent d339059a7f
commit bfa425fc67
6 changed files with 192 additions and 136 deletions

test/cli/dht_legacy_test.go (new file)

@@ -0,0 +1,137 @@
package cli

import (
	"sort"
	"sync"
	"testing"

	"github.com/ipfs/kubo/test/cli/harness"
	"github.com/ipfs/kubo/test/cli/testutils"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

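// TestLegacyDHT ports the legacy sharness DHT tests to Go: five nodes are
// switched to Routing.Type=dht, then exercise findpeer, get/put, findprovs,
// and query, plus the offline failure modes.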
func TestLegacyDHT(t *testing.T) {
	nodes := harness.NewT(t).NewNodes(5).Init()
	nodes.ForEachPar(func(node *harness.Node) {
		node.IPFS("config", "Routing.Type", "dht")
	})
	nodes.StartDaemons().Connect()

	t.Run("ipfs dht findpeer", func(t *testing.T) {
		t.Parallel()
		res := nodes[1].RunIPFS("dht", "findpeer", nodes[0].PeerID().String())
		assert.Equal(t, 0, res.ExitCode())

		swarmAddr := nodes[0].SwarmAddrsWithoutPeerIDs()[0]
		require.Equal(t, swarmAddr.String(), res.Stdout.Trimmed())
	})

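	// Publishing an IPNS record stores it in the DHT, so "dht get" on the
	// corresponding /ipns/ key should return the published path.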
t.Run("ipfs dht get <key>", func(t *testing.T) {
t.Parallel()
hash := nodes[2].IPFSAddStr("hello world")
nodes[2].IPFS("name", "publish", "/ipfs/"+hash)
res := nodes[1].IPFS("dht", "get", "/ipns/"+nodes[2].PeerID().String())
assert.Contains(t, res.Stdout.String(), "/ipfs/"+hash)
t.Run("put round trips (#3124)", func(t *testing.T) {
t.Parallel()
nodes[0].WriteBytes("get_result", res.Stdout.Bytes())
res := nodes[0].IPFS("dht", "put", "/ipns/"+nodes[2].PeerID().String(), "get_result")
assert.Greater(t, len(res.Stdout.Lines()), 0, "should put to at least one node")
})
t.Run("put with bad keys fails (issue #5113, #4611)", func(t *testing.T) {
t.Parallel()
keys := []string{"foo", "/pk/foo", "/ipns/foo"}
for _, key := range keys {
key := key
t.Run(key, func(t *testing.T) {
t.Parallel()
res := nodes[0].RunIPFS("dht", "put", key)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "invalid")
assert.Empty(t, res.Stdout.String())
})
}
})
t.Run("get with bad keys (issue #4611)", func(t *testing.T) {
for _, key := range []string{"foo", "/pk/foo"} {
key := key
t.Run(key, func(t *testing.T) {
t.Parallel()
res := nodes[0].RunIPFS("dht", "get", key)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "invalid")
assert.Empty(t, res.Stdout.String())
})
}
})
})
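	// Only node 3 has the content, so it should be the sole provider found.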
t.Run("ipfs dht findprovs", func(t *testing.T) {
t.Parallel()
hash := nodes[3].IPFSAddStr("some stuff")
res := nodes[4].IPFS("dht", "findprovs", hash)
assert.Equal(t, nodes[3].PeerID().String(), res.Stdout.Trimmed())
})
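	// "dht query" lists peers by closeness to the key; the first output line
	// is the closest peer from the queried node's point of view.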
t.Run("ipfs dht query <peerID>", func(t *testing.T) {
t.Parallel()
t.Run("normal DHT configuration", func(t *testing.T) {
t.Parallel()
hash := nodes[0].IPFSAddStr("some other stuff")
peerCounts := map[string]int{}
peerCountsMut := sync.Mutex{}
harness.Nodes(nodes).ForEachPar(func(node *harness.Node) {
res := node.IPFS("dht", "query", hash)
closestPeer := res.Stdout.Lines()[0]
// check that it's a valid peer ID
_, err := peer.Decode(closestPeer)
require.NoError(t, err)
peerCountsMut.Lock()
peerCounts[closestPeer]++
peerCountsMut.Unlock()
})
// 4 nodes should see the same peer ID
// 1 node (the closest) should see a different one
var counts []int
for _, count := range peerCounts {
counts = append(counts, count)
}
sort.IntSlice(counts).Sort()
assert.Equal(t, []int{1, 4}, counts)
})
})
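	// A fresh node whose daemon was never started is offline, so every DHT
	// command should fail with an "online mode" error.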
t.Run("dht commands fail when offline", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// these cannot be run in parallel due to repo locking (seems like a bug)
t.Run("dht findprovs", func(t *testing.T) {
res := node.RunIPFS("dht", "findprovs", testutils.CIDEmptyDir)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "this command must be run in online mode")
})
t.Run("dht findpeer", func(t *testing.T) {
res := node.RunIPFS("dht", "findpeer", testutils.CIDEmptyDir)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "this command must be run in online mode")
})
t.Run("dht put", func(t *testing.T) {
node.WriteBytes("foo", []byte("foo"))
res := node.RunIPFS("dht", "put", "/ipns/"+node.PeerID().String(), "foo")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "this action must be run in online mode")
})
})
}


@@ -171,7 +171,7 @@ func (h *Harness) Mkdirs(paths ...string) {
	}
}

-func (h *Harness) Sh(expr string) RunResult {
+func (h *Harness) Sh(expr string) *RunResult {
	return h.Runner.Run(RunRequest{
		Path: "bash",
		Args: []string{"-c", expr},


@@ -129,23 +129,23 @@ func (n *Node) UpdateConfigAndUserSuppliedResourceManagerOverrides(f func(cfg *c
	n.WriteUserSuppliedResourceOverrides(overrides)
}

-func (n *Node) IPFS(args ...string) RunResult {
+func (n *Node) IPFS(args ...string) *RunResult {
	res := n.RunIPFS(args...)
	n.Runner.AssertNoError(res)
	return res
}

-func (n *Node) PipeStrToIPFS(s string, args ...string) RunResult {
+func (n *Node) PipeStrToIPFS(s string, args ...string) *RunResult {
	return n.PipeToIPFS(strings.NewReader(s), args...)
}

-func (n *Node) PipeToIPFS(reader io.Reader, args ...string) RunResult {
+func (n *Node) PipeToIPFS(reader io.Reader, args ...string) *RunResult {
	res := n.RunPipeToIPFS(reader, args...)
	n.Runner.AssertNoError(res)
	return res
}

-func (n *Node) RunPipeToIPFS(reader io.Reader, args ...string) RunResult {
+func (n *Node) RunPipeToIPFS(reader io.Reader, args ...string) *RunResult {
	return n.Runner.Run(RunRequest{
		Path: n.IPFSBin,
		Args: args,
@@ -153,7 +153,7 @@ func (n *Node) RunPipeToIPFS(reader io.Reader, args ...string) RunResult {
	})
}

-func (n *Node) RunIPFS(args ...string) RunResult {
+func (n *Node) RunIPFS(args ...string) *RunResult {
	return n.Runner.Run(RunRequest{
		Path: n.IPFSBin,
		Args: args,
@@ -216,7 +216,7 @@ func (n *Node) StartDaemon(ipfsArgs ...string) *Node {
		RunFunc: (*exec.Cmd).Start,
	})

-	n.Daemon = &res
+	n.Daemon = res

	log.Debugf("node %d started, checking API", n.ID)
	n.WaitOnAPI()
@@ -399,8 +399,6 @@ func (n *Node) SwarmAddrs() []multiaddr.Multiaddr {
		Path: n.IPFSBin,
		Args: []string{"swarm", "addrs", "local"},
	})
-	ipfsProtocol := multiaddr.ProtocolWithCode(multiaddr.P_IPFS).Name
-	peerID := n.PeerID()
	out := strings.TrimSpace(res.Stdout.String())
	outLines := strings.Split(out, "\n")
	var addrs []multiaddr.Multiaddr
@@ -409,9 +407,18 @@
		if err != nil {
			panic(err)
		}
+		addrs = append(addrs, ma)
+	}
+	return addrs
+}
+
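+// SwarmAddrsWithPeerIDs returns the node's swarm addresses, appending the
+// /ipfs/<peer-id> component to any address that is missing it.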
+func (n *Node) SwarmAddrsWithPeerIDs() []multiaddr.Multiaddr {
+	ipfsProtocol := multiaddr.ProtocolWithCode(multiaddr.P_IPFS).Name
+	peerID := n.PeerID()
+	var addrs []multiaddr.Multiaddr
+	for _, ma := range n.SwarmAddrs() {
		// add the peer ID to the multiaddr if it doesn't have it
-		_, err = ma.ValueForProtocol(multiaddr.P_IPFS)
+		_, err := ma.ValueForProtocol(multiaddr.P_IPFS)
		if errors.Is(err, multiaddr.ErrProtocolNotFound) {
			comp, err := multiaddr.NewComponent(ipfsProtocol, peerID.String())
			if err != nil {
@@ -424,10 +431,27 @@ func (n *Node) SwarmAddrs() []multiaddr.Multiaddr {
	return addrs
}

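+// SwarmAddrsWithoutPeerIDs returns the node's swarm addresses with any
+// /ipfs/<peer-id> component stripped.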
+func (n *Node) SwarmAddrsWithoutPeerIDs() []multiaddr.Multiaddr {
+	var addrs []multiaddr.Multiaddr
+	for _, ma := range n.SwarmAddrs() {
+		var components []multiaddr.Multiaddr
+		multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
+			if c.Protocol().Code == multiaddr.P_IPFS {
+				return true
+			}
+			components = append(components, &c)
+			return true
+		})
+		ma = multiaddr.Join(components...)
+		addrs = append(addrs, ma)
+	}
+	return addrs
+}
+
func (n *Node) Connect(other *Node) *Node {
	n.Runner.MustRun(RunRequest{
		Path: n.IPFSBin,
-		Args: []string{"swarm", "connect", other.SwarmAddrs()[0].String()},
+		Args: []string{"swarm", "connect", other.SwarmAddrsWithPeerIDs()[0].String()},
	})
	return n
}


@@ -4,6 +4,7 @@ import (
	"sync"

	"github.com/multiformats/go-multiaddr"
+	"golang.org/x/sync/errgroup"
)
// Nodes is a collection of Kubo nodes along with operations on groups of nodes.
@@ -16,6 +17,21 @@ func (n Nodes) Init(args ...string) Nodes {
	return n
}

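+// ForEachPar applies f to every node concurrently and blocks until all of
+// the calls have returned.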
+func (n Nodes) ForEachPar(f func(*Node)) {
+	group := &errgroup.Group{}
+	for _, node := range n {
+		node := node
+		group.Go(func() error {
+			f(node)
+			return nil
+		})
+	}
+	err := group.Wait()
+	if err != nil {
+		panic(err)
+	}
+}
+
func (n Nodes) Connect() Nodes {
	wg := sync.WaitGroup{}
	for i, node := range n {


@@ -51,7 +51,7 @@ func environToMap(environ []string) map[string]string {
	return m
}

-func (r *Runner) Run(req RunRequest) RunResult {
+func (r *Runner) Run(req RunRequest) *RunResult {
	cmd := exec.Command(req.Path, req.Args...)
	stdout := &Buffer{}
	stderr := &Buffer{}
@@ -86,17 +86,17 @@ func (r *Runner) Run(req RunRequest) RunResult {
		result.ExitErr = exitErr
	}

-	return result
+	return &result
}

// MustRun runs the command and fails the test if the command fails.
-func (r *Runner) MustRun(req RunRequest) RunResult {
+func (r *Runner) MustRun(req RunRequest) *RunResult {
	result := r.Run(req)
	r.AssertNoError(result)
	return result
}

-func (r *Runner) AssertNoError(result RunResult) {
+func (r *Runner) AssertNoError(result *RunResult) {
	if result.ExitErr != nil {
		log.Panicf("'%s' returned error, code: %d, err: %s\nstdout:%s\nstderr:%s\n",
			result.Cmd.Args, result.ExitErr.ExitCode(), result.ExitErr.Error(), result.Stdout.String(), result.Stderr.String())


@@ -1,121 +0,0 @@
#!/usr/bin/env bash

# Legacy / deprecated, see: t0170-routing-dht.sh
test_description="Test dht command"

. lib/test-lib.sh

test_dht() {
  NUM_NODES=5

  test_expect_success 'init iptb' '
    rm -rf .iptb/ &&
    iptb testbed create -type localipfs -count $NUM_NODES -init
  '

  test_expect_success 'DHT-only routing' '
    iptb run -- ipfs config Routing.Type dht
  '

  startup_cluster $NUM_NODES $@

  test_expect_success 'peer ids' '
    PEERID_0=$(iptb attr get 0 id) &&
    PEERID_2=$(iptb attr get 2 id)
  '

  # ipfs dht findpeer <peerID>
  test_expect_success 'findpeer' '
    ipfsi 1 dht findpeer $PEERID_0 | sort >actual &&
    ipfsi 0 id -f "<addrs>" | cut -d / -f 1-5 | sort >expected &&
    test_cmp actual expected
  '

  # ipfs dht get <key>
  test_expect_success 'get with good keys works' '
    HASH="$(echo "hello world" | ipfsi 2 add -q)" &&
    ipfsi 2 name publish "/ipfs/$HASH" &&
    ipfsi 1 dht get "/ipns/$PEERID_2" >get_result
  '

  test_expect_success 'get with good keys contains the right value' '
    cat get_result | grep -aq "/ipfs/$HASH"
  '

  test_expect_success 'put round trips (#3124)' '
    ipfsi 0 dht put "/ipns/$PEERID_2" get_result | sort >putted &&
    [ -s putted ] ||
    test_fsh cat putted
  '

  test_expect_success 'put with bad keys fails (issue #5113)' '
    ipfsi 0 dht put "foo" <<<bar >putted
    ipfsi 0 dht put "/pk/foo" <<<bar >>putted
    ipfsi 0 dht put "/ipns/foo" <<<bar >>putted
    [ ! -s putted ] ||
    test_fsh cat putted
  '

  test_expect_success 'put with bad keys returns error (issue #4611)' '
    test_must_fail ipfsi 0 dht put "foo" <<<bar &&
    test_must_fail ipfsi 0 dht put "/pk/foo" <<<bar &&
    test_must_fail ipfsi 0 dht put "/ipns/foo" <<<bar
  '

  test_expect_success 'get with bad keys (issue #4611)' '
    test_must_fail ipfsi 0 dht get "foo" &&
    test_must_fail ipfsi 0 dht get "/pk/foo"
  '

  test_expect_success "add a ref so we can find providers for it" '
    echo "some stuff" > afile &&
    HASH=$(ipfsi 3 add -q afile)
  '

  # ipfs dht findprovs <key>
  test_expect_success 'findprovs' '
    ipfsi 4 dht findprovs $HASH > provs &&
    iptb attr get 3 id > expected &&
    test_cmp provs expected
  '

  # ipfs dht query <peerID>
  #
  # We test all nodes. 4 nodes should see the same peer ID, one node (the
  # closest) should see a different one.
  for i in $(test_seq 0 4); do
    test_expect_success "query from $i" '
      ipfsi "$i" dht query "$HASH" | head -1 >closest-$i
    '
  done

  test_expect_success "collecting results" '
    cat closest-* | sort | uniq -c | sed -e "s/ *\([0-9]\+\) .*/\1/g" | sort -g > actual &&
    echo 1 > expected &&
    echo 4 >> expected
  '

  test_expect_success "checking results" '
    test_cmp actual expected
  '

  test_expect_success 'stop iptb' '
    iptb stop
  '

  test_expect_success "dht commands fail when offline" '
    test_must_fail ipfsi 0 dht findprovs "$HASH" 2>err_findprovs &&
    test_must_fail ipfsi 0 dht findpeer "$HASH" 2>err_findpeer &&
    test_must_fail ipfsi 0 dht put "/ipns/$PEERID_2" "get_result" 2>err_put &&
    test_should_contain "this command must be run in online mode" err_findprovs &&
    test_should_contain "this command must be run in online mode" err_findpeer &&
    test_should_contain "this action must be run in online mode" err_put
  '
}

test_dht

test_dht --enable-pubsub-experiment --enable-namesys-pubsub

test_done