mirror of https://github.com/ipfs/kubo.git
shutdown daemon after test (#11135)
This commit is contained in:
parent 1301710a91
commit aa3c88dcdd
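
All of the changes below follow one pattern: every test daemon started with StartDaemon()/StartDaemons() is now shut down when its test finishes, either with defer or with t.Cleanup. A minimal sketch of the two idioms, using the harness API as it appears in the hunks (the test names here are hypothetical):

	// Straight-line test: the deferred call runs when the test function
	// returns, stopping the daemon.
	func TestDeferExample(t *testing.T) {
		node := harness.NewT(t).NewNode().Init().StartDaemon()
		defer node.StopDaemon()
		// ... assertions against the running daemon ...
	}

	// Test with parallel subtests: the parent function returns before its
	// parallel subtests run, so a defer would stop the daemons too early.
	// t.Cleanup callbacks run only after the test and all of its subtests
	// have finished.
	func TestCleanupExample(t *testing.T) {
		nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
		t.Cleanup(func() { nodes.StopDaemons() })
		t.Run("subtest", func(t *testing.T) {
			t.Parallel()
			// ... assertions ...
		})
	}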
@@ -39,7 +39,9 @@ func TestBackupBootstrapPeers(t *testing.T) {
 
 	// Start 1 and 2. 2 does not know anyone yet.
 	nodes[1].StartDaemon()
+	defer nodes[1].StopDaemon()
 	nodes[2].StartDaemon()
+	defer nodes[2].StopDaemon()
 	assert.Len(t, nodes[1].Peers(), 0)
 	assert.Len(t, nodes[2].Peers(), 0)
 

@@ -51,6 +53,7 @@ func TestBackupBootstrapPeers(t *testing.T) {
 	// Start 0, wait a bit. Should connect to 1, and then discover 2 via the
 	// backup bootstrap peers.
 	nodes[0].StartDaemon()
+	defer nodes[0].StopDaemon()
 	time.Sleep(time.Millisecond * 500)
 
 	// Check if they're all connected.

@@ -22,7 +22,9 @@ func TestBitswapConfig(t *testing.T) {
 	t.Parallel()
 	h := harness.NewT(t)
 	provider := h.NewNode().Init().StartDaemon()
+	defer provider.StopDaemon()
 	requester := h.NewNode().Init().StartDaemon()
+	defer requester.StopDaemon()
 
 	hash := provider.IPFSAddStr(string(testData))
 	requester.Connect(provider)

@@ -38,8 +40,10 @@ func TestBitswapConfig(t *testing.T) {
 	provider := h.NewNode().Init()
 	provider.SetIPFSConfig("Bitswap.ServerEnabled", false)
 	provider = provider.StartDaemon()
+	defer provider.StopDaemon()
 
 	requester := h.NewNode().Init().StartDaemon()
+	defer requester.StopDaemon()
 
 	hash := provider.IPFSAddStr(string(testData))
 	requester.Connect(provider)

@@ -70,8 +74,10 @@ func TestBitswapConfig(t *testing.T) {
 	requester := h.NewNode().Init()
 	requester.SetIPFSConfig("Bitswap.ServerEnabled", false)
 	requester.StartDaemon()
+	defer requester.StopDaemon()
 
 	provider := h.NewNode().Init().StartDaemon()
+	defer provider.StopDaemon()
 	hash := provider.IPFSAddStr(string(testData))
 	requester.Connect(provider)
 

@@ -91,8 +97,10 @@ func TestBitswapConfig(t *testing.T) {
 		cfg.HTTPRetrieval.Enabled = config.True
 	})
 	requester.StartDaemon()
+	defer requester.StopDaemon()
 
 	provider := h.NewNode().Init().StartDaemon()
+	defer provider.StopDaemon()
 	hash := provider.IPFSAddStr(string(testData))
 
 	requester.Connect(provider)

@@ -126,7 +134,9 @@ func TestBitswapConfig(t *testing.T) {
 		cfg.HTTPRetrieval.Enabled = config.True
 	})
 	provider = provider.StartDaemon()
+	defer provider.StopDaemon()
 	requester := h.NewNode().Init().StartDaemon()
+	defer requester.StopDaemon()
 	requester.Connect(provider)
 
 	// read libp2p identify from remote peer, and print protocols
@@ -76,6 +76,7 @@ func TestContentBlocking(t *testing.T) {
 
 	// Start daemon, it should pick up denylist from $IPFS_PATH/denylists/test.deny
 	node.StartDaemon() // we need online mode for GatewayOverLibp2p tests
+	t.Cleanup(func() { node.StopDaemon() })
 	client := node.GatewayClient()
 
 	// First, confirm gateway works

@@ -47,6 +47,8 @@ func TestDag(t *testing.T) {
 	t.Run("ipfs dag stat --enc=json", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
+
 		// Import fixture
 		r, err := os.Open(fixtureFile)
 		assert.Nil(t, err)

@@ -91,6 +93,7 @@ func TestDag(t *testing.T) {
 	t.Run("ipfs dag stat", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		r, err := os.Open(fixtureFile)
 		assert.NoError(t, err)
 		defer r.Close()
@@ -60,6 +60,10 @@ func TestRoutingV1Proxy(t *testing.T) {
 	})
 	nodes[2].StartDaemon()
+
+	t.Cleanup(func() {
+		nodes.StopDaemons()
+	})
 
 	// Connect them.
 	nodes.Connect()
 

@@ -32,6 +32,7 @@ func TestRoutingV1Server(t *testing.T) {
 		})
 	})
 	nodes.StartDaemons().Connect()
+	t.Cleanup(func() { nodes.StopDaemons() })
 	return nodes
 }
 

@@ -133,6 +134,7 @@ func TestRoutingV1Server(t *testing.T) {
 		cfg.Routing.Type = config.NewOptionalString("dht")
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	// Put IPNS record in lonely node. It should be accepted as it is a valid record.
 	c, err = client.New(node.GatewayURL())

@@ -196,6 +198,7 @@ func TestRoutingV1Server(t *testing.T) {
 		}
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	c, err := client.New(node.GatewayURL())
 	require.NoError(t, err)

@@ -238,6 +241,7 @@ func TestRoutingV1Server(t *testing.T) {
 		cfg.Bootstrap = autoconf.FallbackBootstrapPeers
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	c, err := client.New(node.GatewayURL())
 	require.NoError(t, err)
@@ -16,6 +16,7 @@ func TestDHTAutoclient(t *testing.T) {
 		node.IPFS("config", "Routing.Type", "autoclient")
 	})
 	nodes.StartDaemons().Connect()
+	t.Cleanup(func() { nodes.StopDaemons() })
 
 	t.Run("file added on node in client mode is retrievable from node in client mode", func(t *testing.T) {
 		t.Parallel()

@@ -22,6 +22,7 @@ func TestDHTOptimisticProvide(t *testing.T) {
 	})
 
 	nodes.StartDaemons().Connect()
+	defer nodes.StopDaemons()
 
 	hash := nodes[0].IPFSAddStr(string(random.Bytes(100)))
 	nodes[0].IPFS("routing", "provide", hash)
@@ -19,6 +19,7 @@ func TestFilesCp(t *testing.T) {
 	t.Parallel()
 
 	node := harness.NewT(t).NewNode().Init().StartDaemon()
+	defer node.StopDaemon()
 
 	// Create simple text file
 	data := "testing files cp command"

@@ -36,6 +37,7 @@ func TestFilesCp(t *testing.T) {
 	t.Run("files cp with unsupported DAG node type fails", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// MFS UnixFS is limited to dag-pb or raw, so we create a dag-cbor node to test this
 		jsonData := `{"data": "not a UnixFS node"}`

@@ -53,6 +55,7 @@ func TestFilesCp(t *testing.T) {
 	t.Run("files cp with invalid UnixFS data structure fails", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// Create an invalid proto file
 		data := []byte{0xDE, 0xAD, 0xBE, 0xEF} // Invalid protobuf data

@@ -75,6 +78,7 @@ func TestFilesCp(t *testing.T) {
 	t.Run("files cp with raw node succeeds", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// Create a raw node
 		data := "raw data"

@@ -98,6 +102,7 @@ func TestFilesCp(t *testing.T) {
 	t.Run("files cp creates intermediate directories with -p", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// Create a simple text file and add it to IPFS
 		data := "hello parent directories"

@@ -130,6 +135,7 @@ func TestFilesRm(t *testing.T) {
 		t.Parallel()
 
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// Create a file to remove
 		node.IPFS("files", "mkdir", "/test-dir")

@@ -149,6 +155,7 @@ func TestFilesRm(t *testing.T) {
 		t.Parallel()
 
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// Create a file to remove
 		node.IPFS("files", "mkdir", "/test-dir")

@@ -166,6 +173,7 @@ func TestFilesRm(t *testing.T) {
 		t.Parallel()
 
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// Create a file to remove
 		node.IPFS("files", "mkdir", "/test-dir")

@@ -186,6 +194,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
 	t.Run("reaches default limit of 256 operations", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		// Perform 256 operations with --flush=false (should succeed)
 		for i := 0; i < 256; i++ {

@@ -214,6 +223,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
 		})
 
 		node.StartDaemon()
+		defer node.StopDaemon()
 
 		// Perform 5 operations (should succeed)
 		for i := 0; i < 5; i++ {

@@ -239,6 +249,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
 		})
 
 		node.StartDaemon()
+		defer node.StopDaemon()
 
 		// Do 2 operations with --flush=false
 		node.IPFS("files", "mkdir", "--flush=false", "/dir1")

@@ -271,6 +282,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
 		})
 
 		node.StartDaemon()
+		defer node.StopDaemon()
 
 		// Do 2 operations with --flush=false
 		node.IPFS("files", "mkdir", "--flush=false", "/dir1")

@@ -303,6 +315,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
 		})
 
 		node.StartDaemon()
+		defer node.StopDaemon()
 
 		// Should be able to do many operations without error
 		for i := 0; i < 300; i++ {

@@ -322,6 +335,7 @@ func TestFilesNoFlushLimit(t *testing.T) {
 		})
 
 		node.StartDaemon()
+		defer node.StopDaemon()
 
 		// Mix of different MFS operations (5 operations to hit the limit)
 		node.IPFS("files", "mkdir", "--flush=false", "/testdir")
@@ -28,6 +28,7 @@ func TestGatewayLimits(t *testing.T) {
 		cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(1 * time.Second)
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	// Add content that can be retrieved quickly
 	cid := node.IPFSAddStr("test content")

@@ -69,6 +70,7 @@ func TestGatewayLimits(t *testing.T) {
 		cfg.Gateway.RetrievalTimeout = config.NewOptionalDuration(2 * time.Second)
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	// Add some content - use a non-existent CID that will block during retrieval
 	// to ensure we can control timing
@@ -27,6 +27,7 @@ func TestGatewayHAMTDirectory(t *testing.T) {
 	// Start node
 	h := harness.NewT(t)
 	node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline")
+	defer node.StopDaemon()
 	client := node.GatewayClient()
 
 	// Import fixtures

@@ -56,6 +57,7 @@ func TestGatewayHAMTRanges(t *testing.T) {
 	// Start node
 	h := harness.NewT(t)
 	node := h.NewNode().Init("--empty-repo", "--profile=test").StartDaemon("--offline")
+	t.Cleanup(func() { node.StopDaemon() })
 	client := node.GatewayClient()
 
 	// Import fixtures
@@ -28,6 +28,7 @@ func TestGateway(t *testing.T) {
 	t.Parallel()
 	h := harness.NewT(t)
 	node := h.NewNode().Init().StartDaemon("--offline")
+	t.Cleanup(func() { node.StopDaemon() })
 	cid := node.IPFSAddStr("Hello Worlds!")
 
 	peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36)

@@ -234,6 +235,7 @@ func TestGateway(t *testing.T) {
 		cfg.API.HTTPHeaders = map[string][]string{header: values}
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	resp := node.APIClient().DisableRedirects().Get("/webui/")
 	assert.Equal(t, resp.Headers.Values(header), values)

@@ -257,6 +259,7 @@ func TestGateway(t *testing.T) {
 	t.Run("pprof", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		t.Cleanup(func() { node.StopDaemon() })
 		apiClient := node.APIClient()
 		t.Run("mutex", func(t *testing.T) {
 			t.Parallel()

@@ -300,6 +303,7 @@ func TestGateway(t *testing.T) {
 	t.Parallel()
 	h := harness.NewT(t)
 	node := h.NewNode().Init().StartDaemon()
+	t.Cleanup(func() { node.StopDaemon() })
 
 	h.WriteFile("index/index.html", "<p></p>")
 	cid := node.IPFS("add", "-Q", "-r", filepath.Join(h.Dir, "index")).Stderr.Trimmed()

@@ -367,6 +371,7 @@ func TestGateway(t *testing.T) {
 		cfg.Addresses.Gateway = config.Strings{"/ip4/127.0.0.1/tcp/32563"}
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	b, err := os.ReadFile(filepath.Join(node.Dir, "gateway"))
 	require.NoError(t, err)

@@ -388,6 +393,7 @@ func TestGateway(t *testing.T) {
 	assert.NoError(t, err)
 
 	nodes.StartDaemons().Connect()
+	t.Cleanup(func() { nodes.StopDaemons() })
 
 	t.Run("not present", func(t *testing.T) {
 		cidFoo := node2.IPFSAddStr("foo")

@@ -460,6 +466,7 @@ func TestGateway(t *testing.T) {
 		}
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	cidFoo := node.IPFSAddStr("foo")
 	client := node.GatewayClient()

@@ -509,6 +516,7 @@ func TestGateway(t *testing.T) {
 
 	node := harness.NewT(t).NewNode().Init()
 	node.StartDaemon()
+	defer node.StopDaemon()
 	client := node.GatewayClient()
 
 	res := client.Get("/ipfs/invalid-thing", func(r *http.Request) {

@@ -526,6 +534,7 @@ func TestGateway(t *testing.T) {
 		cfg.Gateway.DisableHTMLErrors = config.True
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 	client := node.GatewayClient()
 
 	res := client.Get("/ipfs/invalid-thing", func(r *http.Request) {

@@ -546,6 +555,7 @@ func TestLogs(t *testing.T) {
 	t.Setenv("GOLOG_LOG_LEVEL", "info")
 
 	node := h.NewNode().Init().StartDaemon("--offline")
+	defer node.StopDaemon()
 	cid := node.IPFSAddStr("Hello Worlds!")
 
 	peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36)
@@ -32,6 +32,7 @@ func TestGatewayOverLibp2p(t *testing.T) {
 	p2pProxyNode := nodes[1]
 
 	nodes.StartDaemons().Connect()
+	defer nodes.StopDaemons()
 
 	// Add data to the gateway node
 	cidDataOnGatewayNode := cid.MustParse(gwNode.IPFSAddStr("Hello Worlds2!"))

@@ -65,6 +66,7 @@ func TestGatewayOverLibp2p(t *testing.T) {
 	// Enable the experimental feature and reconnect the nodes
 	gwNode.IPFS("config", "--json", "Experimental.GatewayOverLibp2p", "true")
 	gwNode.StopDaemon().StartDaemon()
+	t.Cleanup(func() { gwNode.StopDaemon() })
 	nodes.Connect()
 
 	// Note: the bare HTTP requests here assume that the gateway is mounted at `/`
@@ -75,6 +75,7 @@ func TestHTTPRetrievalClient(t *testing.T) {
 
 	// Start Kubo
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	if debug {
 		fmt.Printf("delegatedRoutingServer.URL: %s\n", delegatedRoutingServer.URL)
@@ -155,6 +155,7 @@ func TestInit(t *testing.T) {
 	t.Run("ipfs init should not run while daemon is running", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		res := node.RunIPFS("init")
 		assert.NotEqual(t, 0, res.ExitErr.ExitCode())
 		assert.Contains(t, res.Stderr.String(), "Error: ipfs daemon is running. please stop it to run this command")
@@ -103,6 +103,7 @@ func TestName(t *testing.T) {
 	})
 
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	t.Run("Resolving self offline succeeds (daemon on)", func(t *testing.T) {
 		res = node.IPFS("name", "resolve", "--offline", "/ipns/"+name.String())

@@ -147,6 +148,7 @@ func TestName(t *testing.T) {
 	t.Run("Fails to publish in offline mode", func(t *testing.T) {
 		t.Parallel()
 		node := makeDaemon(t, nil).StartDaemon("--offline")
+		defer node.StopDaemon()
 		res := node.RunIPFS("name", "publish", "/ipfs/"+fixtureCid)
 		require.Error(t, res.Err)
 		require.Equal(t, 1, res.ExitCode())

@@ -157,6 +159,7 @@ func TestName(t *testing.T) {
 		t.Parallel()
 
 		node := makeDaemon(t, nil).StartDaemon()
+		defer node.StopDaemon()
 		ipnsName := ipns.NameFromPeer(node.PeerID()).String()
 		ipnsPath := ipns.NamespacePrefix + ipnsName
 		publishPath := "/ipfs/" + fixtureCid

@@ -187,6 +190,7 @@ func TestName(t *testing.T) {
 		t.Parallel()
 
 		node := makeDaemon(t, nil).StartDaemon()
+		t.Cleanup(func() { node.StopDaemon() })
 		ipnsPath := ipns.NamespacePrefix + ipns.NameFromPeer(node.PeerID()).String()
 		publishPath := "/ipfs/" + fixtureCid
 

@@ -227,6 +231,7 @@ func TestName(t *testing.T) {
 	t.Run("Inspect with verification using wrong RSA key errors", func(t *testing.T) {
 		t.Parallel()
 		node := makeDaemon(t, nil).StartDaemon()
+		defer node.StopDaemon()
 
 		// Prepare RSA Key 1
 		res := node.IPFS("key", "gen", "--type=rsa", "--size=4096", "key1")

@@ -299,6 +304,7 @@ func TestName(t *testing.T) {
 		t.Parallel()
 
 		node := makeDaemon(t, nil).StartDaemon()
+		defer node.StopDaemon()
 		publishPath1 := "/ipfs/" + fixtureCid
 		publishPath2 := "/ipfs/" + dagCid // Different content
 		name := ipns.NameFromPeer(node.PeerID())
@@ -62,6 +62,7 @@ func TestPeering(t *testing.T) {
 	h, nodes := harness.CreatePeerNodes(t, 3, peerings)
 
 	nodes.StartDaemons()
+	defer nodes.StopDaemons()
 	assertPeerings(h, nodes, peerings)
 
 	nodes[0].Disconnect(nodes[1])

@@ -74,6 +75,7 @@ func TestPeering(t *testing.T) {
 	h, nodes := harness.CreatePeerNodes(t, 3, peerings)
 
 	nodes.StartDaemons()
+	defer nodes.StopDaemons()
 	assertPeerings(h, nodes, peerings)
 
 	nodes[2].Disconnect(nodes[1])

@@ -85,6 +87,7 @@ func TestPeering(t *testing.T) {
 	peerings := []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}, {From: 1, To: 2}}
 	h, nodes := harness.CreatePeerNodes(t, 3, peerings)
 
+	defer nodes.StopDaemons()
 	nodes[0].StartDaemon()
 	nodes[1].StartDaemon()
 	assertPeerings(h, nodes, []harness.Peering{{From: 0, To: 1}, {From: 1, To: 0}})

@@ -99,6 +102,7 @@ func TestPeering(t *testing.T) {
 	h, nodes := harness.CreatePeerNodes(t, 3, peerings)
 
 	nodes.StartDaemons()
+	defer nodes.StopDaemons()
 	assertPeerings(h, nodes, peerings)
 
 	nodes[2].StopDaemon()
@@ -28,6 +28,9 @@ func setupTestNode(t *testing.T) *harness.Node {
 	t.Helper()
 	node := harness.NewT(t).NewNode().Init()
 	node.StartDaemon("--offline")
+	t.Cleanup(func() {
+		node.StopDaemon()
+	})
 	return node
 }
 
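With StopDaemon now registered via t.Cleanup inside setupTestNode, the per-test defer node.StopDaemon() calls in TestPinLsEdgeCases become redundant; the next three hunks remove them.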
@@ -498,7 +501,6 @@ func TestPinLsEdgeCases(t *testing.T) {
 	t.Run("invalid pin type returns error", func(t *testing.T) {
 		t.Parallel()
 		node := setupTestNode(t)
-		defer node.StopDaemon()
 
 		// Try to list pins with invalid type
 		res := node.RunIPFS("pin", "ls", "--type=invalid")

@@ -510,7 +512,6 @@ func TestPinLsEdgeCases(t *testing.T) {
 	t.Run("non-existent path returns proper error", func(t *testing.T) {
 		t.Parallel()
 		node := setupTestNode(t)
-		defer node.StopDaemon()
 
 		// Try to list a non-existent CID
 		fakeCID := "QmNonExistent123456789"

@@ -521,7 +522,6 @@ func TestPinLsEdgeCases(t *testing.T) {
 	t.Run("unpinned CID returns not pinned error", func(t *testing.T) {
 		t.Parallel()
 		node := setupTestNode(t)
-		defer node.StopDaemon()
 
 		// Add content but don't pin it explicitly (it's just in blockstore)
 		unpinnedCID := node.IPFSAddStr("unpinned content", "--pin=false")
@@ -15,6 +15,7 @@ func TestPing(t *testing.T) {
 	t.Run("other", func(t *testing.T) {
 		t.Parallel()
 		nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
+		defer nodes.StopDaemons()
 		node1 := nodes[0]
 		node2 := nodes[1]
 

@@ -25,6 +26,7 @@ func TestPing(t *testing.T) {
 	t.Run("ping unreachable peer", func(t *testing.T) {
 		t.Parallel()
 		nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
+		defer nodes.StopDaemons()
 		node1 := nodes[0]
 
 		badPeer := "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJx"

@@ -37,6 +39,7 @@ func TestPing(t *testing.T) {
 	t.Run("self", func(t *testing.T) {
 		t.Parallel()
 		nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons()
+		defer nodes.StopDaemons()
 		node1 := nodes[0]
 		node2 := nodes[1]
 

@@ -52,6 +55,7 @@ func TestPing(t *testing.T) {
 	t.Run("0", func(t *testing.T) {
 		t.Parallel()
 		nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
+		defer nodes.StopDaemons()
 		node1 := nodes[0]
 		node2 := nodes[1]
 

@@ -63,6 +67,7 @@ func TestPing(t *testing.T) {
 	t.Run("offline", func(t *testing.T) {
 		t.Parallel()
 		nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
+		defer nodes.StopDaemons()
 		node1 := nodes[0]
 		node2 := nodes[1]
 
@@ -51,6 +51,7 @@ func TestRemotePinning(t *testing.T) {
 	node.IPFS("config", "--json", "Pinning.RemoteServices.svc.Policies.MFS.Enable", "true")
 
 	node.StartDaemon()
+	t.Cleanup(func() { node.StopDaemon() })
 
 	node.IPFS("files", "cp", "/ipfs/bafkqaaa", "/mfs-pinning-test-"+uuid.NewString())
 	node.IPFS("files", "flush")

@@ -133,6 +134,8 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("pin remote service ls --stat", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
+
 		_, svcURL := runPinningService(t, authToken)
 
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)

@@ -155,6 +158,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("adding service with invalid URL fails", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 
 		res := node.RunIPFS("pin", "remote", "service", "add", "svc", "invalid-service.example.com", "key")
 		assert.Equal(t, 1, res.ExitCode())

@@ -168,6 +172,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("unauthorized pinning service calls fail", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		_, svcURL := runPinningService(t, authToken)
 
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, "othertoken")

@@ -180,6 +185,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("pinning service calls fail when there is a wrong path", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		_, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL+"/invalid-path", authToken)
 

@@ -191,6 +197,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("pinning service calls fail when DNS resolution fails", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		node.IPFS("pin", "remote", "service", "add", "svc", "https://invalid-service.example.com", authToken)
 
 		res := node.RunIPFS("pin", "remote", "ls", "--service=svc")

@@ -201,6 +208,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("pin remote service rm", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		node.IPFS("pin", "remote", "service", "add", "svc", "https://example.com", authToken)
 		node.IPFS("pin", "remote", "service", "rm", "svc")
 		res := node.IPFS("pin", "remote", "service", "ls")

@@ -225,6 +233,7 @@ func TestRemotePinning(t *testing.T) {
 
 	t.Run("'ipfs pin remote add --background=true'", func(t *testing.T) {
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		svc, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
 

@@ -266,6 +275,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("'ipfs pin remote add --background=false'", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		svc, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
 

@@ -287,6 +297,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("'ipfs pin remote ls' with multiple statuses", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		svc, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
 

@@ -340,6 +351,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("'ipfs pin remote ls' by CID", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		svc, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
 

@@ -360,6 +372,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("'ipfs pin remote rm --name' without --force when multiple pins match", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		svc, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
 

@@ -388,6 +401,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("'ipfs pin remote rm --name --force' remove multiple pins", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		svc, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
 

@@ -408,6 +422,7 @@ func TestRemotePinning(t *testing.T) {
 	t.Run("'ipfs pin remote rm --force' removes all pins", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		svc, svcURL := runPinningService(t, authToken)
 		node.IPFS("pin", "remote", "service", "add", "svc", svcURL, authToken)
 
@@ -26,6 +26,7 @@ func testPins(t *testing.T, args testPinsArgs) {
 	node := harness.NewT(t).NewNode().Init()
 	if args.runDaemon {
 		node.StartDaemon("--offline")
+		defer node.StopDaemon()
 	}
 
 	strs := []string{"a", "b", "c", "d", "e", "f", "g"}

@@ -127,6 +128,7 @@ func testPinsErrorReporting(t *testing.T, args testPinsArgs) {
 	node := harness.NewT(t).NewNode().Init()
 	if args.runDaemon {
 		node.StartDaemon("--offline")
+		defer node.StopDaemon()
 	}
 	randomCID := "Qme8uX5n9hn15pw9p6WcVKoziyyC9LXv4LEgvsmKMULjnV"
 	res := node.RunIPFS(StrCat("pin", "add", args.pinArg, randomCID)...)

@@ -142,6 +144,7 @@ func testPinDAG(t *testing.T, args testPinsArgs) {
 	node := h.NewNode().Init()
 	if args.runDaemon {
 		node.StartDaemon("--offline")
+		defer node.StopDaemon()
 	}
 	bytes := random.Bytes(1 << 20) // 1 MiB
 	tmpFile := h.WriteToTemp(string(bytes))

@@ -168,6 +171,7 @@ func testPinProgress(t *testing.T, args testPinsArgs) {
 
 	if args.runDaemon {
 		node.StartDaemon("--offline")
+		defer node.StopDaemon()
 	}
 
 	bytes := random.Bytes(1 << 20) // 1 MiB
@@ -26,6 +26,7 @@ func TestRcmgr(t *testing.T) {
 	})
 
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	t.Run("swarm resources should fail", func(t *testing.T) {
 		res := node.RunIPFS("swarm", "resources")

@@ -41,6 +42,7 @@ func TestRcmgr(t *testing.T) {
 		cfg.Swarm.ResourceMgr.Enabled = config.False
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	t.Run("swarm resources should fail", func(t *testing.T) {
 		res := node.RunIPFS("swarm", "resources")

@@ -56,6 +58,7 @@ func TestRcmgr(t *testing.T) {
 		cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	res := node.RunIPFS("swarm", "resources", "--enc=json")
 	require.Equal(t, 0, res.ExitCode())

@@ -73,7 +76,9 @@ func TestRcmgr(t *testing.T) {
 	node.UpdateConfig(func(cfg *config.Config) {
 		cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
 	})
+
 	node.StartDaemon()
+	t.Cleanup(func() { node.StopDaemon() })
 
 	t.Run("conns and streams are above 800 for default connmgr settings", func(t *testing.T) {
 		t.Parallel()

@@ -135,6 +140,7 @@ func TestRcmgr(t *testing.T) {
 		overrides.System.ConnsInbound = rcmgr.Unlimited
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	res := node.RunIPFS("swarm", "resources", "--enc=json")
 	limits := unmarshalLimits(t, res.Stdout.Bytes())

@@ -150,6 +156,7 @@ func TestRcmgr(t *testing.T) {
 		overrides.Transient.Memory = 88888
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	res := node.RunIPFS("swarm", "resources", "--enc=json")
 	limits := unmarshalLimits(t, res.Stdout.Bytes())

@@ -163,6 +170,7 @@ func TestRcmgr(t *testing.T) {
 		overrides.Service = map[string]rcmgr.ResourceLimits{"foo": {Memory: 77777}}
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	res := node.RunIPFS("swarm", "resources", "--enc=json")
 	limits := unmarshalLimits(t, res.Stdout.Bytes())

@@ -176,6 +184,7 @@ func TestRcmgr(t *testing.T) {
 		overrides.Protocol = map[protocol.ID]rcmgr.ResourceLimits{"foo": {Memory: 66666}}
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	res := node.RunIPFS("swarm", "resources", "--enc=json")
 	limits := unmarshalLimits(t, res.Stdout.Bytes())

@@ -191,6 +200,7 @@ func TestRcmgr(t *testing.T) {
 		overrides.Peer = map[peer.ID]rcmgr.ResourceLimits{validPeerID: {Memory: 55555}}
 	})
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	res := node.RunIPFS("swarm", "resources", "--enc=json")
 	limits := unmarshalLimits(t, res.Stdout.Bytes())

@@ -218,6 +228,7 @@ func TestRcmgr(t *testing.T) {
 	})
 
 	nodes.StartDaemons()
+	t.Cleanup(func() { nodes.StopDaemons() })
 
 	t.Run("node 0 should fail to connect to and ping node 1", func(t *testing.T) {
 		t.Parallel()
@@ -57,6 +57,7 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) {
 	}
 
 	nodes.StartDaemons(daemonArgs...).Connect()
+	t.Cleanup(func() { nodes.StopDaemons() })
 
 	t.Run("ipfs routing findpeer", func(t *testing.T) {
 		t.Parallel()

@@ -157,6 +158,7 @@ func testSelfFindDHT(t *testing.T) {
 	})
 
 	nodes.StartDaemons()
+	defer nodes.StopDaemons()
 
 	res := nodes[0].RunIPFS("dht", "findpeer", nodes[0].PeerID().String())
 	assert.Equal(t, 1, res.ExitCode())
@@ -14,6 +14,7 @@ func TestStats(t *testing.T) {
 	t.Run("stats dht", func(t *testing.T) {
 		t.Parallel()
 		nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect()
+		defer nodes.StopDaemons()
 		node1 := nodes[0]
 
 		res := node1.IPFS("stats", "dht")
@@ -31,6 +31,7 @@ func TestSwarm(t *testing.T) {
 	t.Run("ipfs swarm peers returns empty peers when a node is not connected to any peers", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
 		var output expectedOutputType
 		err := json.Unmarshal(res.Stdout.Bytes(), &output)

@@ -40,7 +41,9 @@ func TestSwarm(t *testing.T) {
 	t.Run("ipfs swarm peers with flag identify outputs expected identify information about connected peers", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer otherNode.StopDaemon()
 		node.Connect(otherNode)
 
 		res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")

@@ -67,7 +70,9 @@ func TestSwarm(t *testing.T) {
 	t.Run("ipfs swarm peers with flag identify outputs Identify field with data that matches calling ipfs id on a peer", func(t *testing.T) {
 		t.Parallel()
 		node := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer node.StopDaemon()
 		otherNode := harness.NewT(t).NewNode().Init().StartDaemon()
+		defer otherNode.StopDaemon()
 		node.Connect(otherNode)
 
 		otherNodeIDResponse := otherNode.RunIPFS("id", "--enc=json")
@@ -76,6 +76,7 @@ func TestTracing(t *testing.T) {
 	node.Runner.Env["OTEL_EXPORTER_OTLP_PROTOCOL"] = "grpc"
 	node.Runner.Env["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://localhost:4317"
 	node.StartDaemon()
+	defer node.StopDaemon()
 
 	assert.Eventually(t,
 		func() bool {
@@ -74,6 +74,7 @@ func TestTransports(t *testing.T) {
 		t.Parallel()
 		nodes := tcpNodes(t).StartDaemons().Connect()
 		runTests(nodes)
+		nodes.StopDaemons()
 	})
 
 	t.Run("tcp with NOISE", func(t *testing.T) {

@@ -86,6 +87,7 @@ func TestTransports(t *testing.T) {
 		})
 		nodes.StartDaemons().Connect()
 		runTests(nodes)
+		nodes.StopDaemons()
 	})
 
 	t.Run("QUIC", func(t *testing.T) {

@@ -104,6 +106,7 @@ func TestTransports(t *testing.T) {
 		disableRouting(nodes)
 		nodes.StartDaemons().Connect()
 		runTests(nodes)
+		nodes.StopDaemons()
 	})
 
 	t.Run("QUIC+Webtransport", func(t *testing.T) {

@@ -122,6 +125,7 @@ func TestTransports(t *testing.T) {
 		disableRouting(nodes)
 		nodes.StartDaemons().Connect()
 		runTests(nodes)
+		nodes.StopDaemons()
 	})
 
 	t.Run("QUIC connects with non-dialable transports", func(t *testing.T) {

@@ -144,6 +148,7 @@ func TestTransports(t *testing.T) {
 		disableRouting(nodes)
 		nodes.StartDaemons().Connect()
 		runTests(nodes)
+		nodes.StopDaemons()
 	})
 
 	t.Run("WebRTC Direct", func(t *testing.T) {

@@ -162,5 +167,6 @@ func TestTransports(t *testing.T) {
 		disableRouting(nodes)
 		nodes.StartDaemons().Connect()
 		runTests(nodes)
+		nodes.StopDaemons()
 	})
 }