mirror of
https://github.com/ipfs/kubo.git
synced 2026-02-26 04:47:45 +08:00
* update go-libp2p to v0.18.0
* initialize the resource manager
* add resource manager stats/limit commands
* load limit file when building resource manager
* log absent limit file
* write rcmgr to file when IPFS_DEBUG_RCMGR is set
* fix: mark swarm limit|stats as experimental
* feat(cfg): opt-in Swarm.ResourceMgr
This ensures we can safely test the resource manager without impacting
default behavior.
- Resource manager is disabled by default
- Default for Swarm.ResourceMgr.Enabled is false for now
- Swarm.ResourceMgr.Limits allows user to tweak limits per specific
scope in a way that is persisted across restarts
- 'ipfs swarm limit system' outputs human-readable json
- 'ipfs swarm limit system new-limits.json' sets new runtime limits
(but does not change Swarm.ResourceMgr.Limits in the config)
Conventions to make libp2p devs' lives easier:
- 'IPFS_RCMGR=1 ipfs daemon' overrides the config and enables resource manager
- 'limit.json' overrides implicit defaults from libp2p (if present)
* docs(config): small tweaks
* fix: skip libp2p.ResourceManager if disabled
This ensures 'ipfs swarm limit|stats' work only when enabled.
* fix: use NullResourceManager when disabled
This reverts commit b19f7c9eca.
after clarification feedback from
https://github.com/ipfs/go-ipfs/pull/8680#discussion_r841680182
* style: rename IPFS_RCMGR to LIBP2P_RCMGR
preexisting libp2p toggles use LIBP2P_ prefix
* test: Swarm.ResourceMgr
* fix: location of opt-in limit.json and rcmgr.json.gz
Places these files inside of IPFS_PATH
* Update docs/config.md
* feat: expose rcmgr metrics when enabled (#8785)
* add metrics for the resource manager
* export protocol and service name in Prometheus metrics
* fix: expose rcmgr metrics only when enabled
Co-authored-by: Marcin Rataj <lidel@lidel.org>
* refactor: rcmgr_metrics.go
* refactor: rcmgr_defaults.go
This file defines the implicit limit defaults used when Swarm.ResourceMgr.Enabled is set.
We keep a vendored copy to ensure go-ipfs is not impacted when go-libp2p
decides to change defaults in any of the future releases.
* refactor: adjustedDefaultLimits
Cleans up the way we initialize defaults and adds a fix for case
when connection manager runs with high limits.
It also hides `Swarm.ResourceMgr.Limits` until we have a better
understanding what syntax makes sense.
* chore: cleanup after a review
* fix: restore go-ipld-prime v0.14.2
* fix: restore go-ds-flatfs v0.5.1
Co-authored-by: Lucas Molas <schomatis@gmail.com>
Co-authored-by: Marcin Rataj <lidel@lidel.org>
173 lines
5.5 KiB
Go
173 lines
5.5 KiB
Go
package peering
|
|
|
|
import (
|
|
"context"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/libp2p/go-libp2p"
|
|
"github.com/libp2p/go-libp2p-core/host"
|
|
"github.com/libp2p/go-libp2p-core/network"
|
|
"github.com/libp2p/go-libp2p-core/peer"
|
|
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
|
|
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
func newNode(t *testing.T) host.Host {
|
|
cm, err := connmgr.NewConnManager(1, 100, connmgr.WithGracePeriod(0))
|
|
require.NoError(t, err)
|
|
h, err := libp2p.New(
|
|
libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
|
|
// We'd like to set the connection manager low water to 0, but
|
|
// that would disable the connection manager.
|
|
libp2p.ConnectionManager(cm),
|
|
)
|
|
require.NoError(t, err)
|
|
return h
|
|
}
|
|
|
|
// TestPeeringService exercises the peering service lifecycle end to end:
// peers can be added/listed before Start; no dials happen until Start;
// Start/Stop are idempotent; peered connections survive connection-manager
// trims and are re-established after a remote disconnect; RemovePeer
// unprotects a peer so it is trimmed and not redialed; and add/remove keep
// working after Stop.
func TestPeeringService(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h1 := newNode(t)
	ps1 := NewPeeringService(h1)

	h2 := newNode(t)
	h3 := newNode(t)
	h4 := newNode(t)

	// peer 1 -> 2
	ps1.AddPeer(peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
	require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})

	// We haven't started so we shouldn't have any peers.
	// NOTE(review): the tick (1s) is larger than waitFor (100ms), so the
	// condition may never actually be polled before the deadline — the
	// two durations look swapped; confirm against require.Never's
	// (condition, waitFor, tick) signature.
	require.Never(t, func() bool {
		return len(h1.Network().Peers()) > 0
	}, 100*time.Millisecond, 1*time.Second, "expected host 1 to have no peers")

	// Use p4 to take up the one slot we have in the connection manager
	// (newNode sets low water to 1), so trims below have something to cut.
	for _, h := range []host.Host{h1, h2} {
		require.NoError(t, h.Connect(ctx, peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()}))
		h.ConnManager().TagPeer(h4.ID(), "sticky-peer", 1000)
	}

	// Now start.
	require.NoError(t, ps1.Start())
	// starting twice is fine.
	require.NoError(t, ps1.Start())

	// We should eventually connect to the peered h2.
	t.Logf("waiting for h1 to connect to h2")
	require.Eventually(t, func() bool {
		return h1.Network().Connectedness(h2.ID()) == network.Connected
	}, 30*time.Second, 10*time.Millisecond)

	// Now explicitly connect to h3 (h3 is NOT peered — just dialed).
	t.Logf("waiting for h1's connection to h3 to work")
	require.NoError(t, h1.Connect(ctx, peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()}))
	require.Eventually(t, func() bool {
		return h1.Network().Connectedness(h3.ID()) == network.Connected
	}, 30*time.Second, 100*time.Millisecond)

	// h1 is now connected to h2 (peered), h3 (explicit), and h4 (sticky).
	require.Len(t, h1.Network().Peers(), 3)

	// force a disconnect
	h1.ConnManager().TrimOpenConns(ctx)

	// Should disconnect from h3, the only unprotected, untagged peer.
	t.Logf("waiting for h1's connection to h3 to disconnect")
	require.Eventually(t, func() bool {
		return h1.Network().Connectedness(h3.ID()) != network.Connected
	}, 5*time.Second, 10*time.Millisecond)

	// Should remain connected to p2, which the peering service protects.
	require.Never(t, func() bool {
		return h1.Network().Connectedness(h2.ID()) != network.Connected
	}, 5*time.Second, 1*time.Second)

	// Now force h2 to disconnect (we have an asymmetric peering:
	// only h1 protects the h1<->h2 connection).
	conns := h2.Network().ConnsToPeer(h1.ID())
	require.NotEmpty(t, conns)
	h2.ConnManager().TrimOpenConns(ctx)

	// All conns to peer should eventually close. Opening a stream probes
	// liveness: a non-nil error means the connection is gone; any stream
	// that did open is reset so it doesn't leak.
	t.Logf("waiting for all connections to close")
	for _, c := range conns {
		require.Eventually(t, func() bool {
			s, err := c.NewStream(context.Background())
			if s != nil {
				_ = s.Reset()
			}
			return err != nil
		}, 5*time.Second, 10*time.Millisecond)
	}

	// Should eventually re-connect: h1's peering service redials h2.
	require.Eventually(t, func() bool {
		return h1.Network().Connectedness(h2.ID()) == network.Connected
	}, 30*time.Second, 1*time.Second)

	// Unprotect 2 from 1.
	ps1.RemovePeer(h2.ID())
	require.NotContains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})

	// Trim connections.
	h1.ConnManager().TrimOpenConns(ctx)

	// Should disconnect, since h2 is no longer protected.
	t.Logf("waiting for h1 to disconnect from h2")
	require.Eventually(t, func() bool {
		return h1.Network().Connectedness(h2.ID()) != network.Connected
	}, 5*time.Second, 10*time.Millisecond)

	// Should never reconnect: removed peers are not redialed.
	t.Logf("ensuring h1 is not connected to h2 again")
	require.Never(t, func() bool {
		return h1.Network().Connectedness(h2.ID()) == network.Connected
	}, 20*time.Second, 1*time.Second)

	// Until added back.
	ps1.AddPeer(peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
	require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
	ps1.AddPeer(peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()})
	require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()})
	t.Logf("wait for h1 to connect to h2 and h3 again")
	require.Eventually(t, func() bool {
		return h1.Network().Connectedness(h2.ID()) == network.Connected
	}, 30*time.Second, 1*time.Second)
	require.Eventually(t, func() bool {
		return h1.Network().Connectedness(h3.ID()) == network.Connected
	}, 30*time.Second, 1*time.Second)

	// Should be able to repeatedly stop.
	require.NoError(t, ps1.Stop())
	require.NoError(t, ps1.Stop())

	// Adding and removing should work after stopping.
	ps1.AddPeer(peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()})
	require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()})
	ps1.RemovePeer(h2.ID())
	require.NotContains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
}
|
|
|
|
func TestNextBackoff(t *testing.T) {
|
|
minMaxBackoff := (100 - maxBackoffJitter) / 100 * maxBackoff
|
|
for x := 0; x < 1000; x++ {
|
|
ph := peerHandler{nextDelay: time.Second}
|
|
for min, max := time.Second*3/2, time.Second*5/2; min < minMaxBackoff; min, max = min*3/2, max*5/2 {
|
|
b := ph.nextBackoff()
|
|
if b > max || b < min {
|
|
t.Errorf("expected backoff %s to be between %s and %s", b, min, max)
|
|
}
|
|
}
|
|
for i := 0; i < 100; i++ {
|
|
b := ph.nextBackoff()
|
|
if b < minMaxBackoff || b > maxBackoff {
|
|
t.Fatal("failed to stay within max bounds")
|
|
}
|
|
}
|
|
}
|
|
}
|