mirror of
https://github.com/ipfs/kubo.git
synced 2026-03-03 15:27:57 +08:00
* update go-libp2p to v0.18.0
* initialize the resource manager
* add resource manager stats/limit commands
* load limit file when building resource manager
* log absent limit file
* write rcmgr to file when IPFS_DEBUG_RCMGR is set
* fix: mark swarm limit|stats as experimental
* feat(cfg): opt-in Swarm.ResourceMgr
This ensures we can safely test the resource manager without impacting
default behavior.
- Resource manager is disabled by default
- Default for Swarm.ResourceMgr.Enabled is false for now
- Swarm.ResourceMgr.Limits allows user to tweak limits per specific
scope in a way that is persisted across restarts
- 'ipfs swarm limit system' outputs human-readable json
- 'ipfs swarm limit system new-limits.json' sets new runtime limits
(but does not change Swarm.ResourceMgr.Limits in the config)
Conventions to make libp2p developers' lives easier:
- 'IPFS_RCMGR=1 ipfs daemon' overrides the config and enables resource manager
- 'limit.json' overrides implicit defaults from libp2p (if present)
* docs(config): small tweaks
* fix: skip libp2p.ResourceManager if disabled
This ensures 'ipfs swarm limit|stats' work only when enabled.
* fix: use NullResourceManager when disabled
This reverts commit b19f7c9eca.
after clarification feedback from
https://github.com/ipfs/go-ipfs/pull/8680#discussion_r841680182
* style: rename IPFS_RCMGR to LIBP2P_RCMGR
preexisting libp2p toggles use LIBP2P_ prefix
* test: Swarm.ResourceMgr
* fix: location of opt-in limit.json and rcmgr.json.gz
Places these files inside of IPFS_PATH
* Update docs/config.md
* feat: expose rcmgr metrics when enabled (#8785)
* add metrics for the resource manager
* export protocol and service name in Prometheus metrics
* fix: expose rcmgr metrics only when enabled
Co-authored-by: Marcin Rataj <lidel@lidel.org>
* refactor: rcmgr_metrics.go
* refactor: rcmgr_defaults.go
This file defines implicit limit defaults used when Swarm.ResourceMgr.Enabled is set.
We keep vendored copy to ensure go-ipfs is not impacted when go-libp2p
decides to change defaults in any of the future releases.
* refactor: adjustedDefaultLimits
Cleans up the way we initialize defaults and adds a fix for case
when connection manager runs with high limits.
It also hides `Swarm.ResourceMgr.Limits` until we have a better
understanding what syntax makes sense.
* chore: cleanup after a review
* fix: restore go-ipld-prime v0.14.2
* fix: restore go-ds-flatfs v0.5.1
Co-authored-by: Lucas Molas <schomatis@gmail.com>
Co-authored-by: Marcin Rataj <lidel@lidel.org>
103 lines
2.2 KiB
Go
103 lines
2.2 KiB
Go
package integrationtest
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"testing"
|
|
|
|
blocks "github.com/ipfs/go-block-format"
|
|
"github.com/ipfs/go-cid"
|
|
"github.com/ipfs/go-ipfs/core"
|
|
coremock "github.com/ipfs/go-ipfs/core/mock"
|
|
"github.com/ipfs/go-ipfs/core/node/libp2p"
|
|
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
|
|
)
|
|
|
|
func TestBitswapWithoutRouting(t *testing.T) {
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
defer cancel()
|
|
const numPeers = 4
|
|
|
|
// create network
|
|
mn := mocknet.New()
|
|
|
|
var nodes []*core.IpfsNode
|
|
for i := 0; i < numPeers; i++ {
|
|
n, err := core.NewNode(ctx, &core.BuildCfg{
|
|
Online: true,
|
|
Host: coremock.MockHostOption(mn),
|
|
Routing: libp2p.NilRouterOption, // no routing
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
defer n.Close()
|
|
nodes = append(nodes, n)
|
|
}
|
|
|
|
err := mn.LinkAll()
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
// connect them
|
|
for _, n1 := range nodes {
|
|
for _, n2 := range nodes {
|
|
if n1 == n2 {
|
|
continue
|
|
}
|
|
|
|
log.Debug("connecting to other hosts")
|
|
p2 := n2.PeerHost.Peerstore().PeerInfo(n2.PeerHost.ID())
|
|
if err := n1.PeerHost.Connect(ctx, p2); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// add blocks to each before
|
|
log.Debug("adding block.")
|
|
block0 := blocks.NewBlock([]byte("block0"))
|
|
block1 := blocks.NewBlock([]byte("block1"))
|
|
|
|
// put 1 before
|
|
if err := nodes[0].Blockstore.Put(ctx, block0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
// get it out.
|
|
for i, n := range nodes {
|
|
// skip first because block not in its exchange. will hang.
|
|
if i == 0 {
|
|
continue
|
|
}
|
|
|
|
log.Debugf("%d %s get block.", i, n.Identity)
|
|
b, err := n.Blocks.GetBlock(ctx, cid.NewCidV0(block0.Multihash()))
|
|
if err != nil {
|
|
t.Error(err)
|
|
} else if !bytes.Equal(b.RawData(), block0.RawData()) {
|
|
t.Error("byte comparison fail")
|
|
} else {
|
|
log.Debug("got block: %s", b.Cid())
|
|
}
|
|
}
|
|
|
|
// put 1 after
|
|
if err := nodes[1].Blockstore.Put(ctx, block1); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
// get it out.
|
|
for _, n := range nodes {
|
|
b, err := n.Blocks.GetBlock(ctx, cid.NewCidV0(block1.Multihash()))
|
|
if err != nil {
|
|
t.Error(err)
|
|
} else if !bytes.Equal(b.RawData(), block1.RawData()) {
|
|
t.Error("byte comparison fail")
|
|
} else {
|
|
log.Debug("got block: %s", b.Cid())
|
|
}
|
|
}
|
|
}
|