kubo/core/core.go
Guillaume Michel 006f9dc704
feat: opt-in new Sweep provide system (#10834)
* reprovide sweep draft

* update reprovider dep

* go mod tidy

* fix provider type

* change router type

* dual reprovider

* revert to provider.System

* back to start

* SweepingReprovider test

* fix nil pointer deref

* noop provider for nil dht

* disabled initial network estimation

* another iteration

* suppress missing self addrs err

* silence empty rt err on lan dht

* comments

* new attempt at integrating

* reverting changes in core/node/libp2p/routing.go

* removing SweepingProvider

* make reprovider optional

* add noop reprovider

* update KeyChanFunc type alias

* restore boxo KeyChanFunc

* fix missing KeyChanFunc

* test(sharness): PARALLEL=1 and timeout 30m

running sequentially to see where timeout occurs

* initialize MHStore

* revert workflow debug

* config

* config docs

* merged IpfsNode provider and reprovider

* move Provider interface from kad-dht to node

* moved Provider interface from kad-dht to kubo/core/node

* mod_tidy

* Add Clear to Provider interface

* use latest kad-dht commit

* make linter happy

* updated boxo provide interface

* boxo PR fix

* using latest kad-dht commit

* use latest boxo release

* fix fx

* fx cyclic deps

* fix merge issues

* extended tests

* don't provide LAN DHT

* docs

* restore dual dht provider

* don't start provider before it is online

* address linter

* dual/provider fix

* add delay in provider tests for dht bootstrap

* add OfflineDelay parameter to config

* remove increase in number of workers in test

* improved keystore gc process

* fix: replace incorrect logger import in coreapi

replaced github.com/labstack/gommon/log with the standard
github.com/ipfs/go-log/v2 logger used throughout kubo.
removed unused labstack dependency from go.mod files.

* fix: remove duplicate WithDefault call in provider config

* fix: use correct option method for burst workers

* fix: improve error messages for experimental sweeping provider

updated error messages to clearly indicate when commands are unavailable
due to experimental sweeping provider being enabled via Reprovider.Sweep.Enabled=true

* docs: remove obsolete KeyStoreGCInterval config

removed from config.md as option no longer exists (removed in b540fba1a)
updated keystore description to reflect gc happens at reprovide interval

* docs: add TODO placeholder changelog for experimental sweeping DHT provider

using v0.38-TODO.md name to avoid merge conflicts with master branch
and allow CI tests to run. will be renamed to v0.38.md once config
migration is added to the PR

* fix: provideKeysRec go routine

* clear keystore on close

* fix: datastore prefix

* fix: improve error handling in provideKeysRec

- close errCh channel to distinguish between nil and pending errors
- check for pending errors when provided.New closes
- handle context cancellation during error send
- prevent race condition where errors could be silently lost

this ensures DAG walk errors are always propagated correctly

* address gammazero's review

* rename BurstProvider to LegacyProvider

* use latest provider/keystore

* boxo: make mfs StartProviding async

* bump boxo

* chore: update boxo to f2b4e12fb9a8ac138ccb82aae3b51ec51d9f631c

- updated boxo dependency to specified commit
- updated go.mod and go.sum files across all modules

* use latest kad-dht/boxo

* Buffered SweepingProvider wrapper

* use latest kad-dht commit

* allow no DHT router

* use latest kad-dht & boxo

---------

Co-authored-by: Marcin Rataj <lidel@lidel.org>
Co-authored-by: gammazero <11790789+gammazero@users.noreply.github.com>
2025-09-18 17:22:04 +02:00

/*
Package core implements the IpfsNode object and related methods.
Packages underneath core/ provide a (relatively) stable, low-level API
to carry out most IPFS-related tasks. For more details on the other
interfaces and how core/... fits into the bigger IPFS picture, see:
$ godoc github.com/ipfs/go-ipfs
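
A minimal construction sketch (hedged: it assumes an already-open repo.Repo
named r; NewNode and BuildCfg are provided by this package's builder):

	n, err := core.NewNode(ctx, &core.BuildCfg{Online: true, Repo: r})
	if err != nil {
		// handle construction error
	}
	defer n.Close()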
*/
package core

import (
	"context"
	"encoding/json"
	"io"
	"time"

	"github.com/ipfs/boxo/filestore"
	pin "github.com/ipfs/boxo/pinning/pinner"
	"github.com/ipfs/go-datastore"

	bitswap "github.com/ipfs/boxo/bitswap"
	bserv "github.com/ipfs/boxo/blockservice"
	bstore "github.com/ipfs/boxo/blockstore"
	exchange "github.com/ipfs/boxo/exchange"
	"github.com/ipfs/boxo/fetcher"
	mfs "github.com/ipfs/boxo/mfs"
	pathresolver "github.com/ipfs/boxo/path/resolver"
	provider "github.com/ipfs/boxo/provider"
	ipld "github.com/ipfs/go-ipld-format"
	logging "github.com/ipfs/go-log/v2"
	ddht "github.com/libp2p/go-libp2p-kad-dht/dual"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	psrouter "github.com/libp2p/go-libp2p-pubsub-router"
	record "github.com/libp2p/go-libp2p-record"
	connmgr "github.com/libp2p/go-libp2p/core/connmgr"
	ic "github.com/libp2p/go-libp2p/core/crypto"
	p2phost "github.com/libp2p/go-libp2p/core/host"
	metrics "github.com/libp2p/go-libp2p/core/metrics"
	"github.com/libp2p/go-libp2p/core/network"
	peer "github.com/libp2p/go-libp2p/core/peer"
	pstore "github.com/libp2p/go-libp2p/core/peerstore"
	routing "github.com/libp2p/go-libp2p/core/routing"
	"github.com/libp2p/go-libp2p/p2p/discovery/mdns"
	p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	ma "github.com/multiformats/go-multiaddr"
	madns "github.com/multiformats/go-multiaddr-dns"

	"github.com/ipfs/boxo/bootstrap"
	"github.com/ipfs/boxo/namesys"
	ipnsrp "github.com/ipfs/boxo/namesys/republisher"
	"github.com/ipfs/boxo/peering"
	"github.com/ipfs/kubo/config"
	"github.com/ipfs/kubo/core/node"
	"github.com/ipfs/kubo/core/node/libp2p"
	"github.com/ipfs/kubo/fuse/mount"
	"github.com/ipfs/kubo/p2p"
	"github.com/ipfs/kubo/repo"
	irouting "github.com/ipfs/kubo/routing"
)

var log = logging.Logger("core")

// IpfsNode is the IPFS Core module. It represents an IPFS instance.
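// Fields are populated through dependency injection (go.uber.org/fx); the
// `optional` and `name` struct tags below are consumed by the injector.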
type IpfsNode struct {
	// Self
	Identity peer.ID // the local node's identity
	Repo     repo.Repo

	// Local node
	Pinning         pin.Pinner             // the pinning manager
	Mounts          Mounts                 `optional:"true"` // current mount state, if any.
	PrivateKey      ic.PrivKey             `optional:"true"` // the local node's private Key
	PNetFingerprint libp2p.PNetFingerprint `optional:"true"` // fingerprint of private network

	// Services
	Peerstore                   pstore.Peerstore          `optional:"true"` // storage for other Peer instances
	Blockstore                  bstore.GCBlockstore       // the block store (lower level)
	Filestore                   *filestore.Filestore      `optional:"true"` // the filestore blockstore
	BaseBlocks                  node.BaseBlocks           // the raw blockstore, no filestore wrapping
	GCLocker                    bstore.GCLocker           // the locker used to protect the blockstore during gc
	Blocks                      bserv.BlockService        // the block service, get/add blocks.
	DAG                         ipld.DAGService           // the merkle dag service, get/add objects.
	IPLDFetcherFactory          fetcher.Factory           `name:"ipldFetcher"`          // fetcher that paths over the IPLD data model
	UnixFSFetcherFactory        fetcher.Factory           `name:"unixfsFetcher"`        // fetcher that interprets UnixFS data
	OfflineIPLDFetcherFactory   fetcher.Factory           `name:"offlineIpldFetcher"`   // fetcher that paths over the IPLD data model without fetching new blocks
	OfflineUnixFSFetcherFactory fetcher.Factory           `name:"offlineUnixfsFetcher"` // fetcher that interprets UnixFS data without fetching new blocks
	Reporter                    *metrics.BandwidthCounter `optional:"true"`
	Discovery                   mdns.Service              `optional:"true"`
	FilesRoot                   *mfs.Root
	RecordValidator             record.Validator

	// Online
	PeerHost                  p2phost.Host               `optional:"true"` // the network host (server+client)
	Peering                   *peering.PeeringService    `optional:"true"`
	Filters                   *ma.Filters                `optional:"true"`
	Bootstrapper              io.Closer                  `optional:"true"` // the periodic bootstrapper
	ContentDiscovery          routing.ContentDiscovery   `optional:"true"` // the discovery part of the routing system
	DNSResolver               *madns.Resolver            // the DNS resolver
	IPLDPathResolver          pathresolver.Resolver      `name:"ipldPathResolver"`          // The IPLD path resolver
	UnixFSPathResolver        pathresolver.Resolver      `name:"unixFSPathResolver"`        // The UnixFS path resolver
	OfflineIPLDPathResolver   pathresolver.Resolver      `name:"offlineIpldPathResolver"`   // The IPLD path resolver that uses only locally available blocks
	OfflineUnixFSPathResolver pathresolver.Resolver      `name:"offlineUnixFSPathResolver"` // The UnixFS path resolver that uses only locally available blocks
	Exchange                  exchange.Interface         // the block exchange + strategy
	Bitswap                   *bitswap.Bitswap           `optional:"true"` // The Bitswap instance
	Namesys                   namesys.NameSystem         // the name system, resolves paths to hashes
	ProvidingStrategy         config.ReproviderStrategy  `optional:"true"`
	ProvidingKeyChanFunc      provider.KeyChanFunc       `optional:"true"`
	IpnsRepub                 *ipnsrp.Republisher        `optional:"true"`
	ResourceManager           network.ResourceManager    `optional:"true"`
	PubSub                    *pubsub.PubSub             `optional:"true"`
	PSRouter                  *psrouter.PubsubValueStore `optional:"true"`
	Routing                   irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht
	Provider                  node.DHTProvider           // the value provider system
	DHT                       *ddht.DHT                  `optional:"true"`
	DHTClient                 routing.Routing            `name:"dhtc" optional:"true"`
	P2P                       *p2p.P2P                   `optional:"true"`

	ctx  context.Context
	stop func() error

	// Flags
	IsOnline bool `optional:"true"` // Online is set when networking is enabled.
	IsDaemon bool `optional:"true"` // Daemon is set when running on a long-running daemon.
}

// Mounts defines what the node's mount state is. This should
// perhaps be moved to the daemon or mount. It's here because
// it needs to be accessible across daemon requests.
type Mounts struct {
	Ipfs mount.Mount
	Ipns mount.Mount
	Mfs  mount.Mount
}

// Close calls Close() on the App object
func (n *IpfsNode) Close() error {
	return n.stop()
}

// Context returns the IpfsNode context
func (n *IpfsNode) Context() context.Context {
	if n.ctx == nil {
		n.ctx = context.TODO()
	}
	return n.ctx
}

// Bootstrap will set and call the IpfsNode's bootstrap function.
func (n *IpfsNode) Bootstrap(cfg bootstrap.BootstrapConfig) error {
	// TODO what should return value be when in offlineMode?
	if n.Routing == nil {
		return nil
	}

	if n.Bootstrapper != nil {
		n.Bootstrapper.Close() // stop previous bootstrap process.
	}

	// if the caller did not specify a bootstrap peer function, get the
	// freshest bootstrap peers from config. this responds to live changes.
	if cfg.BootstrapPeers == nil {
		cfg.BootstrapPeers = func() []peer.AddrInfo {
			ps, err := n.loadBootstrapPeers()
			if err != nil {
				log.Warn("failed to parse bootstrap peers from config")
				return nil
			}
			return ps
		}
	}

	if load, _ := cfg.BackupPeers(); load == nil {
		save := func(ctx context.Context, peerList []peer.AddrInfo) {
			err := n.saveTempBootstrapPeers(ctx, peerList)
			if err != nil {
				log.Warnf("saveTempBootstrapPeers failed: %s", err)
				return
			}
		}
		load = func(ctx context.Context) []peer.AddrInfo {
			peerList, err := n.loadTempBootstrapPeers(ctx)
			if err != nil {
				log.Warnf("loadTempBootstrapPeers failed: %s", err)
				return nil
			}
			return peerList
		}
		cfg.SetBackupPeers(load, save)
	}

	repoConf, err := n.Repo.Config()
	if err != nil {
		return err
	}
	if repoConf.Internal.BackupBootstrapInterval != nil {
		cfg.BackupBootstrapInterval = repoConf.Internal.BackupBootstrapInterval.WithDefault(time.Hour)
	}

	n.Bootstrapper, err = bootstrap.Bootstrap(n.Identity, n.PeerHost, n.Routing, cfg)
	return err
}
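
// A typical invocation, shown as a hedged sketch (DefaultBootstrapConfig is
// the stock configuration exported by boxo/bootstrap):
//
//	if err := n.Bootstrap(bootstrap.DefaultBootstrapConfig); err != nil {
//		log.Errorf("bootstrap failed: %s", err)
//	}

// TempBootstrapPeersKey is the datastore key under which the backup list of
// bootstrap peers is persisted.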
var TempBootstrapPeersKey = datastore.NewKey("/local/temp_bootstrap_peers")

func (n *IpfsNode) loadBootstrapPeers() ([]peer.AddrInfo, error) {
	cfg, err := n.Repo.Config()
	if err != nil {
		return nil, err
	}

	// Use auto-config resolution for actual bootstrap connectivity
	return cfg.BootstrapPeersWithAutoConf()
}
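
// saveTempBootstrapPeers serializes peerList as JSON and stores it in the
// repo datastore under TempBootstrapPeersKey, syncing the key to disk.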
func (n *IpfsNode) saveTempBootstrapPeers(ctx context.Context, peerList []peer.AddrInfo) error {
	ds := n.Repo.Datastore()
	bytes, err := json.Marshal(config.BootstrapPeerStrings(peerList))
	if err != nil {
		return err
	}

	if err := ds.Put(ctx, TempBootstrapPeersKey, bytes); err != nil {
		return err
	}
	return ds.Sync(ctx, TempBootstrapPeersKey)
}
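
// loadTempBootstrapPeers reads back the peer list previously stored by
// saveTempBootstrapPeers and parses it into addr infos.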
func (n *IpfsNode) loadTempBootstrapPeers(ctx context.Context) ([]peer.AddrInfo, error) {
	ds := n.Repo.Datastore()
	bytes, err := ds.Get(ctx, TempBootstrapPeersKey)
	if err != nil {
		return nil, err
	}

	var addrs []string
	if err := json.Unmarshal(bytes, &addrs); err != nil {
		return nil, err
	}
	return config.ParseBootstrapPeers(addrs)
}
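
// ConstructPeerHostOpts groups the options used when constructing the libp2p
// peer host.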
type ConstructPeerHostOpts struct {
	AddrsFactory      p2pbhost.AddrsFactory
	DisableNatPortMap bool
	DisableRelay      bool
	EnableRelayHop    bool
	ConnectionManager connmgr.ConnManager
}