mirror of
https://github.com/ipfs/kubo.git
synced 2026-02-21 18:37:45 +08:00
Some checks are pending
CodeQL / codeql (push) Waiting to run
Docker Check / lint (push) Waiting to run
Docker Check / build (push) Waiting to run
Gateway Conformance / gateway-conformance (push) Waiting to run
Gateway Conformance / gateway-conformance-libp2p-experiment (push) Waiting to run
Go Build / go-build (push) Waiting to run
Go Check / go-check (push) Waiting to run
Go Lint / go-lint (push) Waiting to run
Go Test / go-test (push) Waiting to run
Interop / interop-prep (push) Waiting to run
Interop / helia-interop (push) Blocked by required conditions
Interop / ipfs-webui (push) Blocked by required conditions
Sharness / sharness-test (push) Waiting to run
Spell Check / spellcheck (push) Waiting to run
* refactor: consolidate Provider/Reprovider into unified Provide config - merge Provider and Reprovider configs into single Provide section - add fs-repo-17-to-18 migration for config consolidation - improve migration ergonomics with common package utilities - convert deprecated "flat" strategy to "all" during migration - improve Provide docs * docs: add total_provide_count metric guidance - document how to monitor provide success rates via prometheus metrics - add performance comparison section to changelog - explain how to evaluate sweep vs legacy provider effectiveness * fix: add OpenTelemetry meter provider for metrics - set up meter provider with Prometheus exporter in daemon - enables metrics from external libs like go-libp2p-kad-dht - fixes missing total_provide_count_total when SweepEnabled=true - update docs to reflect actual metric names --------- Co-authored-by: gammazero <11790789+gammazero@users.noreply.github.com> Co-authored-by: guillaumemichel <guillaume@michel.id> Co-authored-by: Daniel Norman <1992255+2color@users.noreply.github.com> Co-authored-by: Hector Sanjuan <code@hector.link>
91 lines
2.9 KiB
Go
91 lines
2.9 KiB
Go
package node
|
|
|
|
import (
|
|
blockstore "github.com/ipfs/boxo/blockstore"
|
|
"github.com/ipfs/go-datastore"
|
|
config "github.com/ipfs/kubo/config"
|
|
"go.uber.org/fx"
|
|
|
|
"github.com/ipfs/boxo/filestore"
|
|
"github.com/ipfs/kubo/core/node/helpers"
|
|
"github.com/ipfs/kubo/repo"
|
|
"github.com/ipfs/kubo/thirdparty/verifbs"
|
|
)
|
|
|
|
// RepoConfig loads configuration from the repo
|
|
func RepoConfig(repo repo.Repo) (*config.Config, error) {
|
|
cfg, err := repo.Config()
|
|
return cfg, err
|
|
}
|
|
|
|
// Datastore provides the datastore
|
|
func Datastore(repo repo.Repo) datastore.Datastore {
|
|
return repo.Datastore()
|
|
}
|
|
|
|
// BaseBlocks is the lower-level blockstore, without the GC or Filestore
// layers. It is a distinct named type so fx can inject it separately from
// the fully-wrapped blockstore.Blockstore.
type BaseBlocks blockstore.Blockstore
|
|
|
|
// BaseBlockstoreCtor creates cached blockstore backed by the provided datastore
|
|
func BaseBlockstoreCtor(
|
|
cacheOpts blockstore.CacheOpts,
|
|
hashOnRead bool,
|
|
writeThrough bool,
|
|
providingStrategy string,
|
|
) func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
|
|
return func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
|
|
opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)}
|
|
|
|
// Blockstore providing integration:
|
|
// When strategy includes "all" the blockstore directly provides blocks as they're Put.
|
|
// Important: Provide calls from blockstore are intentionally BLOCKING.
|
|
// The Provider implementation (not the blockstore) should handle concurrency/queuing.
|
|
// This avoids spawning unbounded goroutines for concurrent block additions.
|
|
strategyFlag := config.ParseProvideStrategy(providingStrategy)
|
|
if strategyFlag&config.ProvideStrategyAll != 0 {
|
|
opts = append(opts, blockstore.Provider(prov))
|
|
}
|
|
|
|
// hash security
|
|
bs = blockstore.NewBlockstore(
|
|
repo.Datastore(),
|
|
opts...,
|
|
)
|
|
bs = &verifbs.VerifBS{Blockstore: bs}
|
|
bs, err = blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, cacheOpts)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
bs = blockstore.NewIdStore(bs)
|
|
|
|
if hashOnRead {
|
|
bs = &blockstore.ValidatingBlockstore{Blockstore: bs}
|
|
}
|
|
|
|
return
|
|
}
|
|
}
|
|
|
|
// GcBlockstoreCtor wraps the base blockstore with GC and Filestore layers
|
|
func GcBlockstoreCtor(bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore) {
|
|
gclocker = blockstore.NewGCLocker()
|
|
gcbs = blockstore.NewGCBlockstore(bb, gclocker)
|
|
|
|
bs = gcbs
|
|
return
|
|
}
|
|
|
|
// FilestoreBlockstoreCtor wraps GcBlockstore and adds Filestore support
|
|
func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
|
|
gclocker = blockstore.NewGCLocker()
|
|
|
|
// hash security
|
|
fstore = filestore.NewFilestore(bb, repo.FileManager())
|
|
gcbs = blockstore.NewGCBlockstore(fstore, gclocker)
|
|
gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs}
|
|
|
|
bs = gcbs
|
|
return
|
|
}
|