Mirror of https://github.com/ipfs/kubo.git
Remove DI module dependency on BuildCfg
License: MIT
Signed-off-by: Łukasz Magiera <magik6k@gmail.com>
commit fd0c06a825 (parent 7046626ecc)
@@ -105,18 +105,22 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
 		return cfg.Repo
 	})

+	// TODO: Remove this, use only for passing node config
+	cfgOption := fx.Provide(func() *node.BuildCfg {
+		return (*node.BuildCfg)(cfg)
+	})
+
 	metricsCtx := fx.Provide(func() node.MetricsCtx {
 		return node.MetricsCtx(ctx)
 	})

+	hostOption := fx.Provide(func() node.HostOption {
+		return cfg.Host
+	})
+
+	routingOption := fx.Provide(func() node.RoutingOption {
+		return cfg.Routing
+	})
+
 	params := fx.Options(
 		repoOption,
+		cfgOption,
+		hostOption,
+		routingOption,
 		metricsCtx,
 	)

@@ -137,10 +141,10 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
 		fx.Provide(baseProcess),

 		params,
-		node.Storage,
+		node.Storage((*node.BuildCfg)(cfg)),
 		node.Identity,
 		node.IPNS,
-		node.Networked(cfg.Online),
+		node.Networked((*node.BuildCfg)(cfg)),

 		fx.Invoke(setupSharding),
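The hunk above shows the new wiring pattern: instead of injecting *BuildCfg into DI constructors, NewNode provides each configuration value to the graph under its own type via a small closure. A minimal, self-contained sketch of that pattern with go.uber.org/fx; the HostOption and RoutingOption stand-ins below are illustrative, not the go-ipfs types:

package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Illustrative value types; in the commit, node.HostOption and
// node.RoutingOption are function types provided the same way.
type HostOption string
type RoutingOption string

func main() {
	cfgHost, cfgRouting := HostOption("default"), RoutingOption("dht")

	app := fx.New(
		// Closures capture plain config values so downstream
		// constructors can depend on them by type, with no
		// *BuildCfg in their signatures.
		fx.Provide(func() HostOption { return cfgHost }),
		fx.Provide(func() RoutingOption { return cfgRouting }),
		fx.Invoke(func(h HostOption, r RoutingOption) {
			fmt.Println("wired:", h, r)
		}),
	)
	if err := app.Err(); err != nil {
		panic(err)
	}
}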
@@ -9,24 +9,20 @@ import (
 	"github.com/ipfs/go-ipfs/provider"
 )

-var LibP2P = fx.Options(
+var BaseLibP2P = fx.Options(
 	fx.Provide(P2PAddrFilters),
 	fx.Provide(P2PBandwidthCounter),
 	fx.Provide(P2PPNet),
 	fx.Provide(P2PAddrsFactory),
 	fx.Provide(P2PConnectionManager),
-	fx.Provide(P2PSmuxTransport),
 	fx.Provide(P2PNatPortMap),
 	fx.Provide(P2PRelay),
 	fx.Provide(P2PAutoRealy),
 	fx.Provide(P2PDefaultTransports),
 	fx.Provide(P2PQUIC),

-	fx.Provide(P2PHostOption),
 	fx.Provide(P2PHost),
-	fx.Provide(P2POnlineRouting),

-	fx.Provide(Pubsub),
 	fx.Provide(NewDiscoveryHandler),

 	fx.Invoke(AutoNATService),

@@ -35,12 +31,26 @@ var LibP2P = fx.Options(
 	fx.Invoke(SetupDiscovery),
 )

-var Storage = fx.Options(
-	fx.Provide(RepoConfig),
-	fx.Provide(DatastoreCtor),
-	fx.Provide(BaseBlockstoreCtor),
-	fx.Provide(GcBlockstoreCtor),
-)
+func LibP2P(cfg *BuildCfg) fx.Option {
+	return fx.Options(
+		BaseLibP2P,
+
+		MaybeProvide(P2PNoSecurity, cfg.DisableEncryptedConnections),
+		MaybeProvide(Pubsub, cfg.getOpt("pubsub") || cfg.getOpt("ipnsps")),
+
+		fx.Provide(P2PSmuxTransport(cfg.getOpt("mplex"))),
+		fx.Provide(P2POnlineRouting(cfg.getOpt("ipnsps"))),
+	)
+}
+
+func Storage(cfg *BuildCfg) fx.Option {
+	return fx.Options(
+		fx.Provide(RepoConfig),
+		fx.Provide(DatastoreCtor),
+		fx.Provide(BaseBlockstoreCtor(cfg.Permanent, cfg.NilRepo)),
+		fx.Provide(GcBlockstoreCtor),
+	)
+}

 var Identity = fx.Options(
 	fx.Provide(PeerID),
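With this hunk, LibP2P and Storage stop being package-level fx.Options variables and become functions over *BuildCfg: configuration is resolved once, at wiring time, instead of being injected into every constructor. A hedged sketch of the module-as-function shape (Cfg and Logging below are invented for illustration):

package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Illustrative config; the commit passes *BuildCfg the same way.
type Cfg struct{ Verbose bool }

// The module is a function over the config and returns an fx.Option,
// so the DI graph itself never needs the config injected.
func Logging(cfg *Cfg) fx.Option {
	level := "info"
	if cfg.Verbose {
		level = "debug"
	}
	return fx.Options(
		fx.Provide(func() string { return level }),
	)
}

func main() {
	fx.New(
		Logging(&Cfg{Verbose: true}),
		fx.Invoke(func(level string) { fmt.Println("log level:", level) }),
	)
}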
@@ -61,18 +71,19 @@ var Providers = fx.Options(
 	fx.Invoke(provider.Provider.Run),
 )

-var Online = fx.Options(
-	fx.Provide(OnlineExchangeCtor),
-	fx.Provide(OnlineNamesysCtor),
+func Online(cfg *BuildCfg) fx.Option {
+	return fx.Options(
+		fx.Provide(OnlineExchangeCtor),
+		fx.Provide(OnlineNamesysCtor),

-	fx.Invoke(IpnsRepublisher),
+		fx.Invoke(IpnsRepublisher),

-	fx.Provide(p2p.NewP2P),
+		fx.Provide(p2p.NewP2P),

-	LibP2P,
-	Providers,
-)
+		LibP2P(cfg),
+		Providers,
+	)
+}

 var Offline = fx.Options(
 	fx.Provide(offline.Exchange),
 	fx.Provide(OfflineNamesysCtor),
@@ -80,9 +91,16 @@ var Offline = fx.Options(
 	fx.Provide(provider.NewOfflineProvider),
 )

-func Networked(online bool) fx.Option {
-	if online {
-		return Online
+func Networked(cfg *BuildCfg) fx.Option {
+	if cfg.Online {
+		return Online(cfg)
 	}
 	return Offline
 }
+
+func MaybeProvide(opt interface{}, enable bool) fx.Option {
+	if enable {
+		return fx.Provide(opt)
+	}
+	return fx.Options()
+}
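MaybeProvide is the commit's helper for conditional wiring: it returns either a real fx.Provide or an empty fx.Options() no-op. A runnable sketch of how it behaves; PubSub and NewPubSub here are placeholders, not the real constructors:

package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Same shape as the helper added above.
func MaybeProvide(opt interface{}, enable bool) fx.Option {
	if enable {
		return fx.Provide(opt)
	}
	return fx.Options()
}

type PubSub struct{ Proto string }

func NewPubSub() *PubSub { return &PubSub{Proto: "gossipsub"} }

func main() {
	fx.New(
		MaybeProvide(NewPubSub, true),
		// If the flag were false, this non-optional dependency would
		// fail the graph, which is why the commit also tags the
		// PubSub field in p2pRoutingIn with `optional:"true"`.
		fx.Invoke(func(ps *PubSub) { fmt.Println("pubsub:", ps.Proto) }),
	)
}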
@@ -320,9 +320,11 @@ func makeSmuxTransportOption(mplexExp bool) libp2p.Option {
 	return libp2p.ChainOptions(opts...)
 }

-func P2PSmuxTransport(bcfg *BuildCfg) (opts Libp2pOpts, err error) {
-	opts.Opts = append(opts.Opts, makeSmuxTransportOption(bcfg.getOpt("mplex")))
-	return
+func P2PSmuxTransport(mplex bool) func() (opts Libp2pOpts, err error) {
+	return func() (opts Libp2pOpts, err error) {
+		opts.Opts = append(opts.Opts, makeSmuxTransportOption(mplex))
+		return
+	}
 }

 func P2PNatPortMap(cfg *config.Config) (opts Libp2pOpts, err error) {
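P2PSmuxTransport is now curried: the outer call bakes the mplex flag in at wiring time, and fx only ever sees the returned zero-argument constructor. The same pattern reduced to a toy example (Transport and NewTransport are illustrative):

package main

import (
	"fmt"

	"go.uber.org/fx"
)

type Transport struct{ Mplex bool }

// Currying a constructor: configuration goes into the outer call,
// while DI-managed dependencies (none here) would go into the
// returned closure's parameter list.
func NewTransport(mplex bool) func() *Transport {
	return func() *Transport { return &Transport{Mplex: mplex} }
}

func main() {
	fx.New(
		fx.Provide(NewTransport(true)),
		fx.Invoke(func(t *Transport) { fmt.Println("mplex enabled:", t.Mplex) }),
	)
}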
@@ -366,15 +368,23 @@ func P2PQUIC(cfg *config.Config) (opts Libp2pOpts, err error) {
 	return
 }

+func P2PNoSecurity() (opts Libp2pOpts) {
+	opts.Opts = append(opts.Opts, libp2p.NoSecurity)
+	// TODO: shouldn't this be Errorf to guarantee visibility?
+	log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
+You will not be able to connect to any nodes configured to use encrypted connections`)
+	return opts
+}
+
 type P2PHostIn struct {
 	fx.In

-	BCfg       *BuildCfg
-	Repo       repo.Repo
-	Validator  record.Validator
-	HostOption HostOption
-	ID         peer.ID
-	Peerstore  peerstore.Peerstore
+	Repo          repo.Repo
+	Validator     record.Validator
+	HostOption    HostOption
+	RoutingOption RoutingOption
+	ID            peer.ID
+	Peerstore     peerstore.Peerstore

 	Opts [][]libp2p.Option `group:"libp2p"`
 }
@@ -404,7 +414,7 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut
 	})

 	opts = append(opts, libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
-		r, err := params.BCfg.Routing(ctx, h, params.Repo.Datastore(), params.Validator)
+		r, err := params.RoutingOption(ctx, h, params.Repo.Datastore(), params.Validator)
 		out.Routing = r
 		return r, err
 	}))

@@ -417,7 +427,7 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut
 	// this code is necessary just for tests: mock network constructions
 	// ignore the libp2p constructor options that actually construct the routing!
 	if out.Routing == nil {
-		r, err := params.BCfg.Routing(ctx, out.Host, params.Repo.Datastore(), params.Validator)
+		r, err := params.RoutingOption(ctx, out.Host, params.Repo.Datastore(), params.Validator)
 		if err != nil {
 			return P2PHostOut{}, err
 		}
@@ -461,11 +471,10 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut
 type p2pRoutingIn struct {
 	fx.In

-	BCfg      *BuildCfg
 	Repo      repo.Repo
 	Validator record.Validator
 	Host      host.Host
-	PubSub    *pubsub.PubSub
+	PubSub    *pubsub.PubSub `optional:"true"`

 	BaseRouting BaseRouting
 }
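The new `optional:"true"` tag is what lets Pubsub become conditionally provided (see MaybeProvide above): when no *pubsub.PubSub constructor is registered, the field is left nil instead of failing the graph. A minimal sketch of that behavior; routingIn here is a stand-in, not the real struct:

package main

import (
	"fmt"

	"go.uber.org/fx"
)

type PubSub struct{}

// Parameter object in the style of p2pRoutingIn.
type routingIn struct {
	fx.In

	PubSub *PubSub `optional:"true"`
}

func main() {
	fx.New(
		// Note: no fx.Provide for *PubSub anywhere.
		fx.Invoke(func(in routingIn) {
			fmt.Println("pubsub wired:", in.PubSub != nil) // prints false
		}),
	)
}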
@@ -474,36 +483,38 @@ type p2pRoutingOut struct {
 	fx.Out

 	IpfsRouting routing.IpfsRouting
-	PSRouter    *namesys.PubsubValueStore // TODO: optional
+	PSRouter    *namesys.PubsubValueStore
 }

-func P2POnlineRouting(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) {
-	out.IpfsRouting = in.BaseRouting
+func P2POnlineRouting(ipnsps bool) func(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) {
+	return func(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) {
+		out.IpfsRouting = in.BaseRouting

-	if in.BCfg.getOpt("ipnsps") {
-		out.PSRouter = namesys.NewPubsubValueStore(
-			lifecycleCtx(mctx, lc),
-			in.Host,
-			in.BaseRouting,
-			in.PubSub,
-			in.Validator,
-		)
+		if ipnsps {
+			out.PSRouter = namesys.NewPubsubValueStore(
+				lifecycleCtx(mctx, lc),
+				in.Host,
+				in.BaseRouting,
+				in.PubSub,
+				in.Validator,
+			)

-		out.IpfsRouting = routinghelpers.Tiered{
-			Routers: []routing.IpfsRouting{
-				// Always check pubsub first.
-				&routinghelpers.Compose{
-					ValueStore: &routinghelpers.LimitedValueStore{
-						ValueStore: out.PSRouter,
-						Namespaces: []string{"ipns"},
-					},
-				},
-				in.BaseRouting,
-			},
-			Validator: in.Validator,
-		}
+			out.IpfsRouting = routinghelpers.Tiered{
+				Routers: []routing.IpfsRouting{
+					// Always check pubsub first.
+					&routinghelpers.Compose{
+						ValueStore: &routinghelpers.LimitedValueStore{
+							ValueStore: out.PSRouter,
+							Namespaces: []string{"ipns"},
+						},
+					},
+					in.BaseRouting,
+				},
+				Validator: in.Validator,
+			}
+		}
+		return out
 	}
-	return out
 }

 func AutoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, host host.Host) error {
@@ -519,11 +530,7 @@ func AutoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, host h
 	return err
 }

-func Pubsub(mctx MetricsCtx, lc fx.Lifecycle, host host.Host, bcfg *BuildCfg, cfg *config.Config) (service *pubsub.PubSub, err error) {
-	if !(bcfg.getOpt("pubsub") || bcfg.getOpt("ipnsps")) {
-		return nil, nil // TODO: mark optional
-	}
-
+func Pubsub(mctx MetricsCtx, lc fx.Lifecycle, host host.Host, cfg *config.Config) (service *pubsub.PubSub, err error) {
 	var pubsubOptions []pubsub.Option
 	if cfg.Pubsub.DisableSigning {
 		pubsubOptions = append(pubsubOptions, pubsub.WithMessageSigning(false))
@@ -581,17 +588,3 @@ func StartListening(host host.Host, cfg *config.Config) error {
 	log.Infof("Swarm listening at: %s", addrs)
 	return nil
 }
-
-func P2PHostOption(bcfg *BuildCfg) (hostOption HostOption, err error) {
-	hostOption = bcfg.Host
-	if bcfg.DisableEncryptedConnections {
-		innerHostOption := hostOption
-		hostOption = func(ctx context.Context, id peer.ID, ps peerstore.Peerstore, options ...libp2p.Option) (host.Host, error) {
-			return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...)
-		}
-		// TODO: shouldn't this be Errorf to guarantee visibility?
-		log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
-You will not be able to connect to any nodes configured to use encrypted connections`)
-	}
-	return hostOption, nil
-}
@@ -37,46 +37,48 @@ func DatastoreCtor(repo repo.Repo) datastore.Datastore {

 type BaseBlocks blockstore.Blockstore

-func BaseBlockstoreCtor(mctx MetricsCtx, repo repo.Repo, cfg *config.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs BaseBlocks, err error) {
-	rds := &retrystore.Datastore{
-		Batching:    repo.Datastore(),
-		Delay:       time.Millisecond * 200,
-		Retries:     6,
-		TempErrFunc: isTooManyFDError,
-	}
-	// hash security
-	bs = blockstore.NewBlockstore(rds)
-	bs = &verifbs.VerifBS{Blockstore: bs}
-
-	opts := blockstore.DefaultCacheOpts()
-	opts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize
-	if !bcfg.Permanent {
-		opts.HasBloomFilterSize = 0
-	}
-
-	if !bcfg.NilRepo {
-		ctx, cancel := context.WithCancel(mctx)
-
-		lc.Append(fx.Hook{
-			OnStop: func(context context.Context) error {
-				cancel()
-				return nil
-			},
-		})
-		bs, err = blockstore.CachedBlockstore(ctx, bs, opts)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	bs = blockstore.NewIdStore(bs)
-	bs = cidv0v1.NewBlockstore(bs)
-
-	if cfg.Datastore.HashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly?
-		bs.HashOnRead(true)
-	}
-
-	return
+func BaseBlockstoreCtor(permanent bool, nilRepo bool) func(mctx MetricsCtx, repo repo.Repo, cfg *config.Config, lc fx.Lifecycle) (bs BaseBlocks, err error) {
+	return func(mctx MetricsCtx, repo repo.Repo, cfg *config.Config, lc fx.Lifecycle) (bs BaseBlocks, err error) {
+		rds := &retrystore.Datastore{
+			Batching:    repo.Datastore(),
+			Delay:       time.Millisecond * 200,
+			Retries:     6,
+			TempErrFunc: isTooManyFDError,
+		}
+		// hash security
+		bs = blockstore.NewBlockstore(rds)
+		bs = &verifbs.VerifBS{Blockstore: bs}
+
+		opts := blockstore.DefaultCacheOpts()
+		opts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize
+		if !permanent {
+			opts.HasBloomFilterSize = 0
+		}
+
+		if !nilRepo {
+			ctx, cancel := context.WithCancel(mctx)
+
+			lc.Append(fx.Hook{
+				OnStop: func(context context.Context) error {
+					cancel()
+					return nil
+				},
+			})
+			bs, err = blockstore.CachedBlockstore(ctx, bs, opts)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		bs = blockstore.NewIdStore(bs)
+		bs = cidv0v1.NewBlockstore(bs)
+
+		if cfg.Datastore.HashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly?
+			bs.HashOnRead(true)
+		}
+
+		return
+	}
 }

 func GcBlockstoreCtor(repo repo.Repo, bb BaseBlocks, cfg *config.Config) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
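BaseBlockstoreCtor keeps its fx.Lifecycle hook through the refactor: a context created inside the (now curried) constructor is cancelled from OnStop so cached-blockstore bookkeeping stops with the app. A reduced sketch of that lifecycle pattern (Store and NewStore are invented):

package main

import (
	"context"
	"fmt"

	"go.uber.org/fx"
)

type Store struct{ ctx context.Context }

// Derive a context in the constructor and cancel it on shutdown,
// mirroring the OnStop hook in BaseBlockstoreCtor.
func NewStore(lc fx.Lifecycle) *Store {
	ctx, cancel := context.WithCancel(context.Background())
	lc.Append(fx.Hook{
		OnStop: func(context.Context) error {
			cancel()
			return nil
		},
	})
	return &Store{ctx: ctx}
}

func main() {
	app := fx.New(
		fx.Provide(NewStore),
		fx.Invoke(func(s *Store) { fmt.Println("store ready") }),
	)
	ctx := context.Background()
	if err := app.Start(ctx); err != nil {
		panic(err)
	}
	_ = app.Stop(ctx) // fires OnStop, cancelling the store's context
}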