From 7c6a92087d014c594fab27fd29d7e9f475778df6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 26 Mar 2019 23:32:30 +0100 Subject: [PATCH 01/27] gomod: import dig MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- go.mod | 1 + go.sum | 2 ++ 2 files changed, 3 insertions(+) diff --git a/go.mod b/go.mod index 168172446..e6c336b6b 100644 --- a/go.mod +++ b/go.mod @@ -108,6 +108,7 @@ require ( github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c + go.uber.org/dig v1.7.0 // indirect golang.org/x/sys v0.0.0-20190302025703-b6889370fb10 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e // indirect gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect diff --git a/go.sum b/go.sum index 300dd111c..2da16a08d 100644 --- a/go.sum +++ b/go.sum @@ -524,6 +524,8 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSv github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/whyrusleeping/yamux v1.1.5 h1:4CK3aUUJQu0qpKZv5gEWJjNOQtdbdDhVVS6PJ+HimdE= github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= +go.uber.org/dig v1.7.0 h1:E5/L92iQTNJTjfgJF2KgU+/JpMaiuvK2DHLBj0+kSZk= +go.uber.org/dig v1.7.0/go.mod h1:z+dSd2TP9Usi48jL8M3v63iSBVkiwtVyMKxMZYYauPg= go4.org v0.0.0-20190218023631-ce4c26f7be8e h1:m9LfARr2VIOW0vsV19kEKp/sWQvZnGobA8JHui/XJoY= go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= From 2379787ee92d9848f296be069f942a09a87b8e23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 28 Mar 
2019 01:48:24 +0100 Subject: [PATCH 02/27] gomod: import uber/fx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- go.mod | 5 ++++- go.sum | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index e6c336b6b..21dbcd646 100644 --- a/go.mod +++ b/go.mod @@ -108,7 +108,10 @@ require ( github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c - go.uber.org/dig v1.7.0 // indirect + go.uber.org/atomic v1.3.2 // indirect + go.uber.org/dig v1.7.0 + go.uber.org/fx v1.9.0 + go.uber.org/multierr v1.1.0 // indirect golang.org/x/sys v0.0.0-20190302025703-b6889370fb10 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e // indirect gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect diff --git a/go.sum b/go.sum index 2da16a08d..61fc30ac9 100644 --- a/go.sum +++ b/go.sum @@ -524,8 +524,14 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSv github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/whyrusleeping/yamux v1.1.5 h1:4CK3aUUJQu0qpKZv5gEWJjNOQtdbdDhVVS6PJ+HimdE= github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/dig v1.7.0 h1:E5/L92iQTNJTjfgJF2KgU+/JpMaiuvK2DHLBj0+kSZk= go.uber.org/dig v1.7.0/go.mod h1:z+dSd2TP9Usi48jL8M3v63iSBVkiwtVyMKxMZYYauPg= +go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= +go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= 
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go4.org v0.0.0-20190218023631-ce4c26f7be8e h1:m9LfARr2VIOW0vsV19kEKp/sWQvZnGobA8JHui/XJoY= go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= From 0fd2f80be71c526999b3d05d4faca20e8541b363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 28 Mar 2019 01:49:22 +0100 Subject: [PATCH 03/27] Initial DI node implementation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 85 +++++- core/core.go | 18 +- core/ncore.go | 774 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 855 insertions(+), 22 deletions(-) create mode 100644 core/ncore.go diff --git a/core/builder.go b/core/builder.go index 1fdec9944..955ed893c 100644 --- a/core/builder.go +++ b/core/builder.go @@ -5,7 +5,9 @@ import ( "crypto/rand" "encoding/base64" "errors" + "github.com/ipfs/go-ipfs/p2p" "github.com/ipfs/go-ipfs/provider" + "go.uber.org/fx" "os" "syscall" "time" @@ -25,19 +27,15 @@ import ( cfg "github.com/ipfs/go-ipfs-config" offline "github.com/ipfs/go-ipfs-exchange-offline" offroute "github.com/ipfs/go-ipfs-routing/offline" - ipns "github.com/ipfs/go-ipns" dag "github.com/ipfs/go-merkledag" metrics "github.com/ipfs/go-metrics-interface" resolver "github.com/ipfs/go-path/resolver" uio "github.com/ipfs/go-unixfs/io" - goprocessctx "github.com/jbenet/goprocess/context" libp2p "github.com/libp2p/go-libp2p" ci "github.com/libp2p/go-libp2p-crypto" p2phost "github.com/libp2p/go-libp2p-host" peer "github.com/libp2p/go-libp2p-peer" pstore "github.com/libp2p/go-libp2p-peerstore" - pstoremem "github.com/libp2p/go-libp2p-peerstore/pstoremem" - record "github.com/libp2p/go-libp2p-record" ) type BuildCfg struct { @@ -55,7 +53,7 @@ 
type BuildCfg struct { // DO NOT SET THIS UNLESS YOU'RE TESTING. DisableEncryptedConnections bool - // If NilRepo is set, a repo backed by a nil datastore will be constructed + // If NilRepo is set, a Repo backed by a nil datastore will be constructed NilRepo bool Routing RoutingOption @@ -73,7 +71,7 @@ func (cfg *BuildCfg) getOpt(key string) bool { func (cfg *BuildCfg) fillDefaults() error { if cfg.Repo != nil && cfg.NilRepo { - return errors.New("cannot set a repo and specify nilrepo at the same time") + return errors.New("cannot set a Repo and specify nilrepo at the same time") } if cfg.Repo == nil { @@ -142,7 +140,66 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { ctx = metrics.CtxScope(ctx, "ipfs") + repoOption := fx.Provide(func(lc fx.Lifecycle) repo.Repo { + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return cfg.Repo.Close() + }, + }) + + return cfg.Repo + }) + + // TODO: Remove this, use only for passing node config + cfgOption := fx.Provide(func() *BuildCfg { + return cfg + }) + n := &IpfsNode{ + ctx: ctx, + } + + app := fx.New( + repoOption, + cfgOption, + + fx.Provide(repoConfig), + fx.Provide(identity), + fx.Provide(privateKey), + + fx.Provide(peerstore), + fx.Provide(baseBlockstoreCtor), + fx.Provide(gcBlockstoreCtor), + + fx.Provide(recordValidator), + + ipfsp2p, + + fx.Invoke(setupSharding), + + fx.Provide(onlineExchangeCtor), // TODO: offline + fx.Provide(onlineNamesysCtor), // TODO: ^^ + fx.Provide(bserv.New), + fx.Provide(onlineDagCtor), + fx.Provide(resolver.NewBasicResolver), + + fx.Provide(pinning), + fx.Provide(files), + + fx.Provide(providerQueue), + fx.Provide(providerCtor), + fx.Provide(reproviderCtor), + fx.Invoke(reprovider), + + fx.Provide(p2p.NewP2P), + + fx.Invoke(ipnsRepublisher), + fx.Invoke(provider.Provider.Run), + + fx.Extract(n), + ) + +/* n := &IpfsNode{ IsOnline: cfg.Online, Repo: cfg.Repo, ctx: ctx, @@ -153,16 +210,19 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, 
error) { "pk": record.PublicKeyValidator{}, "ipns": ipns.Validator{KeyBook: n.Peerstore}, } +*/ + // TODO: port to lifetimes + // n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown) - // TODO: this is a weird circular-ish dependency, rework it - n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown) - - if err := setupNode(ctx, n, cfg); err != nil { + /*if err := setupNode(ctx, n, cfg); err != nil { n.Close() return nil, err - } + }*/ + if app.Err() != nil { + return nil, app.Err() + } - return n, nil + return n, app.Start(ctx) } func isTooManyFDError(err error) bool { @@ -247,6 +307,7 @@ func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error { hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) { return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...) } + // TODO: shouldn't this be Errorf to guarantee visibility? log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS. You will not be able to connect to any nodes configured to use encrypted connections`) } diff --git a/core/core.go b/core/core.go index 99379b85c..133279755 100644 --- a/core/core.go +++ b/core/core.go @@ -79,8 +79,6 @@ import ( mamask "github.com/whyrusleeping/multiaddr-filter" ) -const IpnsValidatorTag = "ipns" - const kReprovideFrequency = time.Hour * 12 const discoveryConnTimeout = time.Second * 30 const DefaultIpnsCacheSize = 128 @@ -101,9 +99,9 @@ type IpfsNode struct { // Local node Pinning pin.Pinner // the pinning manager - Mounts Mounts // current mount state, if any. + Mounts Mounts `optional:"true"` // current mount state, if any. 
PrivateKey ic.PrivKey // the local node's private Key - PNetFingerprint []byte // fingerprint of private network + PNetFingerprint PNetFingerprint // fingerprint of private network // Services Peerstore pstore.Peerstore // storage for other Peer instances @@ -115,21 +113,21 @@ type IpfsNode struct { DAG ipld.DAGService // the merkle dag service, get/add objects. Resolver *resolver.Resolver // the path resolution system Reporter metrics.Reporter - Discovery discovery.Service + Discovery discovery.Service `optional:"true"` FilesRoot *mfs.Root RecordValidator record.Validator // Online PeerHost p2phost.Host // the network host (server+client) - Bootstrapper io.Closer // the periodic bootstrapper + Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper Routing routing.IpfsRouting // the routing system. recommend ipfs-dht Exchange exchange.Interface // the block exchange + strategy (bitswap) Namesys namesys.NameSystem // the name system, resolves paths to hashes Provider provider.Provider // the value provider system Reprovider *rp.Reprovider // the value reprovider system - IpnsRepub *ipnsrp.Republisher + IpnsRepub *ipnsrp.Republisher `optional:"true"` - AutoNAT *autonat.AutoNATService + AutoNAT *autonat.AutoNATService `optional:"true"` PubSub *pubsub.PubSub PSRouter *psrouter.PubsubValueStore DHT *dht.IpfsDHT @@ -139,8 +137,8 @@ type IpfsNode struct { ctx context.Context // Flags - IsOnline bool // Online is set when networking is enabled. - IsDaemon bool // Daemon is set when running on a long-running daemon. + IsOnline bool `optional:"true"` // Online is set when networking is enabled. + IsDaemon bool `optional:"true"` // Daemon is set when running on a long-running daemon. } // Mounts defines what the node's mount state is. 
This should diff --git a/core/ncore.go b/core/ncore.go new file mode 100644 index 000000000..4346ef728 --- /dev/null +++ b/core/ncore.go @@ -0,0 +1,774 @@ +package core + +import ( + "bytes" + "context" + "errors" + "fmt" + "github.com/ipfs/go-bitswap" + bsnet "github.com/ipfs/go-bitswap/network" + bserv "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + bstore "github.com/ipfs/go-ipfs-blockstore" + exchange "github.com/ipfs/go-ipfs-exchange-interface" + "github.com/ipfs/go-ipfs-exchange-offline" + u "github.com/ipfs/go-ipfs-util" + rp "github.com/ipfs/go-ipfs/exchange/reprovide" + "github.com/ipfs/go-ipfs/filestore" + "github.com/ipfs/go-ipfs/namesys" + ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher" + "github.com/ipfs/go-ipfs/pin" + "github.com/ipfs/go-ipfs/provider" + "github.com/ipfs/go-ipfs/thirdparty/cidv0v1" + "github.com/ipfs/go-ipfs/thirdparty/verifbs" + "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-ipns" + merkledag "github.com/ipfs/go-merkledag" + "github.com/ipfs/go-mfs" + ft "github.com/ipfs/go-unixfs" + "github.com/jbenet/goprocess" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-autonat-svc" + circuit "github.com/libp2p/go-libp2p-circuit" + "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-metrics" + pstore "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-peerstore/pstoremem" + "github.com/libp2p/go-libp2p-pnet" + "github.com/libp2p/go-libp2p-pubsub" + psrouter "github.com/libp2p/go-libp2p-pubsub-router" + quic "github.com/libp2p/go-libp2p-quic-transport" + "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p-routing" + rhelpers "github.com/libp2p/go-libp2p-routing-helpers" + "github.com/libp2p/go-libp2p/p2p/discovery" + rhost "github.com/libp2p/go-libp2p/p2p/host/routed" + "go.uber.org/fx" + "time" + + "github.com/ipfs/go-ipfs/repo" + + retry "github.com/ipfs/go-datastore/retrystore" + iconfig 
"github.com/ipfs/go-ipfs-config" + uio "github.com/ipfs/go-unixfs/io" + ic "github.com/libp2p/go-libp2p-crypto" + p2phost "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-peer" + mamask "github.com/whyrusleeping/multiaddr-filter" +) + +func repoConfig(repo repo.Repo) (*iconfig.Config, error) { + return repo.Config() +} + +func identity(cfg *iconfig.Config) (peer.ID, error) { + cid := cfg.Identity.PeerID + if cid == "" { + return "", errors.New("identity was not set in config (was 'ipfs init' run?)") + } + if len(cid) == 0 { + return "", errors.New("no peer ID in config! (was 'ipfs init' run?)") + } + + id, err := peer.IDB58Decode(cid) + if err != nil { + return "", fmt.Errorf("peer ID invalid: %s", err) + } + + return id, nil +} + +func peerstore(id peer.ID, sk ic.PrivKey) pstore.Peerstore { + ps := pstoremem.NewPeerstore() + + if sk != nil { + ps.AddPrivKey(id, sk) + ps.AddPubKey(id, sk.GetPublic()) + } + + return ps +} + +func privateKey(cfg *iconfig.Config, id peer.ID) (ic.PrivKey, error) { + if cfg.Identity.PrivKey == "" { + return nil, nil + } + + sk, err := cfg.Identity.DecodePrivateKey("passphrase todo!") + if err != nil { + return nil, err + } + + id2, err := peer.IDFromPrivateKey(sk) + if err != nil { + return nil, err + } + + if id2 != id { + return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2) + } + return sk, nil +} + +func baseBlockstoreCtor(repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs bstore.Blockstore, err error) { + rds := &retry.Datastore{ + Batching: repo.Datastore(), + Delay: time.Millisecond * 200, + Retries: 6, + TempErrFunc: isTooManyFDError, + } + // hash security + bs = bstore.NewBlockstore(rds) + bs = &verifbs.VerifBS{Blockstore: bs} + + opts := bstore.DefaultCacheOpts() + opts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize + if !bcfg.Permanent { + opts.HasBloomFilterSize = 0 + } + + if !bcfg.NilRepo { + ctx, cancel := context.WithCancel(context.TODO()) 
//TODO: needed for mertics + + lc.Append(fx.Hook{ + OnStop: func(context context.Context) error { + cancel() + return nil + }, + }) + bs, err = bstore.CachedBlockstore(ctx, bs, opts) + if err != nil { + return nil, err + } + } + + bs = bstore.NewIdStore(bs) + bs = cidv0v1.NewBlockstore(bs) + + if cfg.Datastore.HashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly? + bs.HashOnRead(true) + } + + return +} + +func gcBlockstoreCtor(repo repo.Repo, bs bstore.Blockstore, cfg *iconfig.Config) (gclocker bstore.GCLocker, gcbs bstore.GCBlockstore, fstore *filestore.Filestore) { + gclocker = bstore.NewGCLocker() + gcbs = bstore.NewGCBlockstore(bs, gclocker) + + if cfg.Experimental.FilestoreEnabled || cfg.Experimental.UrlstoreEnabled { + // hash security + fstore = filestore.NewFilestore(bs, repo.FileManager()) //TODO: mark optional + gcbs = bstore.NewGCBlockstore(fstore, gclocker) + gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs} + } + return +} + +func recordValidator(ps pstore.Peerstore) record.Validator { + return record.NamespacedValidator{ + "pk": record.PublicKeyValidator{}, + "ipns": ipns.Validator{KeyBook: ps}, + } +} + +//////////////////// +// libp2p related + +//////////////////// +// libp2p + +var ipfsp2p = fx.Options( + fx.Provide(p2pAddrFilters), + fx.Provide(p2pBandwidthCounter), + fx.Provide(p2pPNet), + fx.Provide(p2pAddrsFactory), + fx.Provide(p2pConnectionManager), + fx.Provide(p2pSmuxTransport), + fx.Provide(p2pNatPortMap), + fx.Provide(p2pRelay), + fx.Provide(p2pAutoRealy), + fx.Provide(p2pDefaultTransports), + fx.Provide(p2pQUIC), + + fx.Provide(p2pHostOption), + fx.Provide(p2pHost), + fx.Provide(p2pOnlineRouting), + + fx.Provide(pubsubCtor), + fx.Provide(newDiscoveryHandler), + + fx.Invoke(autoNATService), + fx.Invoke(p2pPNetChecker), + fx.Invoke(startListening), + fx.Invoke(setupDiscovery), +) + +func p2pHostOption(bcfg *BuildCfg) (hostOption HostOption, err error) { + hostOption = bcfg.Host + 
if bcfg.DisableEncryptedConnections { + innerHostOption := hostOption + hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) { + return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...) + } + // TODO: shouldn't this be Errorf to guarantee visibility? + log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS. + You will not be able to connect to any nodes configured to use encrypted connections`) + } + return hostOption, nil +} + +func p2pAddrFilters(cfg *iconfig.Config) (opts libp2pOpts, err error) { + for _, s := range cfg.Swarm.AddrFilters { + f, err := mamask.NewMask(s) + if err != nil { + return opts, fmt.Errorf("incorrectly formatted address filter in config: %s", s) + } + opts.Opts = append(opts.Opts, libp2p.FilterAddresses(f)) + } + return opts, nil +} + +func p2pBandwidthCounter(cfg *iconfig.Config) (opts libp2pOpts, reporter metrics.Reporter) { + reporter = metrics.NewBandwidthCounter() + + if !cfg.Swarm.DisableBandwidthMetrics { + opts.Opts = append(opts.Opts, libp2p.BandwidthReporter(reporter)) + } + return opts, reporter +} + +type libp2pOpts struct { + fx.Out + + Opts []libp2p.Option `group:"libp2p"` +} + +type PNetFingerprint []byte // TODO: find some better place +func p2pPNet(repo repo.Repo) (opts libp2pOpts, fp PNetFingerprint, err error) { + swarmkey, err := repo.SwarmKey() + if err != nil || swarmkey == nil { + return opts, nil, err + } + + protec, err := pnet.NewProtector(bytes.NewReader(swarmkey)) + if err != nil { + return opts, nil, fmt.Errorf("failed to configure private network: %s", err) + } + fp = protec.Fingerprint() + + opts.Opts = append(opts.Opts, libp2p.PrivateNetwork(protec)) + return opts, fp, nil +} + +func p2pPNetChecker(repo repo.Repo, ph p2phost.Host, lc fx.Lifecycle) error { + // TODO: better check? 
+ swarmkey, err := repo.SwarmKey() + if err != nil || swarmkey == nil { + return err + } + + done := make(chan struct{}) + lc.Append(fx.Hook{ + OnStart: func(_ context.Context) error { + go func() { + t := time.NewTicker(30 * time.Second) + <-t.C // swallow one tick + for { + select { + case <-t.C: + if len(ph.Network().Peers()) == 0 { + log.Warning("We are in private network and have no peers.") + log.Warning("This might be configuration mistake.") + } + case <-done: + return + } + } + }() + return nil + }, + OnStop: func(_ context.Context) error { + close(done) + return nil + }, + }) + return nil +} + +func p2pAddrsFactory(cfg *iconfig.Config) (opts libp2pOpts, err error) { + addrsFactory, err := makeAddrsFactory(cfg.Addresses) + if err != nil { + return opts, err + } + if !cfg.Swarm.DisableRelay { + addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs) + } + opts.Opts = append(opts.Opts, libp2p.AddrsFactory(addrsFactory)) + return +} + +func p2pConnectionManager(cfg *iconfig.Config) (opts libp2pOpts, err error) { + connm, err := constructConnMgr(cfg.Swarm.ConnMgr) + if err != nil { + return opts, err + } + + opts.Opts = append(opts.Opts, libp2p.ConnectionManager(connm)) + return +} + +func p2pSmuxTransport(bcfg *BuildCfg) (opts libp2pOpts, err error) { + opts.Opts = append(opts.Opts, makeSmuxTransportOption(bcfg.getOpt("mplex"))) + return +} + +func p2pNatPortMap(cfg *iconfig.Config) (opts libp2pOpts, err error) { + if !cfg.Swarm.DisableNatPortMap { + opts.Opts = append(opts.Opts, libp2p.NATPortMap()) + } + return +} + +func p2pRelay(cfg *iconfig.Config) (opts libp2pOpts, err error) { + if cfg.Swarm.DisableRelay { + // Enabled by default. 
+ opts.Opts = append(opts.Opts, libp2p.DisableRelay()) + } else { + relayOpts := []circuit.RelayOpt{circuit.OptDiscovery} + if cfg.Swarm.EnableRelayHop { + relayOpts = append(relayOpts, circuit.OptHop) + } + opts.Opts = append(opts.Opts, libp2p.EnableRelay(relayOpts...)) + } + return +} + +func p2pAutoRealy(cfg *iconfig.Config) (opts libp2pOpts, err error) { + // enable autorelay + if cfg.Swarm.EnableAutoRelay { + opts.Opts = append(opts.Opts, libp2p.EnableAutoRelay()) + } + return +} + +func p2pDefaultTransports() (opts libp2pOpts, err error) { + opts.Opts = append(opts.Opts, libp2p.DefaultTransports) + return +} + +func p2pQUIC(cfg *iconfig.Config) (opts libp2pOpts, err error) { + if cfg.Experimental.QUIC { + opts.Opts = append(opts.Opts, libp2p.Transport(quic.NewTransport)) + } + return +} + +type p2pHostIn struct { + fx.In + + BCfg *BuildCfg + Repo repo.Repo + Validator record.Validator + HostOption HostOption + ID peer.ID + Peerstore pstore.Peerstore + + Opts [][]libp2p.Option `group:"libp2p"` +} + +type BaseRouting routing.IpfsRouting +type p2pHostOut struct { + fx.Out + + Host p2phost.Host + Routing BaseRouting + IpfsDHT *dht.IpfsDHT +} + +// TODO: move some of this into params struct +func p2pHost(lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut, err error) { + opts := []libp2p.Option{libp2p.NoListenAddrs} + for _, o := range params.Opts { + opts = append(opts, o...) + } + + ctx, cancel := context.WithCancel(context.TODO()) + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + cancel() + return nil + }, + }) + + opts = append(opts, libp2p.Routing(func(h p2phost.Host) (routing.PeerRouting, error) { + r, err := params.BCfg.Routing(ctx, h, params.Repo.Datastore(), params.Validator) + out.Routing = r + return r, err + })) + + out.Host, err = params.HostOption(ctx, params.ID, params.Peerstore, opts...) 
+ + // this code is necessary just for tests: mock network constructions + // ignore the libp2p constructor options that actually construct the routing! + if out.Routing == nil { + r, err := params.BCfg.Routing(ctx, out.Host, params.Repo.Datastore(), params.Validator) + if err != nil { + return p2pHostOut{}, err + } + out.Routing = r + out.Host = rhost.Wrap(out.Host, out.Routing) + } + + // TODO: break this up into more DI units + // TODO: I'm not a fan of type assertions like this but the + // `RoutingOption` system doesn't currently provide access to the + // IpfsNode. + // + // Ideally, we'd do something like: + // + // 1. Add some fancy method to introspect into tiered routers to extract + // things like the pubsub router or the DHT (complicated, messy, + // probably not worth it). + // 2. Pass the IpfsNode into the RoutingOption (would also remove the + // PSRouter case below. + // 3. Introduce some kind of service manager? (my personal favorite but + // that requires a fair amount of work). + if dht, ok := out.Routing.(*dht.IpfsDHT); ok { + out.IpfsDHT = dht + } + + return out, err +} + +type p2pRoutingIn struct { + fx.In + + BCfg *BuildCfg + Repo repo.Repo + Validator record.Validator + Host p2phost.Host + PubSub *pubsub.PubSub + + BaseRouting BaseRouting +} + +type p2pRoutingOut struct { + fx.Out + + IpfsRouting routing.IpfsRouting + PSRouter *psrouter.PubsubValueStore //TODO: optional +} + +func p2pOnlineRouting(lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { + out.IpfsRouting = in.BaseRouting + + if in.BCfg.getOpt("ipnsps") { + out.PSRouter = psrouter.NewPubsubValueStore( + lifecycleCtx(lc), + in.Host, + in.BaseRouting, + in.PubSub, + in.Validator, + ) + + out.IpfsRouting = rhelpers.Tiered{ + Routers: []routing.IpfsRouting{ + // Always check pubsub first. 
+ &rhelpers.Compose{ + ValueStore: &rhelpers.LimitedValueStore{ + ValueStore: out.PSRouter, + Namespaces: []string{"ipns"}, + }, + }, + in.BaseRouting, + }, + Validator: in.Validator, + } + } + return out +} + +//////////// +// P2P services + +func autoNATService(lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host) error { + if !cfg.Swarm.EnableAutoNATService { + return nil + } + var opts []libp2p.Option + if cfg.Experimental.QUIC { + opts = append(opts, libp2p.DefaultTransports, libp2p.Transport(quic.NewTransport)) + } + + _, err := autonat.NewAutoNATService(lifecycleCtx(lc), host, opts...) + return err +} + +func pubsubCtor(lc fx.Lifecycle, host p2phost.Host, bcfg *BuildCfg, cfg *iconfig.Config) (service *pubsub.PubSub, err error) { + if !(bcfg.getOpt("pubsub") || bcfg.getOpt("ipnsps")) { + return nil, nil // TODO: mark optional + } + + var pubsubOptions []pubsub.Option + if cfg.Pubsub.DisableSigning { + pubsubOptions = append(pubsubOptions, pubsub.WithMessageSigning(false)) + } + + if cfg.Pubsub.StrictSignatureVerification { + pubsubOptions = append(pubsubOptions, pubsub.WithStrictSignatureVerification(true)) + } + + switch cfg.Pubsub.Router { + case "": + fallthrough + case "floodsub": + service, err = pubsub.NewFloodSub(lifecycleCtx(lc), host, pubsubOptions...) + + case "gossipsub": + service, err = pubsub.NewGossipSub(lifecycleCtx(lc), host, pubsubOptions...) 
+ + default: + err = fmt.Errorf("Unknown pubsub router %s", cfg.Pubsub.Router) + } + + return service, err +} + +//////////// +// Offline services + +// offline.Exchange +// offroute.NewOfflineRouter + +func offlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo) (namesys.NameSystem, error) { + return namesys.NewNameSystem(rt, repo.Datastore(), 0), nil +} + + +//////////// +// IPFS services + +func pinning(bstore bstore.Blockstore, ds format.DAGService, repo repo.Repo) (pin.Pinner, error) { + internalDag := merkledag.NewDAGService(bserv.New(bstore, offline.Exchange(bstore))) + pinning, err := pin.LoadPinner(repo.Datastore(), ds, internalDag) + if err != nil { + // TODO: we should move towards only running 'NewPinner' explicitly on + // node init instead of implicitly here as a result of the pinner keys + // not being found in the datastore. + // this is kinda sketchy and could cause data loss + pinning = pin.NewPinner(repo.Datastore(), ds, internalDag) + } + + return pinning, nil +} + +func onlineDagCtor(bs bserv.BlockService) format.DAGService { + return merkledag.NewDAGService(bs) +} + +func onlineExchangeCtor(lc fx.Lifecycle, host p2phost.Host, rt routing.IpfsRouting, bs bstore.Blockstore) exchange.Interface { + bitswapNetwork := bsnet.NewFromIpfsHost(host, rt) + return bitswap.New(lifecycleCtx(lc), bitswapNetwork, bs) +} + +func onlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *iconfig.Config) (namesys.NameSystem, error) { + cs := cfg.Ipns.ResolveCacheSize + if cs == 0 { + cs = DefaultIpnsCacheSize + } + if cs < 0 { + return nil, fmt.Errorf("cannot specify negative resolve cache size") + } + return namesys.NewNameSystem(rt, repo.Datastore(), cs), nil +} + +func ipnsRepublisher(lc fx.Lifecycle, cfg *iconfig.Config, namesys namesys.NameSystem, repo repo.Repo, privKey ic.PrivKey) error { + repub := ipnsrp.NewRepublisher(namesys, repo.Datastore(), privKey, repo.Keystore()) + + if cfg.Ipns.RepublishPeriod != "" { + d, err := 
time.ParseDuration(cfg.Ipns.RepublishPeriod) + if err != nil { + return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err) + } + + if !u.Debug && (d < time.Minute || d > (time.Hour*24)) { + return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d) + } + + repub.Interval = d + } + + if cfg.Ipns.RecordLifetime != "" { + d, err := time.ParseDuration(cfg.Ipns.RecordLifetime) + if err != nil { + return fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err) + } + + repub.RecordLifetime = d + } + + lcGoProc(lc, repub.Run) + return nil +} + +type discoveryHandler struct { + ctx context.Context + host p2phost.Host +} + +func (dh *discoveryHandler) HandlePeerFound(p pstore.PeerInfo) { + log.Warning("trying peer info: ", p) + ctx, cancel := context.WithTimeout(dh.ctx, discoveryConnTimeout) + defer cancel() + if err := dh.host.Connect(ctx, p); err != nil { + log.Warning("Failed to connect to peer found by discovery: ", err) + } +} + +func newDiscoveryHandler(lc fx.Lifecycle, host p2phost.Host) *discoveryHandler { + return &discoveryHandler{ + ctx: lifecycleCtx(lc), + host: host, + } +} + +func setupDiscovery(lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host, handler *discoveryHandler) error { + if cfg.Discovery.MDNS.Enabled { + mdns := cfg.Discovery.MDNS + if mdns.Interval == 0 { + mdns.Interval = 5 + } + service, err := discovery.NewMdnsService(lifecycleCtx(lc), host, time.Duration(mdns.Interval)*time.Second, discovery.ServiceTag) + if err != nil { + log.Error("mdns error: ", err) + return nil + } + service.RegisterNotifee(handler) + } + return nil +} + +func providerQueue(lc fx.Lifecycle, repo repo.Repo) (*provider.Queue, error) { + return provider.NewQueue(lifecycleCtx(lc), "provider-v1", repo.Datastore()) +} + +func providerCtor(lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting) provider.Provider { + return provider.NewProvider(lifecycleCtx(lc), queue, rt) +} + +func 
reproviderCtor(lc fx.Lifecycle, cfg *iconfig.Config, bs bstore.Blockstore, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*rp.Reprovider, error) { + var keyProvider rp.KeyChanFunc + + switch cfg.Reprovider.Strategy { + case "all": + fallthrough + case "": + keyProvider = rp.NewBlockstoreProvider(bs) + case "roots": + keyProvider = rp.NewPinnedProvider(pinning, ds, true) + case "pinned": + keyProvider = rp.NewPinnedProvider(pinning, ds, false) + default: + return nil, fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy) + } + return rp.NewReprovider(lifecycleCtx(lc), rt, keyProvider), nil +} + +func reprovider(cfg *iconfig.Config, reprovider *rp.Reprovider) error { + reproviderInterval := kReprovideFrequency + if cfg.Reprovider.Interval != "" { + dur, err := time.ParseDuration(cfg.Reprovider.Interval) + if err != nil { + return err + } + + reproviderInterval = dur + } + + go reprovider.Run(reproviderInterval) + return nil +} + +func files(lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, error) { + dsk := ds.NewKey("/local/filesroot") + pf := func(ctx context.Context, c cid.Cid) error { + return repo.Datastore().Put(dsk, c.Bytes()) + } + + var nd *merkledag.ProtoNode + val, err := repo.Datastore().Get(dsk) + ctx := lifecycleCtx(lc) + + switch { + case err == ds.ErrNotFound || val == nil: + nd = ft.EmptyDirNode() + err := dag.Add(ctx, nd) + if err != nil { + return nil, fmt.Errorf("failure writing to dagstore: %s", err) + } + case err == nil: + c, err := cid.Cast(val) + if err != nil { + return nil, err + } + + rnd, err := dag.Get(ctx, c) + if err != nil { + return nil, fmt.Errorf("error loading filesroot from DAG: %s", err) + } + + pbnd, ok := rnd.(*merkledag.ProtoNode) + if !ok { + return nil, merkledag.ErrNotProtobuf + } + + nd = pbnd + default: + return nil, err + } + + return mfs.NewRoot(ctx, dag, nd, pf) +} + +// TODO !!!!!!!! 
+func bootstrap(n IpfsNode) error { + return n.Bootstrap(DefaultBootstrapConfig) +} + +//////////// +// Hacks + +// lifecycleCtx creates a context which will be cancelled when lifecycle stops +// +// This is a hack which we need because most of our services use contexts in a +// wrong way +func lifecycleCtx(lc fx.Lifecycle) context.Context { + ctx, cancel := context.WithCancel(context.TODO()) // TODO: really wire this context up, things (like metrics) may depend on it + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + cancel() + return nil + }, + }) + return ctx +} + +func lcGoProc(lc fx.Lifecycle, processFunc goprocess.ProcessFunc) { + proc := goprocess.Background() + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + proc.Go(processFunc) + return nil + }, + OnStop: func(ctx context.Context) error { + return proc.Close() // todo: respect ctx + }, + }) +} + +func setupSharding(cfg *iconfig.Config) { + // TEMP: setting global sharding switch here + uio.UseHAMTSharding = cfg.Experimental.ShardingEnabled +} From 361ba691ed0115933da1a8c17038fe3f82e570db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 28 Mar 2019 02:02:05 +0100 Subject: [PATCH 04/27] Organize NewNode a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 86 ++++++++++++++++++++++++++++++++----------------- core/ncore.go | 4 ++- 2 files changed, 60 insertions(+), 30 deletions(-) diff --git a/core/builder.go b/core/builder.go index 955ed893c..6f94e8f9a 100644 --- a/core/builder.go +++ b/core/builder.go @@ -155,47 +155,75 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { return cfg }) + + params := fx.Options( + repoOption, + cfgOption, + ) + + storage := fx.Options( + fx.Provide(repoConfig), + fx.Provide(baseBlockstoreCtor), + fx.Provide(gcBlockstoreCtor), + ) + + ident := fx.Options( + fx.Provide(identity), + 
fx.Provide(privateKey), + ) + + ipns := fx.Options( + fx.Provide(recordValidator), + ) + + online := fx.Options( + fx.Provide(onlineExchangeCtor), + fx.Provide(onlineNamesysCtor), + + fx.Invoke(ipnsRepublisher), + fx.Invoke(provider.Provider.Run), + ) + if !cfg.Online { + online = fx.Options( + fx.Provide(offline.Exchange), + fx.Provide(offlineNamesysCtor), + ) + } + + core := fx.Options( + fx.Provide(bserv.New), + fx.Provide(dagCtor), + fx.Provide(resolver.NewBasicResolver), + fx.Provide(pinning), + fx.Provide(files), + ) + + providers := fx.Options( + fx.Provide(providerQueue), + fx.Provide(providerCtor), + fx.Provide(reproviderCtor), + fx.Invoke(reprovider), + ) + n := &IpfsNode{ ctx: ctx, } app := fx.New( - repoOption, - cfgOption, - - fx.Provide(repoConfig), - fx.Provide(identity), - fx.Provide(privateKey), - - fx.Provide(peerstore), - fx.Provide(baseBlockstoreCtor), - fx.Provide(gcBlockstoreCtor), - - fx.Provide(recordValidator), - + params, + storage, + ident, ipfsp2p, + ipns, + online, fx.Invoke(setupSharding), - fx.Provide(onlineExchangeCtor), // TODO: offline - fx.Provide(onlineNamesysCtor), // TODO: ^^ - fx.Provide(bserv.New), - fx.Provide(onlineDagCtor), - fx.Provide(resolver.NewBasicResolver), - - fx.Provide(pinning), - fx.Provide(files), - - fx.Provide(providerQueue), - fx.Provide(providerCtor), - fx.Provide(reproviderCtor), - fx.Invoke(reprovider), + core, + providers, fx.Provide(p2p.NewP2P), - fx.Invoke(ipnsRepublisher), - fx.Invoke(provider.Provider.Run), - fx.Extract(n), ) diff --git a/core/ncore.go b/core/ncore.go index 4346ef728..1aa300722 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -180,6 +180,8 @@ func recordValidator(ps pstore.Peerstore) record.Validator { // libp2p var ipfsp2p = fx.Options( + fx.Provide(peerstore), + fx.Provide(p2pAddrFilters), fx.Provide(p2pBandwidthCounter), fx.Provide(p2pPNet), @@ -564,7 +566,7 @@ func pinning(bstore bstore.Blockstore, ds format.DAGService, repo repo.Repo) (pi return pinning, nil } -func 
onlineDagCtor(bs bserv.BlockService) format.DAGService { +func dagCtor(bs bserv.BlockService) format.DAGService { return merkledag.NewDAGService(bs) } From 65d8fad008f42322c83cd3bfd91f354f90f09d66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 28 Mar 2019 02:08:38 +0100 Subject: [PATCH 05/27] Properly set IsOnline in NewNode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/builder.go b/core/builder.go index 6f94e8f9a..8c073a39c 100644 --- a/core/builder.go +++ b/core/builder.go @@ -155,7 +155,6 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { return cfg }) - params := fx.Options( repoOption, cfgOption, @@ -227,6 +226,8 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { fx.Extract(n), ) + n.IsOnline = cfg.Online + /* n := &IpfsNode{ IsOnline: cfg.Online, Repo: cfg.Repo, From ccc576b69389cf489a953319c314867a002504a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 28 Mar 2019 02:57:13 +0100 Subject: [PATCH 06/27] More constructor fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 31 +++++++++++++++++++------------ core/core.go | 32 ++++++++++++++++++-------------- core/ncore.go | 15 ++++++++++----- 3 files changed, 47 insertions(+), 31 deletions(-) diff --git a/core/builder.go b/core/builder.go index 8c073a39c..b6be8115f 100644 --- a/core/builder.go +++ b/core/builder.go @@ -162,6 +162,7 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { storage := fx.Options( fx.Provide(repoConfig), + fx.Provide(datastoreCtor), fx.Provide(baseBlockstoreCtor), fx.Provide(gcBlockstoreCtor), ) @@ -169,23 +170,39 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) 
{ ident := fx.Options( fx.Provide(identity), fx.Provide(privateKey), + fx.Provide(peerstore), ) ipns := fx.Options( fx.Provide(recordValidator), ) + providers := fx.Options( + fx.Provide(providerQueue), + fx.Provide(providerCtor), + fx.Provide(reproviderCtor), + + fx.Invoke(reprovider), + fx.Invoke(provider.Provider.Run), + ) + online := fx.Options( fx.Provide(onlineExchangeCtor), fx.Provide(onlineNamesysCtor), fx.Invoke(ipnsRepublisher), - fx.Invoke(provider.Provider.Run), + + fx.Provide(p2p.NewP2P), + + ipfsp2p, + providers, ) if !cfg.Online { online = fx.Options( fx.Provide(offline.Exchange), fx.Provide(offlineNamesysCtor), + fx.Provide(offroute.NewOfflineRouter), + fx.Provide(provider.NewOfflineProvider), ) } @@ -197,13 +214,6 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { fx.Provide(files), ) - providers := fx.Options( - fx.Provide(providerQueue), - fx.Provide(providerCtor), - fx.Provide(reproviderCtor), - fx.Invoke(reprovider), - ) - n := &IpfsNode{ ctx: ctx, } @@ -212,21 +222,18 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { params, storage, ident, - ipfsp2p, ipns, online, fx.Invoke(setupSharding), core, - providers, - - fx.Provide(p2p.NewP2P), fx.Extract(n), ) n.IsOnline = cfg.Online + n.app = app /* n := &IpfsNode{ IsOnline: cfg.Online, diff --git a/core/core.go b/core/core.go index 133279755..5067d1670 100644 --- a/core/core.go +++ b/core/core.go @@ -20,6 +20,8 @@ import ( "strings" "time" + "go.uber.org/fx" + version "github.com/ipfs/go-ipfs" rp "github.com/ipfs/go-ipfs/exchange/reprovide" filestore "github.com/ipfs/go-ipfs/filestore" @@ -101,10 +103,10 @@ type IpfsNode struct { Pinning pin.Pinner // the pinning manager Mounts Mounts `optional:"true"` // current mount state, if any. 
PrivateKey ic.PrivKey // the local node's private Key - PNetFingerprint PNetFingerprint // fingerprint of private network + PNetFingerprint PNetFingerprint `optional:"true"` // fingerprint of private network // Services - Peerstore pstore.Peerstore // storage for other Peer instances + Peerstore pstore.Peerstore `optional:"true"` // storage for other Peer instances Blockstore bstore.GCBlockstore // the block store (lower level) Filestore *filestore.Filestore // the filestore blockstore BaseBlocks bstore.Blockstore // the raw blockstore, no filestore wrapping @@ -112,30 +114,32 @@ type IpfsNode struct { Blocks bserv.BlockService // the block service, get/add blocks. DAG ipld.DAGService // the merkle dag service, get/add objects. Resolver *resolver.Resolver // the path resolution system - Reporter metrics.Reporter + Reporter metrics.Reporter `optional:"true"` Discovery discovery.Service `optional:"true"` FilesRoot *mfs.Root RecordValidator record.Validator // Online - PeerHost p2phost.Host // the network host (server+client) + PeerHost p2phost.Host `optional:"true"` // the network host (server+client) Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper - Routing routing.IpfsRouting // the routing system. recommend ipfs-dht + Routing routing.IpfsRouting `optional:"true"` // the routing system. 
recommend ipfs-dht Exchange exchange.Interface // the block exchange + strategy (bitswap) Namesys namesys.NameSystem // the name system, resolves paths to hashes - Provider provider.Provider // the value provider system - Reprovider *rp.Reprovider // the value reprovider system + Provider provider.Provider // the value provider system + Reprovider *rp.Reprovider `optional:"true"` // the value reprovider system IpnsRepub *ipnsrp.Republisher `optional:"true"` AutoNAT *autonat.AutoNATService `optional:"true"` - PubSub *pubsub.PubSub - PSRouter *psrouter.PubsubValueStore - DHT *dht.IpfsDHT - P2P *p2p.P2P + PubSub *pubsub.PubSub `optional:"true"` + PSRouter *psrouter.PubsubValueStore `optional:"true"` + DHT *dht.IpfsDHT `optional:"true"` + P2P *p2p.P2P `optional:"true"` - proc goprocess.Process + proc goprocess.Process //TODO: remove ctx context.Context + app *fx.App + // Flags IsOnline bool `optional:"true"` // Online is set when networking is enabled. IsDaemon bool `optional:"true"` // Daemon is set when running on a long-running daemon. 
@@ -648,9 +652,9 @@ func (n *IpfsNode) Process() goprocess.Process { return n.proc } -// Close calls Close() on the Process object +// Close calls Close() on the App object func (n *IpfsNode) Close() error { - return n.proc.Close() + return n.app.Stop(n.ctx) } // Context returns the IpfsNode context diff --git a/core/ncore.go b/core/ncore.go index 1aa300722..725591975 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -111,6 +111,10 @@ func privateKey(cfg *iconfig.Config, id peer.ID) (ic.PrivKey, error) { return sk, nil } +func datastoreCtor(repo repo.Repo) ds.Datastore { + return repo.Datastore() +} + func baseBlockstoreCtor(repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs bstore.Blockstore, err error) { rds := &retry.Datastore{ Batching: repo.Datastore(), @@ -180,8 +184,6 @@ func recordValidator(ps pstore.Peerstore) record.Validator { // libp2p var ipfsp2p = fx.Options( - fx.Provide(peerstore), - fx.Provide(p2pAddrFilters), fx.Provide(p2pBandwidthCounter), fx.Provide(p2pPNet), @@ -411,6 +413,9 @@ func p2pHost(lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut, err error) { })) out.Host, err = params.HostOption(ctx, params.ID, params.Peerstore, opts...) + if err != nil { + return p2pHostOut{}, err + } // this code is necessary just for tests: mock network constructions // ignore the libp2p constructor options that actually construct the routing! 
@@ -758,14 +763,14 @@ func lifecycleCtx(lc fx.Lifecycle) context.Context { } func lcGoProc(lc fx.Lifecycle, processFunc goprocess.ProcessFunc) { - proc := goprocess.Background() + proc := make(chan goprocess.Process, 1) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - proc.Go(processFunc) + proc <- goprocess.Go(processFunc) return nil }, OnStop: func(ctx context.Context) error { - return proc.Close() // todo: respect ctx + return (<-proc).Close() // todo: respect ctx }, }) } From cc2be2e73a5f568643680f876ba3ff377d67f08b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 29 Mar 2019 15:55:52 +0100 Subject: [PATCH 07/27] Fix goprocess / lifecycle / ctx relations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- cmd/ipfs/daemon.go | 2 +- core/builder.go | 16 +++++++++++++--- core/commands/shutdown.go | 2 +- core/core.go | 15 +++++---------- core/corehttp/corehttp.go | 6 +++--- core/ncore.go | 27 ++++++++++++++++++++++----- fuse/ipns/mount_unix.go | 2 +- fuse/readonly/mount_unix.go | 2 +- 8 files changed, 47 insertions(+), 25 deletions(-) diff --git a/cmd/ipfs/daemon.go b/cmd/ipfs/daemon.go index 2aed209ad..fc90e969f 100644 --- a/cmd/ipfs/daemon.go +++ b/cmd/ipfs/daemon.go @@ -372,7 +372,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment if err != nil { return err } - node.Process().AddChild(goprocess.WithTeardown(cctx.Plugins.Close)) + node.Process.AddChild(goprocess.WithTeardown(cctx.Plugins.Close)) // construct api endpoint - every time apiErrc, err := serveHTTPApi(req, cctx) diff --git a/core/builder.go b/core/builder.go index b6be8115f..7eb80c91f 100644 --- a/core/builder.go +++ b/core/builder.go @@ -5,13 +5,15 @@ import ( "crypto/rand" "encoding/base64" "errors" - "github.com/ipfs/go-ipfs/p2p" - "github.com/ipfs/go-ipfs/provider" - "go.uber.org/fx" "os" "syscall" "time" + "go.uber.org/fx" + + 
"github.com/ipfs/go-ipfs/p2p" + "github.com/ipfs/go-ipfs/provider" + filestore "github.com/ipfs/go-ipfs/filestore" namesys "github.com/ipfs/go-ipfs/namesys" pin "github.com/ipfs/go-ipfs/pin" @@ -219,6 +221,8 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { } app := fx.New( + fx.Provide(baseProcess), + params, storage, ident, @@ -226,12 +230,18 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { online, fx.Invoke(setupSharding), + fx.NopLogger, core, fx.Extract(n), ) + go func() { + <-ctx.Done() + app.Stop(context.Background()) + }() + n.IsOnline = cfg.Online n.app = app diff --git a/core/commands/shutdown.go b/core/commands/shutdown.go index 4a6d44dd4..0586e3da4 100644 --- a/core/commands/shutdown.go +++ b/core/commands/shutdown.go @@ -21,7 +21,7 @@ var daemonShutdownCmd = &cmds.Command{ return cmdkit.Errorf(cmdkit.ErrClient, "daemon not running") } - if err := nd.Process().Close(); err != nil { + if err := nd.Close(); err != nil { log.Error("error while shutting down ipfs daemon:", err) } diff --git a/core/core.go b/core/core.go index 5067d1670..9b18782d2 100644 --- a/core/core.go +++ b/core/core.go @@ -135,7 +135,7 @@ type IpfsNode struct { DHT *dht.IpfsDHT `optional:"true"` P2P *p2p.P2P `optional:"true"` - proc goprocess.Process //TODO: remove + Process goprocess.Process ctx context.Context app *fx.App @@ -206,9 +206,9 @@ func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption Routin log.Warning("This might be configuration mistake.") } } - case <-n.Process().Closing(): - t.Stop() - return + //case <-n.Process().Closing(): + // t.Stop() + // return } } }() @@ -642,16 +642,11 @@ func (n *IpfsNode) setupIpnsRepublisher() error { n.IpnsRepub.RecordLifetime = d } - n.Process().Go(n.IpnsRepub.Run) + //n.Process().Go(n.IpnsRepub.Run) return nil } -// Process returns the Process object -func (n *IpfsNode) Process() goprocess.Process { - return n.proc -} - // Close calls Close() on the App object func (n 
*IpfsNode) Close() error { return n.app.Stop(n.ctx) diff --git a/core/corehttp/corehttp.go b/core/corehttp/corehttp.go index 330e8e9c2..c52bea8f5 100644 --- a/core/corehttp/corehttp.go +++ b/core/corehttp/corehttp.go @@ -85,7 +85,7 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error } select { - case <-node.Process().Closing(): + case <-node.Process.Closing(): return fmt.Errorf("failed to start server, process closing") default: } @@ -95,7 +95,7 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error } var serverError error - serverProc := node.Process().Go(func(p goprocess.Process) { + serverProc := node.Process.Go(func(p goprocess.Process) { serverError = server.Serve(lis) }) @@ -103,7 +103,7 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error select { case <-serverProc.Closed(): // if node being closed before server exits, close server - case <-node.Process().Closing(): + case <-node.Process.Closing(): log.Infof("server at %s terminating...", addr) warnProc := periodicproc.Tick(5*time.Second, func(_ goprocess.Process) { diff --git a/core/ncore.go b/core/ncore.go index 725591975..cd716edcb 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -591,7 +591,7 @@ func onlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *iconfig.Conf return namesys.NewNameSystem(rt, repo.Datastore(), cs), nil } -func ipnsRepublisher(lc fx.Lifecycle, cfg *iconfig.Config, namesys namesys.NameSystem, repo repo.Repo, privKey ic.PrivKey) error { +func ipnsRepublisher(lc lcProcess, cfg *iconfig.Config, namesys namesys.NameSystem, repo repo.Repo, privKey ic.PrivKey) error { repub := ipnsrp.NewRepublisher(namesys, repo.Datastore(), privKey, repo.Keystore()) if cfg.Ipns.RepublishPeriod != "" { @@ -616,7 +616,7 @@ func ipnsRepublisher(lc fx.Lifecycle, cfg *iconfig.Config, namesys namesys.NameS repub.RecordLifetime = d } - lcGoProc(lc, repub.Run) + lc.Run(repub.Run) return nil } @@ -762,11 +762,18 @@ 
func lifecycleCtx(lc fx.Lifecycle) context.Context { return ctx } -func lcGoProc(lc fx.Lifecycle, processFunc goprocess.ProcessFunc) { +type lcProcess struct { + fx.In + + LC fx.Lifecycle + Proc goprocess.Process +} + +func (lp *lcProcess) Run(f goprocess.ProcessFunc) { proc := make(chan goprocess.Process, 1) - lc.Append(fx.Hook{ + lp.LC.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - proc <- goprocess.Go(processFunc) + proc <- lp.Proc.Go(f) return nil }, OnStop: func(ctx context.Context) error { @@ -775,6 +782,16 @@ func lcGoProc(lc fx.Lifecycle, processFunc goprocess.ProcessFunc) { }) } +func baseProcess(lc fx.Lifecycle) goprocess.Process { + p := goprocess.WithParent(goprocess.Background()) + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return p.Close() + }, + }) + return p +} + func setupSharding(cfg *iconfig.Config) { // TEMP: setting global sharding switch here uio.UseHAMTSharding = cfg.Experimental.ShardingEnabled diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go index e6b551b2d..54e62df60 100644 --- a/fuse/ipns/mount_unix.go +++ b/fuse/ipns/mount_unix.go @@ -22,5 +22,5 @@ func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { return nil, err } - return mount.NewMount(ipfs.Process(), fsys, ipnsmp, allow_other) + return mount.NewMount(ipfs.Process, fsys, ipnsmp, allow_other) } diff --git a/fuse/readonly/mount_unix.go b/fuse/readonly/mount_unix.go index ab7945456..656e23c49 100644 --- a/fuse/readonly/mount_unix.go +++ b/fuse/readonly/mount_unix.go @@ -16,5 +16,5 @@ func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { } allow_other := cfg.Mounts.FuseAllowOther fsys := NewFileSystem(ipfs) - return mount.NewMount(ipfs.Process(), fsys, mountpoint, allow_other) + return mount.NewMount(ipfs.Process, fsys, mountpoint, allow_other) } From bfaffb2d757014d334e2bb23e984acfc6cb53e50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 1 Apr 2019 13:45:10 +0200 
Subject: [PATCH 08/27] Fix bootstrap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 39 ++++++++++++++++++++++++--------------- core/core.go | 40 ++++++++++++++++++++-------------------- core/ncore.go | 22 ++++++++-------------- 3 files changed, 52 insertions(+), 49 deletions(-) diff --git a/core/builder.go b/core/builder.go index 7eb80c91f..330fb9498 100644 --- a/core/builder.go +++ b/core/builder.go @@ -245,18 +245,18 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { n.IsOnline = cfg.Online n.app = app -/* n := &IpfsNode{ - IsOnline: cfg.Online, - Repo: cfg.Repo, - ctx: ctx, - Peerstore: pstoremem.NewPeerstore(), - } + /* n := &IpfsNode{ + IsOnline: cfg.Online, + Repo: cfg.Repo, + ctx: ctx, + Peerstore: pstoremem.NewPeerstore(), + } - n.RecordValidator = record.NamespacedValidator{ - "pk": record.PublicKeyValidator{}, - "ipns": ipns.Validator{KeyBook: n.Peerstore}, - } -*/ + n.RecordValidator = record.NamespacedValidator{ + "pk": record.PublicKeyValidator{}, + "ipns": ipns.Validator{KeyBook: n.Peerstore}, + } + */ // TODO: port to lifetimes // n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown) @@ -264,11 +264,20 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { n.Close() return nil, err }*/ - if app.Err() != nil { - return nil, app.Err() - } + if app.Err() != nil { + return nil, app.Err() + } - return n, app.Start(ctx) + if err := app.Start(ctx); err != nil { + return nil, err + } + + // TODO: DI-ify bootstrap + if !cfg.Online { + return n, nil + } + + return n, n.Bootstrap(DefaultBootstrapConfig) } func isTooManyFDError(err error) bool { diff --git a/core/core.go b/core/core.go index 9b18782d2..a587389c5 100644 --- a/core/core.go +++ b/core/core.go @@ -100,13 +100,13 @@ type IpfsNode struct { Repo repo.Repo // Local node - Pinning pin.Pinner // the pinning manager - Mounts Mounts `optional:"true"` // 
current mount state, if any. - PrivateKey ic.PrivKey // the local node's private Key - PNetFingerprint PNetFingerprint `optional:"true"` // fingerprint of private network + Pinning pin.Pinner // the pinning manager + Mounts Mounts `optional:"true"` // current mount state, if any. + PrivateKey ic.PrivKey // the local node's private Key + PNetFingerprint PNetFingerprint `optional:"true"` // fingerprint of private network // Services - Peerstore pstore.Peerstore `optional:"true"` // storage for other Peer instances + Peerstore pstore.Peerstore `optional:"true"` // storage for other Peer instances Blockstore bstore.GCBlockstore // the block store (lower level) Filestore *filestore.Filestore // the filestore blockstore BaseBlocks bstore.Blockstore // the raw blockstore, no filestore wrapping @@ -114,35 +114,35 @@ type IpfsNode struct { Blocks bserv.BlockService // the block service, get/add blocks. DAG ipld.DAGService // the merkle dag service, get/add objects. Resolver *resolver.Resolver // the path resolution system - Reporter metrics.Reporter `optional:"true"` - Discovery discovery.Service `optional:"true"` + Reporter metrics.Reporter `optional:"true"` + Discovery discovery.Service `optional:"true"` FilesRoot *mfs.Root RecordValidator record.Validator // Online - PeerHost p2phost.Host `optional:"true"` // the network host (server+client) + PeerHost p2phost.Host `optional:"true"` // the network host (server+client) Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper Routing routing.IpfsRouting `optional:"true"` // the routing system. 
recommend ipfs-dht Exchange exchange.Interface // the block exchange + strategy (bitswap) Namesys namesys.NameSystem // the name system, resolves paths to hashes - Provider provider.Provider // the value provider system - Reprovider *rp.Reprovider `optional:"true"` // the value reprovider system + Provider provider.Provider // the value provider system + Reprovider *rp.Reprovider `optional:"true"` // the value reprovider system IpnsRepub *ipnsrp.Republisher `optional:"true"` - AutoNAT *autonat.AutoNATService `optional:"true"` - PubSub *pubsub.PubSub `optional:"true"` + AutoNAT *autonat.AutoNATService `optional:"true"` + PubSub *pubsub.PubSub `optional:"true"` PSRouter *psrouter.PubsubValueStore `optional:"true"` - DHT *dht.IpfsDHT `optional:"true"` - P2P *p2p.P2P `optional:"true"` + DHT *dht.IpfsDHT `optional:"true"` + P2P *p2p.P2P `optional:"true"` Process goprocess.Process - ctx context.Context + ctx context.Context app *fx.App // Flags - IsOnline bool `optional:"true"` // Online is set when networking is enabled. - IsDaemon bool `optional:"true"` // Daemon is set when running on a long-running daemon. + IsOnline bool `optional:"true"` // Online is set when networking is enabled. + IsDaemon bool `optional:"true"` // Daemon is set when running on a long-running daemon. } // Mounts defines what the node's mount state is. 
This should @@ -206,9 +206,9 @@ func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption Routin log.Warning("This might be configuration mistake.") } } - //case <-n.Process().Closing(): - // t.Stop() - // return + //case <-n.Process().Closing(): + // t.Stop() + // return } } }() diff --git a/core/ncore.go b/core/ncore.go index cd716edcb..fa4b385bb 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -452,11 +452,11 @@ func p2pHost(lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut, err error) { type p2pRoutingIn struct { fx.In - BCfg *BuildCfg - Repo repo.Repo - Validator record.Validator - Host p2phost.Host - PubSub *pubsub.PubSub + BCfg *BuildCfg + Repo repo.Repo + Validator record.Validator + Host p2phost.Host + PubSub *pubsub.PubSub BaseRouting BaseRouting } @@ -553,7 +553,6 @@ func offlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo) (namesys.NameSys return namesys.NewNameSystem(rt, repo.Datastore(), 0), nil } - //////////// // IPFS services @@ -621,7 +620,7 @@ func ipnsRepublisher(lc lcProcess, cfg *iconfig.Config, namesys namesys.NameSyst } type discoveryHandler struct { - ctx context.Context + ctx context.Context host p2phost.Host } @@ -636,7 +635,7 @@ func (dh *discoveryHandler) HandlePeerFound(p pstore.PeerInfo) { func newDiscoveryHandler(lc fx.Lifecycle, host p2phost.Host) *discoveryHandler { return &discoveryHandler{ - ctx: lifecycleCtx(lc), + ctx: lifecycleCtx(lc), host: host, } } @@ -739,11 +738,6 @@ func files(lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, e return mfs.NewRoot(ctx, dag, nd, pf) } -// TODO !!!!!!!! 
-func bootstrap(n IpfsNode) error { - return n.Bootstrap(DefaultBootstrapConfig) -} - //////////// // Hacks @@ -765,7 +759,7 @@ func lifecycleCtx(lc fx.Lifecycle) context.Context { type lcProcess struct { fx.In - LC fx.Lifecycle + LC fx.Lifecycle Proc goprocess.Process } From 1acb450332f8e51087886384cccb279d046cbdb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 1 Apr 2019 17:17:23 +0200 Subject: [PATCH 09/27] Fix some blockstore type mixups MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/core.go | 2 +- core/ncore.go | 15 +++++++++------ test/sharness/t0270-filestore.sh | 4 ++++ 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/core/core.go b/core/core.go index a587389c5..d71e0a7f5 100644 --- a/core/core.go +++ b/core/core.go @@ -109,7 +109,7 @@ type IpfsNode struct { Peerstore pstore.Peerstore `optional:"true"` // storage for other Peer instances Blockstore bstore.GCBlockstore // the block store (lower level) Filestore *filestore.Filestore // the filestore blockstore - BaseBlocks bstore.Blockstore // the raw blockstore, no filestore wrapping + BaseBlocks BaseBlocks // the raw blockstore, no filestore wrapping GCLocker bstore.GCLocker // the locker used to protect the blockstore during gc Blocks bserv.BlockService // the block service, get/add blocks. DAG ipld.DAGService // the merkle dag service, get/add objects. 
diff --git a/core/ncore.go b/core/ncore.go index fa4b385bb..88f8ebfa4 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -115,7 +115,9 @@ func datastoreCtor(repo repo.Repo) ds.Datastore { return repo.Datastore() } -func baseBlockstoreCtor(repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs bstore.Blockstore, err error) { +type BaseBlocks bstore.Blockstore + +func baseBlockstoreCtor(repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs BaseBlocks, err error) { rds := &retry.Datastore{ Batching: repo.Datastore(), Delay: time.Millisecond * 200, @@ -157,16 +159,17 @@ func baseBlockstoreCtor(repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc return } -func gcBlockstoreCtor(repo repo.Repo, bs bstore.Blockstore, cfg *iconfig.Config) (gclocker bstore.GCLocker, gcbs bstore.GCBlockstore, fstore *filestore.Filestore) { +func gcBlockstoreCtor(repo repo.Repo, bb BaseBlocks, cfg *iconfig.Config) (gclocker bstore.GCLocker, gcbs bstore.GCBlockstore, bs bstore.Blockstore, fstore *filestore.Filestore) { gclocker = bstore.NewGCLocker() - gcbs = bstore.NewGCBlockstore(bs, gclocker) + gcbs = bstore.NewGCBlockstore(bb, gclocker) if cfg.Experimental.FilestoreEnabled || cfg.Experimental.UrlstoreEnabled { // hash security - fstore = filestore.NewFilestore(bs, repo.FileManager()) //TODO: mark optional + fstore = filestore.NewFilestore(bb, repo.FileManager()) //TODO: mark optional gcbs = bstore.NewGCBlockstore(fstore, gclocker) gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs} } + bs = gcbs return } @@ -574,7 +577,7 @@ func dagCtor(bs bserv.BlockService) format.DAGService { return merkledag.NewDAGService(bs) } -func onlineExchangeCtor(lc fx.Lifecycle, host p2phost.Host, rt routing.IpfsRouting, bs bstore.Blockstore) exchange.Interface { +func onlineExchangeCtor(lc fx.Lifecycle, host p2phost.Host, rt routing.IpfsRouting, bs bstore.GCBlockstore) exchange.Interface { bitswapNetwork := bsnet.NewFromIpfsHost(host, rt) return bitswap.New(lifecycleCtx(lc), 
bitswapNetwork, bs) } @@ -664,7 +667,7 @@ func providerCtor(lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting return provider.NewProvider(lifecycleCtx(lc), queue, rt) } -func reproviderCtor(lc fx.Lifecycle, cfg *iconfig.Config, bs bstore.Blockstore, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*rp.Reprovider, error) { +func reproviderCtor(lc fx.Lifecycle, cfg *iconfig.Config, bs BaseBlocks, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*rp.Reprovider, error) { var keyProvider rp.KeyChanFunc switch cfg.Reprovider.Strategy { diff --git a/test/sharness/t0270-filestore.sh b/test/sharness/t0270-filestore.sh index 829562c80..200305493 100755 --- a/test/sharness/t0270-filestore.sh +++ b/test/sharness/t0270-filestore.sh @@ -74,6 +74,8 @@ init_ipfs_filestore() { grep "either the filestore or the urlstore must be enabled" add_out ' + assert_repo_size_less_than 1000000 + test_expect_success "enable urlstore config setting" ' ipfs config --json Experimental.UrlstoreEnabled true ' @@ -84,6 +86,8 @@ init_ipfs_filestore() { grep "filestore is not enabled" add_out ' + assert_repo_size_less_than 1000000 + test_expect_success "enable filestore config setting" ' ipfs config --json Experimental.UrlstoreEnabled true && ipfs config --json Experimental.FilestoreEnabled true From 0ba7661d2056a00cef5ab76f7395b4d4a318a2eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 1 Apr 2019 18:39:50 +0200 Subject: [PATCH 10/27] Fix context propagation sortof MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 7 +++++++ core/ncore.go | 56 ++++++++++++++++++++++++------------------------- 2 files changed, 35 insertions(+), 28 deletions(-) diff --git a/core/builder.go b/core/builder.go index 330fb9498..358e76194 100644 --- a/core/builder.go +++ b/core/builder.go @@ -129,6 +129,8 @@ func defaultRepo(dstore repo.Datastore) 
(repo.Repo, error) { }, nil } +type MetricsCtx context.Context + // NewNode constructs and returns an IpfsNode using the given cfg. func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { if cfg == nil { @@ -157,9 +159,14 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { return cfg }) + metricsCtx := fx.Provide(func() MetricsCtx { + return MetricsCtx(ctx) + }) + params := fx.Options( repoOption, cfgOption, + metricsCtx, ) storage := fx.Options( diff --git a/core/ncore.go b/core/ncore.go index 88f8ebfa4..6b8130a57 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -117,7 +117,7 @@ func datastoreCtor(repo repo.Repo) ds.Datastore { type BaseBlocks bstore.Blockstore -func baseBlockstoreCtor(repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs BaseBlocks, err error) { +func baseBlockstoreCtor(mctx MetricsCtx, repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs BaseBlocks, err error) { rds := &retry.Datastore{ Batching: repo.Datastore(), Delay: time.Millisecond * 200, @@ -135,7 +135,7 @@ func baseBlockstoreCtor(repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc } if !bcfg.NilRepo { - ctx, cancel := context.WithCancel(context.TODO()) //TODO: needed for mertics + ctx, cancel := context.WithCancel(mctx) lc.Append(fx.Hook{ OnStop: func(context context.Context) error { @@ -395,13 +395,13 @@ type p2pHostOut struct { } // TODO: move some of this into params struct -func p2pHost(lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut, err error) { +func p2pHost(mctx MetricsCtx, lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut, err error) { opts := []libp2p.Option{libp2p.NoListenAddrs} for _, o := range params.Opts { opts = append(opts, o...) 
} - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(mctx) lc.Append(fx.Hook{ OnStop: func(_ context.Context) error { cancel() @@ -471,12 +471,12 @@ type p2pRoutingOut struct { PSRouter *psrouter.PubsubValueStore //TODO: optional } -func p2pOnlineRouting(lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { +func p2pOnlineRouting(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { out.IpfsRouting = in.BaseRouting if in.BCfg.getOpt("ipnsps") { out.PSRouter = psrouter.NewPubsubValueStore( - lifecycleCtx(lc), + lifecycleCtx(mctx, lc), in.Host, in.BaseRouting, in.PubSub, @@ -503,7 +503,7 @@ func p2pOnlineRouting(lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { //////////// // P2P services -func autoNATService(lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host) error { +func autoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host) error { if !cfg.Swarm.EnableAutoNATService { return nil } @@ -512,11 +512,11 @@ func autoNATService(lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host) err opts = append(opts, libp2p.DefaultTransports, libp2p.Transport(quic.NewTransport)) } - _, err := autonat.NewAutoNATService(lifecycleCtx(lc), host, opts...) + _, err := autonat.NewAutoNATService(lifecycleCtx(mctx, lc), host, opts...) return err } -func pubsubCtor(lc fx.Lifecycle, host p2phost.Host, bcfg *BuildCfg, cfg *iconfig.Config) (service *pubsub.PubSub, err error) { +func pubsubCtor(mctx MetricsCtx, lc fx.Lifecycle, host p2phost.Host, bcfg *BuildCfg, cfg *iconfig.Config) (service *pubsub.PubSub, err error) { if !(bcfg.getOpt("pubsub") || bcfg.getOpt("ipnsps")) { return nil, nil // TODO: mark optional } @@ -534,10 +534,10 @@ func pubsubCtor(lc fx.Lifecycle, host p2phost.Host, bcfg *BuildCfg, cfg *iconfig case "": fallthrough case "floodsub": - service, err = pubsub.NewFloodSub(lifecycleCtx(lc), host, pubsubOptions...) 
+ service, err = pubsub.NewFloodSub(lifecycleCtx(mctx, lc), host, pubsubOptions...) case "gossipsub": - service, err = pubsub.NewGossipSub(lifecycleCtx(lc), host, pubsubOptions...) + service, err = pubsub.NewGossipSub(lifecycleCtx(mctx, lc), host, pubsubOptions...) default: err = fmt.Errorf("Unknown pubsub router %s", cfg.Pubsub.Router) @@ -577,9 +577,9 @@ func dagCtor(bs bserv.BlockService) format.DAGService { return merkledag.NewDAGService(bs) } -func onlineExchangeCtor(lc fx.Lifecycle, host p2phost.Host, rt routing.IpfsRouting, bs bstore.GCBlockstore) exchange.Interface { +func onlineExchangeCtor(mctx MetricsCtx, lc fx.Lifecycle, host p2phost.Host, rt routing.IpfsRouting, bs bstore.GCBlockstore) exchange.Interface { bitswapNetwork := bsnet.NewFromIpfsHost(host, rt) - return bitswap.New(lifecycleCtx(lc), bitswapNetwork, bs) + return bitswap.New(lifecycleCtx(mctx, lc), bitswapNetwork, bs) } func onlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *iconfig.Config) (namesys.NameSystem, error) { @@ -636,20 +636,20 @@ func (dh *discoveryHandler) HandlePeerFound(p pstore.PeerInfo) { } } -func newDiscoveryHandler(lc fx.Lifecycle, host p2phost.Host) *discoveryHandler { +func newDiscoveryHandler(mctx MetricsCtx, lc fx.Lifecycle, host p2phost.Host) *discoveryHandler { return &discoveryHandler{ - ctx: lifecycleCtx(lc), + ctx: lifecycleCtx(mctx, lc), host: host, } } -func setupDiscovery(lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host, handler *discoveryHandler) error { +func setupDiscovery(mctx MetricsCtx, lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host, handler *discoveryHandler) error { if cfg.Discovery.MDNS.Enabled { mdns := cfg.Discovery.MDNS if mdns.Interval == 0 { mdns.Interval = 5 } - service, err := discovery.NewMdnsService(lifecycleCtx(lc), host, time.Duration(mdns.Interval)*time.Second, discovery.ServiceTag) + service, err := discovery.NewMdnsService(lifecycleCtx(mctx, lc), host, time.Duration(mdns.Interval)*time.Second, 
discovery.ServiceTag) if err != nil { log.Error("mdns error: ", err) return nil @@ -659,15 +659,15 @@ func setupDiscovery(lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host, han return nil } -func providerQueue(lc fx.Lifecycle, repo repo.Repo) (*provider.Queue, error) { - return provider.NewQueue(lifecycleCtx(lc), "provider-v1", repo.Datastore()) +func providerQueue(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo) (*provider.Queue, error) { + return provider.NewQueue(lifecycleCtx(mctx, lc), "provider-v1", repo.Datastore()) } -func providerCtor(lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting) provider.Provider { - return provider.NewProvider(lifecycleCtx(lc), queue, rt) +func providerCtor(mctx MetricsCtx, lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting) provider.Provider { + return provider.NewProvider(lifecycleCtx(mctx, lc), queue, rt) } -func reproviderCtor(lc fx.Lifecycle, cfg *iconfig.Config, bs BaseBlocks, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*rp.Reprovider, error) { +func reproviderCtor(mctx MetricsCtx, lc fx.Lifecycle, cfg *iconfig.Config, bs BaseBlocks, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*rp.Reprovider, error) { var keyProvider rp.KeyChanFunc switch cfg.Reprovider.Strategy { @@ -682,7 +682,7 @@ func reproviderCtor(lc fx.Lifecycle, cfg *iconfig.Config, bs BaseBlocks, ds form default: return nil, fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy) } - return rp.NewReprovider(lifecycleCtx(lc), rt, keyProvider), nil + return rp.NewReprovider(lifecycleCtx(mctx, lc), rt, keyProvider), nil } func reprovider(cfg *iconfig.Config, reprovider *rp.Reprovider) error { @@ -700,7 +700,7 @@ func reprovider(cfg *iconfig.Config, reprovider *rp.Reprovider) error { return nil } -func files(lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, error) { +func files(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, 
error) { dsk := ds.NewKey("/local/filesroot") pf := func(ctx context.Context, c cid.Cid) error { return repo.Datastore().Put(dsk, c.Bytes()) @@ -708,7 +708,7 @@ func files(lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, e var nd *merkledag.ProtoNode val, err := repo.Datastore().Get(dsk) - ctx := lifecycleCtx(lc) + ctx := lifecycleCtx(mctx, lc) switch { case err == ds.ErrNotFound || val == nil: @@ -748,8 +748,8 @@ func files(lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, e // // This is a hack which we need because most of our services use contexts in a // wrong way -func lifecycleCtx(lc fx.Lifecycle) context.Context { - ctx, cancel := context.WithCancel(context.TODO()) // TODO: really wire this context up, things (like metrics) may depend on it +func lifecycleCtx(mctx MetricsCtx, lc fx.Lifecycle) context.Context { + ctx, cancel := context.WithCancel(mctx) lc.Append(fx.Hook{ OnStop: func(_ context.Context) error { cancel() @@ -774,7 +774,7 @@ func (lp *lcProcess) Run(f goprocess.ProcessFunc) { return nil }, OnStop: func(ctx context.Context) error { - return (<-proc).Close() // todo: respect ctx + return (<-proc).Close() // todo: respect ctx, somehow }, }) } From d0670f22efdabe9b14642a2f8d13eb00acb0f315 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 1 Apr 2019 18:49:36 +0200 Subject: [PATCH 11/27] Rewire teardown routines to lifecycles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 2 +- core/core.go | 4 ++-- core/ncore.go | 44 +++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 44 insertions(+), 6 deletions(-) diff --git a/core/builder.go b/core/builder.go index 358e76194..e9cdc8fa3 100644 --- a/core/builder.go +++ b/core/builder.go @@ -216,7 +216,7 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { } core := fx.Options( - fx.Provide(bserv.New), + 
fx.Provide(blockServiceCtor), fx.Provide(dagCtor), fx.Provide(resolver.NewBasicResolver), fx.Provide(pinning), diff --git a/core/core.go b/core/core.go index d71e0a7f5..1a62dc4e4 100644 --- a/core/core.go +++ b/core/core.go @@ -684,10 +684,10 @@ func (n *IpfsNode) teardown() error { closers = append(closers, n.Exchange) } - if n.Mounts.Ipfs != nil && !n.Mounts.Ipfs.IsActive() { + if n.Mounts.Ipfs != nil && !n.Mounts.Ipfs.IsActive() { //TODO closers = append(closers, mount.Closer(n.Mounts.Ipfs)) } - if n.Mounts.Ipns != nil && !n.Mounts.Ipns.IsActive() { + if n.Mounts.Ipns != nil && !n.Mounts.Ipns.IsActive() { // TODO closers = append(closers, mount.Closer(n.Mounts.Ipns)) } diff --git a/core/ncore.go b/core/ncore.go index 6b8130a57..dc27bebcf 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -159,7 +159,7 @@ func baseBlockstoreCtor(mctx MetricsCtx, repo repo.Repo, cfg *iconfig.Config, bc return } -func gcBlockstoreCtor(repo repo.Repo, bb BaseBlocks, cfg *iconfig.Config) (gclocker bstore.GCLocker, gcbs bstore.GCBlockstore, bs bstore.Blockstore, fstore *filestore.Filestore) { +func gcBlockstoreCtor(lc fx.Lifecycle, repo repo.Repo, bb BaseBlocks, cfg *iconfig.Config) (gclocker bstore.GCLocker, gcbs bstore.GCBlockstore, bs bstore.Blockstore, fstore *filestore.Filestore) { gclocker = bstore.NewGCLocker() gcbs = bstore.NewGCBlockstore(bb, gclocker) @@ -173,6 +173,18 @@ func gcBlockstoreCtor(repo repo.Repo, bb BaseBlocks, cfg *iconfig.Config) (gcloc return } +func blockServiceCtor(lc fx.Lifecycle, bs bstore.Blockstore, rem exchange.Interface) bserv.BlockService { + bsvc := bserv.New(bs, rem) + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return bsvc.Close() + }, + }) + + return bsvc +} + func recordValidator(ps pstore.Peerstore) record.Validator { return record.NamespacedValidator{ "pk": record.PublicKeyValidator{}, @@ -431,6 +443,12 @@ func p2pHost(mctx MetricsCtx, lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut out.Host = rhost.Wrap(out.Host, 
out.Routing) } + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return out.Host.Close() + }, + }) + // TODO: break this up into more DI units // TODO: I'm not a fan of type assertions like this but the // `RoutingOption` system doesn't currently provide access to the @@ -447,6 +465,12 @@ func p2pHost(mctx MetricsCtx, lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut // that requires a fair amount of work). if dht, ok := out.Routing.(*dht.IpfsDHT); ok { out.IpfsDHT = dht + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return out.IpfsDHT.Close() + }, + }) } return out, err @@ -579,7 +603,13 @@ func dagCtor(bs bserv.BlockService) format.DAGService { func onlineExchangeCtor(mctx MetricsCtx, lc fx.Lifecycle, host p2phost.Host, rt routing.IpfsRouting, bs bstore.GCBlockstore) exchange.Interface { bitswapNetwork := bsnet.NewFromIpfsHost(host, rt) - return bitswap.New(lifecycleCtx(mctx, lc), bitswapNetwork, bs) + exch := bitswap.New(lifecycleCtx(mctx, lc), bitswapNetwork, bs) + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return exch.Close() + }, + }) + return exch } func onlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *iconfig.Config) (namesys.NameSystem, error) { @@ -738,7 +768,15 @@ func files(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGServi return nil, err } - return mfs.NewRoot(ctx, dag, nd, pf) + root, err := mfs.NewRoot(ctx, dag, nd, pf) + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return root.Close() + }, + }) + + return root, err } //////////// From 5299299037f968c5e47a8278904b73ca8bb86c93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 1 Apr 2019 21:13:37 +0200 Subject: [PATCH 12/27] Move ConnMgr construction into DI provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/ncore.go | 27 +++++++++++++++++++++++---- 1 file 
changed, 23 insertions(+), 4 deletions(-) diff --git a/core/ncore.go b/core/ncore.go index dc27bebcf..bcdc1d0bb 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -31,6 +31,7 @@ import ( "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p-autonat-svc" circuit "github.com/libp2p/go-libp2p-circuit" + connmgr "github.com/libp2p/go-libp2p-connmgr" "github.com/libp2p/go-libp2p-kad-dht" "github.com/libp2p/go-libp2p-metrics" pstore "github.com/libp2p/go-libp2p-peerstore" @@ -329,12 +330,30 @@ func p2pAddrsFactory(cfg *iconfig.Config) (opts libp2pOpts, err error) { } func p2pConnectionManager(cfg *iconfig.Config) (opts libp2pOpts, err error) { - connm, err := constructConnMgr(cfg.Swarm.ConnMgr) - if err != nil { - return opts, err + grace := iconfig.DefaultConnMgrGracePeriod + low := iconfig.DefaultConnMgrLowWater + high := iconfig.DefaultConnMgrHighWater + + switch cfg.Swarm.ConnMgr.Type { + case "": + // 'default' value is the basic connection manager + break + case "none": + return opts, nil + case "basic": + grace, err = time.ParseDuration(cfg.Swarm.ConnMgr.GracePeriod) + if err != nil { + return opts, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err) + } + + low = cfg.Swarm.ConnMgr.LowWater + high = cfg.Swarm.ConnMgr.HighWater + default: + return opts, fmt.Errorf("unrecognized ConnMgr.Type: %q", cfg.Swarm.ConnMgr.Type) } - opts.Opts = append(opts.Opts, libp2p.ConnectionManager(connm)) + cm := connmgr.NewConnManager(low, high, grace) + opts.Opts = append(opts.Opts, libp2p.ConnectionManager(cm)) return } From c5f887dc2afafcfecd5b4a253056d7eaaeba4200 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 1 Apr 2019 21:26:04 +0200 Subject: [PATCH 13/27] fuse: fix govet warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- fuse/node/mount_nofuse.go | 2 ++ fuse/node/mount_unix.go | 2 ++ 2 files changed, 4 insertions(+) diff --git 
a/fuse/node/mount_nofuse.go b/fuse/node/mount_nofuse.go index 7f824ef3e..92f61f809 100644 --- a/fuse/node/mount_nofuse.go +++ b/fuse/node/mount_nofuse.go @@ -8,6 +8,8 @@ import ( core "github.com/ipfs/go-ipfs/core" ) +type errNeedFuseVersion error // used in tests, needed in OSX + func Mount(node *core.IpfsNode, fsdir, nsdir string) error { return errors.New("not compiled in") } diff --git a/fuse/node/mount_unix.go b/fuse/node/mount_unix.go index 8fee86947..3b9cb5c3a 100644 --- a/fuse/node/mount_unix.go +++ b/fuse/node/mount_unix.go @@ -30,6 +30,8 @@ var platformFuseChecks = func(*core.IpfsNode) error { return nil } +type errNeedFuseVersion error // used in tests, needed in OSX + func Mount(node *core.IpfsNode, fsdir, nsdir string) error { // check if we already have live mounts. // if the user said "Mount", then there must be something wrong. From adbc85bf8a8cf44f7d579ac427d7ea727ffd3f02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 1 Apr 2019 21:34:17 +0200 Subject: [PATCH 14/27] Remove old constructor code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 154 +----------- core/core.go | 646 ++---------------------------------------------- 2 files changed, 17 insertions(+), 783 deletions(-) diff --git a/core/builder.go b/core/builder.go index e9cdc8fa3..4a4c2423b 100644 --- a/core/builder.go +++ b/core/builder.go @@ -7,37 +7,23 @@ import ( "errors" "os" "syscall" - "time" "go.uber.org/fx" "github.com/ipfs/go-ipfs/p2p" "github.com/ipfs/go-ipfs/provider" - filestore "github.com/ipfs/go-ipfs/filestore" - namesys "github.com/ipfs/go-ipfs/namesys" - pin "github.com/ipfs/go-ipfs/pin" repo "github.com/ipfs/go-ipfs/repo" - cidv0v1 "github.com/ipfs/go-ipfs/thirdparty/cidv0v1" - "github.com/ipfs/go-ipfs/thirdparty/verifbs" - bserv "github.com/ipfs/go-blockservice" ds "github.com/ipfs/go-datastore" - retry 
"github.com/ipfs/go-datastore/retrystore" dsync "github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" cfg "github.com/ipfs/go-ipfs-config" offline "github.com/ipfs/go-ipfs-exchange-offline" offroute "github.com/ipfs/go-ipfs-routing/offline" - dag "github.com/ipfs/go-merkledag" metrics "github.com/ipfs/go-metrics-interface" resolver "github.com/ipfs/go-path/resolver" - uio "github.com/ipfs/go-unixfs/io" - libp2p "github.com/libp2p/go-libp2p" ci "github.com/libp2p/go-libp2p-crypto" - p2phost "github.com/libp2p/go-libp2p-host" peer "github.com/libp2p/go-libp2p-peer" - pstore "github.com/libp2p/go-libp2p-peerstore" ) type BuildCfg struct { @@ -245,6 +231,8 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { ) go func() { + // Note that some services use contexts to signal shutting down, which is + // very suboptimal. This needs to be here until that's addressed somehow <-ctx.Done() app.Stop(context.Background()) }() @@ -252,25 +240,6 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { n.IsOnline = cfg.Online n.app = app - /* n := &IpfsNode{ - IsOnline: cfg.Online, - Repo: cfg.Repo, - ctx: ctx, - Peerstore: pstoremem.NewPeerstore(), - } - - n.RecordValidator = record.NamespacedValidator{ - "pk": record.PublicKeyValidator{}, - "ipns": ipns.Validator{KeyBook: n.Peerstore}, - } - */ - // TODO: port to lifetimes - // n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown) - - /*if err := setupNode(ctx, n, cfg); err != nil { - n.Close() - return nil, err - }*/ if app.Err() != nil { return nil, app.Err() } @@ -295,122 +264,3 @@ func isTooManyFDError(err error) bool { return false } - -func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error { - // setup local identity - if err := n.loadID(); err != nil { - return err - } - - // load the private key (if present) - if err := n.loadPrivateKey(); err != nil { - return err - } - - rds := &retry.Datastore{ - Batching: n.Repo.Datastore(), - 
Delay: time.Millisecond * 200, - Retries: 6, - TempErrFunc: isTooManyFDError, - } - - // hash security - bs := bstore.NewBlockstore(rds) - bs = &verifbs.VerifBS{Blockstore: bs} - - opts := bstore.DefaultCacheOpts() - conf, err := n.Repo.Config() - if err != nil { - return err - } - - // TEMP: setting global sharding switch here - uio.UseHAMTSharding = conf.Experimental.ShardingEnabled - - opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize - if !cfg.Permanent { - opts.HasBloomFilterSize = 0 - } - - if !cfg.NilRepo { - bs, err = bstore.CachedBlockstore(ctx, bs, opts) - if err != nil { - return err - } - } - - bs = bstore.NewIdStore(bs) - - bs = cidv0v1.NewBlockstore(bs) - - n.BaseBlocks = bs - n.GCLocker = bstore.NewGCLocker() - n.Blockstore = bstore.NewGCBlockstore(bs, n.GCLocker) - - if conf.Experimental.FilestoreEnabled || conf.Experimental.UrlstoreEnabled { - // hash security - n.Filestore = filestore.NewFilestore(bs, n.Repo.FileManager()) - n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker) - n.Blockstore = &verifbs.VerifBSGC{GCBlockstore: n.Blockstore} - } - - rcfg, err := n.Repo.Config() - if err != nil { - return err - } - - if rcfg.Datastore.HashOnRead { - bs.HashOnRead(true) - } - - hostOption := cfg.Host - if cfg.DisableEncryptedConnections { - innerHostOption := hostOption - hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) { - return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...) - } - // TODO: shouldn't this be Errorf to guarantee visibility? - log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS. 
- You will not be able to connect to any nodes configured to use encrypted connections`) - } - - if cfg.Online { - do := setupDiscoveryOption(rcfg.Discovery) - if err := n.startOnlineServices(ctx, cfg.Routing, hostOption, do, cfg.getOpt("pubsub"), cfg.getOpt("ipnsps"), cfg.getOpt("mplex")); err != nil { - return err - } - } else { - n.Exchange = offline.Exchange(n.Blockstore) - n.Routing = offroute.NewOfflineRouter(n.Repo.Datastore(), n.RecordValidator) - n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), 0) - } - - n.Blocks = bserv.New(n.Blockstore, n.Exchange) - n.DAG = dag.NewDAGService(n.Blocks) - - internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore))) - n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag) - if err != nil { - // TODO: we should move towards only running 'NewPinner' explicitly on - // node init instead of implicitly here as a result of the pinner keys - // not being found in the datastore. - // this is kinda sketchy and could cause data loss - n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag) - } - n.Resolver = resolver.NewBasicResolver(n.DAG) - - // Provider - queue, err := provider.NewQueue(ctx, "provider-v1", n.Repo.Datastore()) - if err != nil { - return err - } - n.Provider = provider.NewProvider(ctx, queue, n.Routing) - - if cfg.Online { - if err := n.startLateOnlineServices(ctx); err != nil { - return err - } - } - - return n.loadFilesRoot() -} diff --git a/core/core.go b/core/core.go index 1a62dc4e4..3056bd805 100644 --- a/core/core.go +++ b/core/core.go @@ -10,9 +10,7 @@ interfaces and how core/... 
fits into the bigger IPFS picture, see: package core import ( - "bytes" "context" - "errors" "fmt" "io" "io/ioutil" @@ -24,36 +22,32 @@ import ( version "github.com/ipfs/go-ipfs" rp "github.com/ipfs/go-ipfs/exchange/reprovide" - filestore "github.com/ipfs/go-ipfs/filestore" - mount "github.com/ipfs/go-ipfs/fuse/mount" - namesys "github.com/ipfs/go-ipfs/namesys" + "github.com/ipfs/go-ipfs/filestore" + "github.com/ipfs/go-ipfs/fuse/mount" + "github.com/ipfs/go-ipfs/namesys" ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher" - p2p "github.com/ipfs/go-ipfs/p2p" - pin "github.com/ipfs/go-ipfs/pin" - provider "github.com/ipfs/go-ipfs/provider" - repo "github.com/ipfs/go-ipfs/repo" + "github.com/ipfs/go-ipfs/p2p" + "github.com/ipfs/go-ipfs/pin" + "github.com/ipfs/go-ipfs/provider" + "github.com/ipfs/go-ipfs/repo" - bitswap "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-bitswap/network" bserv "github.com/ipfs/go-blockservice" - cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" bstore "github.com/ipfs/go-ipfs-blockstore" config "github.com/ipfs/go-ipfs-config" exchange "github.com/ipfs/go-ipfs-exchange-interface" nilrouting "github.com/ipfs/go-ipfs-routing/none" - u "github.com/ipfs/go-ipfs-util" ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log" - merkledag "github.com/ipfs/go-merkledag" - mfs "github.com/ipfs/go-mfs" - resolver "github.com/ipfs/go-path/resolver" + "github.com/ipfs/go-merkledag" + "github.com/ipfs/go-mfs" + "github.com/ipfs/go-path/resolver" ft "github.com/ipfs/go-unixfs" - goprocess "github.com/jbenet/goprocess" - libp2p "github.com/libp2p/go-libp2p" + "github.com/jbenet/goprocess" + "github.com/libp2p/go-libp2p" autonat "github.com/libp2p/go-libp2p-autonat-svc" circuit "github.com/libp2p/go-libp2p-circuit" - connmgr "github.com/libp2p/go-libp2p-connmgr" ic "github.com/libp2p/go-libp2p-crypto" p2phost "github.com/libp2p/go-libp2p-host" ifconnmgr 
"github.com/libp2p/go-libp2p-interface-connmgr" @@ -62,17 +56,13 @@ import ( metrics "github.com/libp2p/go-libp2p-metrics" peer "github.com/libp2p/go-libp2p-peer" pstore "github.com/libp2p/go-libp2p-peerstore" - pnet "github.com/libp2p/go-libp2p-pnet" pubsub "github.com/libp2p/go-libp2p-pubsub" psrouter "github.com/libp2p/go-libp2p-pubsub-router" - quic "github.com/libp2p/go-libp2p-quic-transport" record "github.com/libp2p/go-libp2p-record" routing "github.com/libp2p/go-libp2p-routing" - rhelpers "github.com/libp2p/go-libp2p-routing-helpers" - discovery "github.com/libp2p/go-libp2p/p2p/discovery" + "github.com/libp2p/go-libp2p/p2p/discovery" p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic" - rhost "github.com/libp2p/go-libp2p/p2p/host/routed" - identify "github.com/libp2p/go-libp2p/p2p/protocol/identify" + "github.com/libp2p/go-libp2p/p2p/protocol/identify" mafilter "github.com/libp2p/go-maddr-filter" smux "github.com/libp2p/go-stream-muxer" ma "github.com/multiformats/go-multiaddr" @@ -153,218 +143,6 @@ type Mounts struct { Ipns mount.Mount } -func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption RoutingOption, hostOption HostOption, do DiscoveryOption, pubsub, ipnsps, mplex bool) error { - if n.PeerHost != nil { // already online. 
- return errors.New("node already online") - } - - if n.PrivateKey == nil { - return fmt.Errorf("private key not available") - } - - // get undialable addrs from config - cfg, err := n.Repo.Config() - if err != nil { - return err - } - - var libp2pOpts []libp2p.Option - for _, s := range cfg.Swarm.AddrFilters { - f, err := mamask.NewMask(s) - if err != nil { - return fmt.Errorf("incorrectly formatted address filter in config: %s", s) - } - libp2pOpts = append(libp2pOpts, libp2p.FilterAddresses(f)) - } - - if !cfg.Swarm.DisableBandwidthMetrics { - // Set reporter - n.Reporter = metrics.NewBandwidthCounter() - libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(n.Reporter)) - } - - swarmkey, err := n.Repo.SwarmKey() - if err != nil { - return err - } - - if swarmkey != nil { - protec, err := pnet.NewProtector(bytes.NewReader(swarmkey)) - if err != nil { - return fmt.Errorf("failed to configure private network: %s", err) - } - n.PNetFingerprint = protec.Fingerprint() - go func() { - t := time.NewTicker(30 * time.Second) - <-t.C // swallow one tick - for { - select { - case <-t.C: - if ph := n.PeerHost; ph != nil { - if len(ph.Network().Peers()) == 0 { - log.Warning("We are in private network and have no peers.") - log.Warning("This might be configuration mistake.") - } - } - //case <-n.Process().Closing(): - // t.Stop() - // return - } - } - }() - - libp2pOpts = append(libp2pOpts, libp2p.PrivateNetwork(protec)) - } - - addrsFactory, err := makeAddrsFactory(cfg.Addresses) - if err != nil { - return err - } - if !cfg.Swarm.DisableRelay { - addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs) - } - libp2pOpts = append(libp2pOpts, libp2p.AddrsFactory(addrsFactory)) - - connm, err := constructConnMgr(cfg.Swarm.ConnMgr) - if err != nil { - return err - } - libp2pOpts = append(libp2pOpts, libp2p.ConnectionManager(connm)) - - libp2pOpts = append(libp2pOpts, makeSmuxTransportOption(mplex)) - - if !cfg.Swarm.DisableNatPortMap { - libp2pOpts = 
append(libp2pOpts, libp2p.NATPortMap()) - } - - // disable the default listen addrs - libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs) - - if cfg.Swarm.DisableRelay { - // Enabled by default. - libp2pOpts = append(libp2pOpts, libp2p.DisableRelay()) - } else { - relayOpts := []circuit.RelayOpt{circuit.OptDiscovery} - if cfg.Swarm.EnableRelayHop { - relayOpts = append(relayOpts, circuit.OptHop) - } - libp2pOpts = append(libp2pOpts, libp2p.EnableRelay(relayOpts...)) - } - - // explicitly enable the default transports - libp2pOpts = append(libp2pOpts, libp2p.DefaultTransports) - - if cfg.Experimental.QUIC { - libp2pOpts = append(libp2pOpts, libp2p.Transport(quic.NewTransport)) - } - - // enable routing - libp2pOpts = append(libp2pOpts, libp2p.Routing(func(h p2phost.Host) (routing.PeerRouting, error) { - r, err := routingOption(ctx, h, n.Repo.Datastore(), n.RecordValidator) - n.Routing = r - return r, err - })) - - // enable autorelay - if cfg.Swarm.EnableAutoRelay { - libp2pOpts = append(libp2pOpts, libp2p.EnableAutoRelay()) - } - - peerhost, err := hostOption(ctx, n.Identity, n.Peerstore, libp2pOpts...) - - if err != nil { - return err - } - - n.PeerHost = peerhost - - if err := n.startOnlineServicesWithHost(ctx, routingOption, pubsub, ipnsps); err != nil { - return err - } - - // Ok, now we're ready to listen. 
- if err := startListening(n.PeerHost, cfg); err != nil { - return err - } - - n.P2P = p2p.NewP2P(n.Identity, n.PeerHost, n.Peerstore) - - // setup local discovery - if do != nil { - service, err := do(ctx, n.PeerHost) - if err != nil { - log.Error("mdns error: ", err) - } else { - service.RegisterNotifee(n) - n.Discovery = service - } - } - - return n.Bootstrap(DefaultBootstrapConfig) -} - -func constructConnMgr(cfg config.ConnMgr) (ifconnmgr.ConnManager, error) { - switch cfg.Type { - case "": - // 'default' value is the basic connection manager - return connmgr.NewConnManager(config.DefaultConnMgrLowWater, config.DefaultConnMgrHighWater, config.DefaultConnMgrGracePeriod), nil - case "none": - return nil, nil - case "basic": - grace, err := time.ParseDuration(cfg.GracePeriod) - if err != nil { - return nil, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err) - } - - return connmgr.NewConnManager(cfg.LowWater, cfg.HighWater, grace), nil - default: - return nil, fmt.Errorf("unrecognized ConnMgr.Type: %q", cfg.Type) - } -} - -func (n *IpfsNode) startLateOnlineServices(ctx context.Context) error { - cfg, err := n.Repo.Config() - if err != nil { - return err - } - - // Provider - - n.Provider.Run() - - // Reprovider - - var keyProvider rp.KeyChanFunc - - switch cfg.Reprovider.Strategy { - case "all": - fallthrough - case "": - keyProvider = rp.NewBlockstoreProvider(n.Blockstore) - case "roots": - keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, true) - case "pinned": - keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, false) - default: - return fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy) - } - n.Reprovider = rp.NewReprovider(ctx, n.Routing, keyProvider) - - reproviderInterval := kReprovideFrequency - if cfg.Reprovider.Interval != "" { - dur, err := time.ParseDuration(cfg.Reprovider.Interval) - if err != nil { - return err - } - - reproviderInterval = dur - } - - go n.Reprovider.Run(reproviderInterval) - - return nil -} - func 
makeAddrsFactory(cfg config.Addresses) (p2pbhost.AddrsFactory, error) { var annAddrs []ma.Multiaddr for _, addr := range cfg.Announce { @@ -453,200 +231,6 @@ func makeSmuxTransportOption(mplexExp bool) libp2p.Option { return libp2p.ChainOptions(opts...) } -func setupDiscoveryOption(d config.Discovery) DiscoveryOption { - if d.MDNS.Enabled { - return func(ctx context.Context, h p2phost.Host) (discovery.Service, error) { - if d.MDNS.Interval == 0 { - d.MDNS.Interval = 5 - } - return discovery.NewMdnsService(ctx, h, time.Duration(d.MDNS.Interval)*time.Second, discovery.ServiceTag) - } - } - return nil -} - -// HandlePeerFound attempts to connect to peer from `PeerInfo`, if it fails -// logs a warning log. -func (n *IpfsNode) HandlePeerFound(p pstore.PeerInfo) { - log.Warning("trying peer info: ", p) - ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout) - defer cancel() - if err := n.PeerHost.Connect(ctx, p); err != nil { - log.Warning("Failed to connect to peer found by discovery: ", err) - } -} - -// startOnlineServicesWithHost is the set of services which need to be -// initialized with the host and _before_ we start listening. -func (n *IpfsNode) startOnlineServicesWithHost(ctx context.Context, routingOption RoutingOption, enablePubsub bool, enableIpnsps bool) error { - cfg, err := n.Repo.Config() - if err != nil { - return err - } - - if cfg.Swarm.EnableAutoNATService { - var opts []libp2p.Option - if cfg.Experimental.QUIC { - opts = append(opts, libp2p.DefaultTransports, libp2p.Transport(quic.NewTransport)) - } - - svc, err := autonat.NewAutoNATService(ctx, n.PeerHost, opts...) 
- if err != nil { - return err - } - n.AutoNAT = svc - } - - if enablePubsub || enableIpnsps { - var service *pubsub.PubSub - - var pubsubOptions []pubsub.Option - if cfg.Pubsub.DisableSigning { - pubsubOptions = append(pubsubOptions, pubsub.WithMessageSigning(false)) - } - - if cfg.Pubsub.StrictSignatureVerification { - pubsubOptions = append(pubsubOptions, pubsub.WithStrictSignatureVerification(true)) - } - - switch cfg.Pubsub.Router { - case "": - fallthrough - case "floodsub": - service, err = pubsub.NewFloodSub(ctx, n.PeerHost, pubsubOptions...) - - case "gossipsub": - service, err = pubsub.NewGossipSub(ctx, n.PeerHost, pubsubOptions...) - - default: - err = fmt.Errorf("Unknown pubsub router %s", cfg.Pubsub.Router) - } - - if err != nil { - return err - } - n.PubSub = service - } - - // this code is necessary just for tests: mock network constructions - // ignore the libp2p constructor options that actually construct the routing! - if n.Routing == nil { - r, err := routingOption(ctx, n.PeerHost, n.Repo.Datastore(), n.RecordValidator) - if err != nil { - return err - } - n.Routing = r - n.PeerHost = rhost.Wrap(n.PeerHost, n.Routing) - } - - // TODO: I'm not a fan of type assertions like this but the - // `RoutingOption` system doesn't currently provide access to the - // IpfsNode. - // - // Ideally, we'd do something like: - // - // 1. Add some fancy method to introspect into tiered routers to extract - // things like the pubsub router or the DHT (complicated, messy, - // probably not worth it). - // 2. Pass the IpfsNode into the RoutingOption (would also remove the - // PSRouter case below. - // 3. Introduce some kind of service manager? (my personal favorite but - // that requires a fair amount of work). 
- if dht, ok := n.Routing.(*dht.IpfsDHT); ok { - n.DHT = dht - } - - if enableIpnsps { - n.PSRouter = psrouter.NewPubsubValueStore( - ctx, - n.PeerHost, - n.Routing, - n.PubSub, - n.RecordValidator, - ) - n.Routing = rhelpers.Tiered{ - Routers: []routing.IpfsRouting{ - // Always check pubsub first. - &rhelpers.Compose{ - ValueStore: &rhelpers.LimitedValueStore{ - ValueStore: n.PSRouter, - Namespaces: []string{"ipns"}, - }, - }, - n.Routing, - }, - Validator: n.RecordValidator, - } - } - - // setup exchange service - bitswapNetwork := bsnet.NewFromIpfsHost(n.PeerHost, n.Routing) - n.Exchange = bitswap.New(ctx, bitswapNetwork, n.Blockstore) - - size, err := n.getCacheSize() - if err != nil { - return err - } - - // setup name system - n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size) - - // setup ipns republishing - return n.setupIpnsRepublisher() -} - -// getCacheSize returns cache life and cache size -func (n *IpfsNode) getCacheSize() (int, error) { - cfg, err := n.Repo.Config() - if err != nil { - return 0, err - } - - cs := cfg.Ipns.ResolveCacheSize - if cs == 0 { - cs = DefaultIpnsCacheSize - } - if cs < 0 { - return 0, fmt.Errorf("cannot specify negative resolve cache size") - } - return cs, nil -} - -func (n *IpfsNode) setupIpnsRepublisher() error { - cfg, err := n.Repo.Config() - if err != nil { - return err - } - - n.IpnsRepub = ipnsrp.NewRepublisher(n.Namesys, n.Repo.Datastore(), n.PrivateKey, n.Repo.Keystore()) - - if cfg.Ipns.RepublishPeriod != "" { - d, err := time.ParseDuration(cfg.Ipns.RepublishPeriod) - if err != nil { - return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err) - } - - if !u.Debug && (d < time.Minute || d > (time.Hour*24)) { - return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d) - } - - n.IpnsRepub.Interval = d - } - - if cfg.Ipns.RecordLifetime != "" { - d, err := time.ParseDuration(cfg.Ipns.RecordLifetime) - if err != nil { - return 
fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err) - } - - n.IpnsRepub.RecordLifetime = d - } - - //n.Process().Go(n.IpnsRepub.Run) - - return nil -} - // Close calls Close() on the App object func (n *IpfsNode) Close() error { return n.app.Stop(n.ctx) @@ -660,68 +244,6 @@ func (n *IpfsNode) Context() context.Context { return n.ctx } -// teardown closes owned children. If any errors occur, this function returns -// the first error. -func (n *IpfsNode) teardown() error { - log.Debug("core is shutting down...") - // owned objects are closed in this teardown to ensure that they're closed - // regardless of which constructor was used to add them to the node. - var closers []io.Closer - - // NOTE: The order that objects are added(closed) matters, if an object - // needs to use another during its shutdown/cleanup process, it should be - // closed before that other object - - if n.Provider != nil { - closers = append(closers, n.Provider) - } - - if n.FilesRoot != nil { - closers = append(closers, n.FilesRoot) - } - - if n.Exchange != nil { - closers = append(closers, n.Exchange) - } - - if n.Mounts.Ipfs != nil && !n.Mounts.Ipfs.IsActive() { //TODO - closers = append(closers, mount.Closer(n.Mounts.Ipfs)) - } - if n.Mounts.Ipns != nil && !n.Mounts.Ipns.IsActive() { // TODO - closers = append(closers, mount.Closer(n.Mounts.Ipns)) - } - - if n.DHT != nil { - closers = append(closers, n.DHT.Process()) - } - - if n.Blocks != nil { - closers = append(closers, n.Blocks) - } - - if n.Bootstrapper != nil { - closers = append(closers, n.Bootstrapper) - } - - if n.PeerHost != nil { - closers = append(closers, n.PeerHost) - } - - // Repo closed last, most things need to preserve state here - closers = append(closers, n.Repo) - - var errs []error - for _, closer := range closers { - if err := closer.Close(); err != nil { - errs = append(errs, err) - } - } - if len(errs) > 0 { - return errs[0] - } - return nil -} - // Bootstrap will set and call the IpfsNodes 
bootstrap function. func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error { // TODO what should return value be when in offlineMode? @@ -751,80 +273,6 @@ func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error { return err } -func (n *IpfsNode) loadID() error { - if n.Identity != "" { - return errors.New("identity already loaded") - } - - cfg, err := n.Repo.Config() - if err != nil { - return err - } - - cid := cfg.Identity.PeerID - if cid == "" { - return errors.New("identity was not set in config (was 'ipfs init' run?)") - } - if len(cid) == 0 { - return errors.New("no peer ID in config! (was 'ipfs init' run?)") - } - - id, err := peer.IDB58Decode(cid) - if err != nil { - return fmt.Errorf("peer ID invalid: %s", err) - } - - n.Identity = id - return nil -} - -// GetKey will return a key from the Keystore with name `name`. -func (n *IpfsNode) GetKey(name string) (ic.PrivKey, error) { - if name == "self" { - if n.PrivateKey == nil { - return nil, fmt.Errorf("private key not available") - } - return n.PrivateKey, nil - } else { - return n.Repo.Keystore().Get(name) - } -} - -// loadPrivateKey loads the private key *if* available -func (n *IpfsNode) loadPrivateKey() error { - if n.Identity == "" || n.Peerstore == nil { - return errors.New("loaded private key out of order") - } - - if n.PrivateKey != nil { - log.Warning("private key already loaded") - return nil - } - - cfg, err := n.Repo.Config() - if err != nil { - return err - } - - if cfg.Identity.PrivKey == "" { - return nil - } - - sk, err := loadPrivateKey(&cfg.Identity, n.Identity) - if err != nil { - return err - } - - if err := n.Peerstore.AddPrivKey(n.Identity, sk); err != nil { - return err - } - if err := n.Peerstore.AddPubKey(n.Identity, sk.GetPublic()); err != nil { - return err - } - n.PrivateKey = sk - return nil -} - func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) { cfg, err := n.Repo.Config() if err != nil { @@ -838,70 +286,6 @@ func (n *IpfsNode) loadBootstrapPeers() 
([]pstore.PeerInfo, error) { return toPeerInfos(parsed), nil } -func (n *IpfsNode) loadFilesRoot() error { - dsk := ds.NewKey("/local/filesroot") - pf := func(ctx context.Context, c cid.Cid) error { - return n.Repo.Datastore().Put(dsk, c.Bytes()) - } - - var nd *merkledag.ProtoNode - val, err := n.Repo.Datastore().Get(dsk) - - switch { - case err == ds.ErrNotFound || val == nil: - nd = ft.EmptyDirNode() - err := n.DAG.Add(n.Context(), nd) - if err != nil { - return fmt.Errorf("failure writing to dagstore: %s", err) - } - case err == nil: - c, err := cid.Cast(val) - if err != nil { - return err - } - - rnd, err := n.DAG.Get(n.Context(), c) - if err != nil { - return fmt.Errorf("error loading filesroot from DAG: %s", err) - } - - pbnd, ok := rnd.(*merkledag.ProtoNode) - if !ok { - return merkledag.ErrNotProtobuf - } - - nd = pbnd - default: - return err - } - - mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf) - if err != nil { - return err - } - - n.FilesRoot = mr - return nil -} - -func loadPrivateKey(cfg *config.Identity, id peer.ID) (ic.PrivKey, error) { - sk, err := cfg.DecodePrivateKey("passphrase todo!") - if err != nil { - return nil, err - } - - id2, err := peer.IDFromPrivateKey(sk) - if err != nil { - return nil, err - } - - if id2 != id { - return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2) - } - - return sk, nil -} - func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) { var listen []ma.Multiaddr for _, addr := range cfg.Addresses.Swarm { From d35dac70f0b0b47c7813c6380deb8974179bdb32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 3 Apr 2019 03:44:32 +0200 Subject: [PATCH 15/27] Cleanup core package MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- cmd/ipfs/daemon.go | 7 +- core/{ => bootstrap}/bootstrap.go | 109 +-- core/{ => bootstrap}/bootstrap_test.go | 4 +- core/builder.go | 130 +-- 
core/core.go | 221 +----- core/coreapi/coreapi.go | 3 +- core/coreapi/test/api_test.go | 3 +- core/mock/mock.go | 3 +- core/ncore.go | 831 +------------------- core/node/builder.go | 36 + core/node/core.go | 117 +++ core/node/discovery.go | 51 ++ core/node/groups.go | 88 +++ core/node/helpers.go | 43 + core/node/identity.go | 48 ++ core/node/ipns.go | 71 ++ core/node/libp2p.go | 597 ++++++++++++++ core/node/provider.go | 59 ++ core/node/storage.go | 94 +++ namesys/republisher/repub_test.go | 3 +- test/integration/addcat_test.go | 5 +- test/integration/bench_cat_test.go | 5 +- test/integration/bitswap_wo_routing_test.go | 3 +- test/integration/three_legged_cat_test.go | 3 +- 24 files changed, 1319 insertions(+), 1215 deletions(-) rename core/{ => bootstrap}/bootstrap.go (77%) rename core/{ => bootstrap}/bootstrap_test.go (95%) create mode 100644 core/node/builder.go create mode 100644 core/node/core.go create mode 100644 core/node/discovery.go create mode 100644 core/node/groups.go create mode 100644 core/node/helpers.go create mode 100644 core/node/identity.go create mode 100644 core/node/ipns.go create mode 100644 core/node/libp2p.go create mode 100644 core/node/provider.go create mode 100644 core/node/storage.go diff --git a/cmd/ipfs/daemon.go b/cmd/ipfs/daemon.go index fc90e969f..6d3d920d2 100644 --- a/cmd/ipfs/daemon.go +++ b/cmd/ipfs/daemon.go @@ -20,6 +20,7 @@ import ( coreapi "github.com/ipfs/go-ipfs/core/coreapi" corehttp "github.com/ipfs/go-ipfs/core/corehttp" corerepo "github.com/ipfs/go-ipfs/core/corerepo" + "github.com/ipfs/go-ipfs/core/node" nodeMount "github.com/ipfs/go-ipfs/fuse/node" fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" migrate "github.com/ipfs/go-ipfs/repo/fsrepo/migrations" @@ -323,11 +324,11 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment case routingOptionSupernodeKwd: return errors.New("supernode routing was never fully implemented and has been removed") case routingOptionDHTClientKwd: - ncfg.Routing 
= core.DHTClientOption + ncfg.Routing = node.DHTClientOption case routingOptionDHTKwd: - ncfg.Routing = core.DHTOption + ncfg.Routing = node.DHTOption case routingOptionNoneKwd: - ncfg.Routing = core.NilRouterOption + ncfg.Routing = node.NilRouterOption default: return fmt.Errorf("unrecognized routing option: %s", routingOption) } diff --git a/core/bootstrap.go b/core/bootstrap/bootstrap.go similarity index 77% rename from core/bootstrap.go rename to core/bootstrap/bootstrap.go index 5ff4f8e14..e6b4f826d 100644 --- a/core/bootstrap.go +++ b/core/bootstrap/bootstrap.go @@ -1,4 +1,4 @@ -package core +package bootstrap import ( "context" @@ -9,19 +9,23 @@ import ( "sync" "time" - math2 "github.com/ipfs/go-ipfs/thirdparty/math2" - lgbl "github.com/libp2p/go-libp2p-loggables" - config "github.com/ipfs/go-ipfs-config" - goprocess "github.com/jbenet/goprocess" - procctx "github.com/jbenet/goprocess/context" - periodicproc "github.com/jbenet/goprocess/periodic" - host "github.com/libp2p/go-libp2p-host" - inet "github.com/libp2p/go-libp2p-net" - peer "github.com/libp2p/go-libp2p-peer" - pstore "github.com/libp2p/go-libp2p-peerstore" + logging "github.com/ipfs/go-log" + "github.com/jbenet/goprocess" + "github.com/jbenet/goprocess/context" + "github.com/jbenet/goprocess/periodic" + "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-loggables" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-routing" + + "github.com/ipfs/go-ipfs/thirdparty/math2" ) +var log = logging.Logger("bootstrap") + // ErrNotEnoughBootstrapPeers signals that we do not have enough bootstrap // peers to bootstrap correctly. 
var ErrNotEnoughBootstrapPeers = errors.New("not enough bootstrap peers to bootstrap") @@ -29,7 +33,6 @@ var ErrNotEnoughBootstrapPeers = errors.New("not enough bootstrap peers to boots // BootstrapConfig specifies parameters used in an IpfsNode's network // bootstrapping process. type BootstrapConfig struct { - // MinPeerThreshold governs whether to bootstrap more connections. If the // node has less open connections than this number, it will open connections // to the bootstrap nodes. From there, the routing system should be able @@ -50,7 +53,7 @@ type BootstrapConfig struct { // BootstrapPeers is a function that returns a set of bootstrap peers // for the bootstrap process to use. This makes it possible for clients // to control the peers the process uses at any moment. - BootstrapPeers func() []pstore.PeerInfo + BootstrapPeers func() []peerstore.PeerInfo } // DefaultBootstrapConfig specifies default sane parameters for bootstrapping. @@ -60,9 +63,9 @@ var DefaultBootstrapConfig = BootstrapConfig{ ConnectionTimeout: (30 * time.Second) / 3, // Perod / 3 } -func BootstrapConfigWithPeers(pis []pstore.PeerInfo) BootstrapConfig { +func BootstrapConfigWithPeers(pis []peerstore.PeerInfo) BootstrapConfig { cfg := DefaultBootstrapConfig - cfg.BootstrapPeers = func() []pstore.PeerInfo { + cfg.BootstrapPeers = func() []peerstore.PeerInfo { return pis } return cfg @@ -72,7 +75,7 @@ func BootstrapConfigWithPeers(pis []pstore.PeerInfo) BootstrapConfig { // check the number of open connections and -- if there are too few -- initiate // connections to well-known bootstrap peers. It also kicks off subsystem // bootstrapping (i.e. routing). -func Bootstrap(n *IpfsNode, cfg BootstrapConfig) (io.Closer, error) { +func Bootstrap(id peer.ID, host host.Host, rt routing.IpfsRouting, cfg BootstrapConfig) (io.Closer, error) { // make a signal to wait for one bootstrap round to complete. 
doneWithRound := make(chan struct{}) @@ -85,12 +88,12 @@ func Bootstrap(n *IpfsNode, cfg BootstrapConfig) (io.Closer, error) { // the periodic bootstrap function -- the connection supervisor periodic := func(worker goprocess.Process) { - ctx := procctx.OnClosingContext(worker) - defer log.EventBegin(ctx, "periodicBootstrap", n.Identity).Done() + ctx := goprocessctx.OnClosingContext(worker) + defer log.EventBegin(ctx, "periodicBootstrap", id).Done() - if err := bootstrapRound(ctx, n.PeerHost, cfg); err != nil { - log.Event(ctx, "bootstrapError", n.Identity, lgbl.Error(err)) - log.Debugf("%s bootstrap error: %s", n.Identity, err) + if err := bootstrapRound(ctx, host, cfg); err != nil { + log.Event(ctx, "bootstrapError", id, loggables.Error(err)) + log.Debugf("%s bootstrap error: %s", id, err) } <-doneWithRound @@ -101,9 +104,9 @@ func Bootstrap(n *IpfsNode, cfg BootstrapConfig) (io.Closer, error) { proc.Go(periodic) // run one right now. // kick off Routing.Bootstrap - if n.Routing != nil { - ctx := procctx.OnClosingContext(proc) - if err := n.Routing.Bootstrap(ctx); err != nil { + if rt != nil { + ctx := goprocessctx.OnClosingContext(proc) + if err := rt.Bootstrap(ctx); err != nil { proc.Close() return nil, err } @@ -134,9 +137,9 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er numToDial := cfg.MinPeerThreshold - len(connected) // filter out bootstrap nodes we are already connected to - var notConnected []pstore.PeerInfo + var notConnected []peerstore.PeerInfo for _, p := range peers { - if host.Network().Connectedness(p.ID) != inet.Connected { + if host.Network().Connectedness(p.ID) != net.Connected { notConnected = append(notConnected, p) } } @@ -155,7 +158,7 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er return bootstrapConnect(ctx, host, randSubset) } -func bootstrapConnect(ctx context.Context, ph host.Host, peers []pstore.PeerInfo) error { +func bootstrapConnect(ctx context.Context, ph 
host.Host, peers []peerstore.PeerInfo) error { if len(peers) < 1 { return ErrNotEnoughBootstrapPeers } @@ -170,12 +173,12 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []pstore.PeerInfo // Also, performed asynchronously for dial speed. wg.Add(1) - go func(p pstore.PeerInfo) { + go func(p peerstore.PeerInfo) { defer wg.Done() defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done() log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID) - ph.Peerstore().AddAddrs(p.ID, p.Addrs, pstore.PermanentAddrTTL) + ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL) if err := ph.Connect(ctx, p); err != nil { log.Event(ctx, "bootstrapDialFailed", p.ID) log.Debugf("failed to bootstrap with %v: %s", p.ID, err) @@ -204,30 +207,9 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []pstore.PeerInfo return nil } -func toPeerInfos(bpeers []config.BootstrapPeer) []pstore.PeerInfo { - pinfos := make(map[peer.ID]*pstore.PeerInfo) - for _, bootstrap := range bpeers { - pinfo, ok := pinfos[bootstrap.ID()] - if !ok { - pinfo = new(pstore.PeerInfo) - pinfos[bootstrap.ID()] = pinfo - pinfo.ID = bootstrap.ID() - } - - pinfo.Addrs = append(pinfo.Addrs, bootstrap.Transport()) - } - - var peers []pstore.PeerInfo - for _, pinfo := range pinfos { - peers = append(peers, *pinfo) - } - - return peers -} - -func randomSubsetOfPeers(in []pstore.PeerInfo, max int) []pstore.PeerInfo { +func randomSubsetOfPeers(in []peerstore.PeerInfo, max int) []peerstore.PeerInfo { n := math2.IntMin(max, len(in)) - var out []pstore.PeerInfo + var out []peerstore.PeerInfo for _, val := range rand.Perm(len(in)) { out = append(out, in[val]) if len(out) >= n { @@ -236,3 +218,26 @@ func randomSubsetOfPeers(in []pstore.PeerInfo, max int) []pstore.PeerInfo { } return out } + +type Peers []config.BootstrapPeer + +func (bpeers Peers) ToPeerInfos() []peerstore.PeerInfo { + pinfos := make(map[peer.ID]*peerstore.PeerInfo) + for _, bootstrap := range bpeers { + pinfo, ok := 
pinfos[bootstrap.ID()] + if !ok { + pinfo = new(peerstore.PeerInfo) + pinfos[bootstrap.ID()] = pinfo + pinfo.ID = bootstrap.ID() + } + + pinfo.Addrs = append(pinfo.Addrs, bootstrap.Transport()) + } + + var peers []peerstore.PeerInfo + for _, pinfo := range pinfos { + peers = append(peers, *pinfo) + } + + return peers +} diff --git a/core/bootstrap_test.go b/core/bootstrap/bootstrap_test.go similarity index 95% rename from core/bootstrap_test.go rename to core/bootstrap/bootstrap_test.go index 51e85d8aa..0c7799858 100644 --- a/core/bootstrap_test.go +++ b/core/bootstrap/bootstrap_test.go @@ -1,4 +1,4 @@ -package core +package bootstrap import ( "fmt" @@ -49,7 +49,7 @@ func TestMultipleAddrsPerPeer(t *testing.T) { bsps = append(bsps, bsp1, bsp2) } - pinfos := toPeerInfos(bsps) + pinfos := Peers.ToPeerInfos(bsps) if len(pinfos) != len(bsps)/2 { t.Fatal("expected fewer peers") } diff --git a/core/builder.go b/core/builder.go index 4a4c2423b..02b772e70 100644 --- a/core/builder.go +++ b/core/builder.go @@ -5,57 +5,24 @@ import ( "crypto/rand" "encoding/base64" "errors" - "os" - "syscall" "go.uber.org/fx" - "github.com/ipfs/go-ipfs/p2p" - "github.com/ipfs/go-ipfs/provider" + "github.com/ipfs/go-ipfs/core/bootstrap" + "github.com/ipfs/go-ipfs/core/node" repo "github.com/ipfs/go-ipfs/repo" ds "github.com/ipfs/go-datastore" dsync "github.com/ipfs/go-datastore/sync" cfg "github.com/ipfs/go-ipfs-config" - offline "github.com/ipfs/go-ipfs-exchange-offline" - offroute "github.com/ipfs/go-ipfs-routing/offline" metrics "github.com/ipfs/go-metrics-interface" resolver "github.com/ipfs/go-path/resolver" ci "github.com/libp2p/go-libp2p-crypto" peer "github.com/libp2p/go-libp2p-peer" ) -type BuildCfg struct { - // If online is set, the node will have networking enabled - Online bool - - // ExtraOpts is a map of extra options used to configure the ipfs nodes creation - ExtraOpts map[string]bool - - // If permanent then node should run more expensive processes - // that will improve 
performance in long run - Permanent bool - - // DisableEncryptedConnections disables connection encryption *entirely*. - // DO NOT SET THIS UNLESS YOU'RE TESTING. - DisableEncryptedConnections bool - - // If NilRepo is set, a Repo backed by a nil datastore will be constructed - NilRepo bool - - Routing RoutingOption - Host HostOption - Repo repo.Repo -} - -func (cfg *BuildCfg) getOpt(key string) bool { - if cfg.ExtraOpts == nil { - return false - } - - return cfg.ExtraOpts[key] -} +type BuildCfg node.BuildCfg func (cfg *BuildCfg) fillDefaults() error { if cfg.Repo != nil && cfg.NilRepo { @@ -77,11 +44,11 @@ func (cfg *BuildCfg) fillDefaults() error { } if cfg.Routing == nil { - cfg.Routing = DHTOption + cfg.Routing = node.DHTOption } if cfg.Host == nil { - cfg.Host = DefaultHostOption + cfg.Host = node.DefaultHostOption } return nil @@ -115,8 +82,6 @@ func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { }, nil } -type MetricsCtx context.Context - // NewNode constructs and returns an IpfsNode using the given cfg. 
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { if cfg == nil { @@ -141,12 +106,12 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { }) // TODO: Remove this, use only for passing node config - cfgOption := fx.Provide(func() *BuildCfg { - return cfg + cfgOption := fx.Provide(func() *node.BuildCfg { + return (*node.BuildCfg)(cfg) }) - metricsCtx := fx.Provide(func() MetricsCtx { - return MetricsCtx(ctx) + metricsCtx := fx.Provide(func() node.MetricsCtx { + return node.MetricsCtx(ctx) }) params := fx.Options( @@ -155,58 +120,12 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { metricsCtx, ) - storage := fx.Options( - fx.Provide(repoConfig), - fx.Provide(datastoreCtor), - fx.Provide(baseBlockstoreCtor), - fx.Provide(gcBlockstoreCtor), - ) - - ident := fx.Options( - fx.Provide(identity), - fx.Provide(privateKey), - fx.Provide(peerstore), - ) - - ipns := fx.Options( - fx.Provide(recordValidator), - ) - - providers := fx.Options( - fx.Provide(providerQueue), - fx.Provide(providerCtor), - fx.Provide(reproviderCtor), - - fx.Invoke(reprovider), - fx.Invoke(provider.Provider.Run), - ) - - online := fx.Options( - fx.Provide(onlineExchangeCtor), - fx.Provide(onlineNamesysCtor), - - fx.Invoke(ipnsRepublisher), - - fx.Provide(p2p.NewP2P), - - ipfsp2p, - providers, - ) - if !cfg.Online { - online = fx.Options( - fx.Provide(offline.Exchange), - fx.Provide(offlineNamesysCtor), - fx.Provide(offroute.NewOfflineRouter), - fx.Provide(provider.NewOfflineProvider), - ) - } - core := fx.Options( - fx.Provide(blockServiceCtor), - fx.Provide(dagCtor), + fx.Provide(node.BlockServiceCtor), + fx.Provide(node.DagCtor), fx.Provide(resolver.NewBasicResolver), - fx.Provide(pinning), - fx.Provide(files), + fx.Provide(node.Pinning), + fx.Provide(node.Files), ) n := &IpfsNode{ @@ -214,16 +133,16 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { } app := fx.New( + fx.NopLogger, fx.Provide(baseProcess), params, - 
storage, - ident, - ipns, - online, + node.Storage, + node.Identity, + node.IPNS, + node.Networked(cfg.Online), fx.Invoke(setupSharding), - fx.NopLogger, core, @@ -248,19 +167,10 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { return nil, err } - // TODO: DI-ify bootstrap + // TODO: How soon will bootstrap move to libp2p? if !cfg.Online { return n, nil } - return n, n.Bootstrap(DefaultBootstrapConfig) -} - -func isTooManyFDError(err error) bool { - perr, ok := err.(*os.PathError) - if ok && perr.Err == syscall.EMFILE { - return true - } - - return false + return n, n.Bootstrap(bootstrap.DefaultBootstrapConfig) } diff --git a/core/core.go b/core/core.go index 3056bd805..23aa84e77 100644 --- a/core/core.go +++ b/core/core.go @@ -11,16 +11,13 @@ package core import ( "context" - "fmt" "io" - "io/ioutil" - "os" - "strings" - "time" "go.uber.org/fx" version "github.com/ipfs/go-ipfs" + "github.com/ipfs/go-ipfs/core/bootstrap" + "github.com/ipfs/go-ipfs/core/node" rp "github.com/ipfs/go-ipfs/exchange/reprovide" "github.com/ipfs/go-ipfs/filestore" "github.com/ipfs/go-ipfs/fuse/mount" @@ -32,27 +29,18 @@ import ( "github.com/ipfs/go-ipfs/repo" bserv "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" bstore "github.com/ipfs/go-ipfs-blockstore" - config "github.com/ipfs/go-ipfs-config" exchange "github.com/ipfs/go-ipfs-exchange-interface" - nilrouting "github.com/ipfs/go-ipfs-routing/none" ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log" - "github.com/ipfs/go-merkledag" "github.com/ipfs/go-mfs" "github.com/ipfs/go-path/resolver" - ft "github.com/ipfs/go-unixfs" "github.com/jbenet/goprocess" - "github.com/libp2p/go-libp2p" autonat "github.com/libp2p/go-libp2p-autonat-svc" - circuit "github.com/libp2p/go-libp2p-circuit" ic "github.com/libp2p/go-libp2p-crypto" p2phost "github.com/libp2p/go-libp2p-host" ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" dht 
"github.com/libp2p/go-libp2p-kad-dht" - dhtopts "github.com/libp2p/go-libp2p-kad-dht/opts" metrics "github.com/libp2p/go-libp2p-metrics" peer "github.com/libp2p/go-libp2p-peer" pstore "github.com/libp2p/go-libp2p-peerstore" @@ -63,18 +51,8 @@ import ( "github.com/libp2p/go-libp2p/p2p/discovery" p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic" "github.com/libp2p/go-libp2p/p2p/protocol/identify" - mafilter "github.com/libp2p/go-maddr-filter" - smux "github.com/libp2p/go-stream-muxer" - ma "github.com/multiformats/go-multiaddr" - mplex "github.com/whyrusleeping/go-smux-multiplex" - yamux "github.com/whyrusleeping/go-smux-yamux" - mamask "github.com/whyrusleeping/multiaddr-filter" ) -const kReprovideFrequency = time.Hour * 12 -const discoveryConnTimeout = time.Second * 30 -const DefaultIpnsCacheSize = 128 - var log = logging.Logger("core") func init() { @@ -90,16 +68,16 @@ type IpfsNode struct { Repo repo.Repo // Local node - Pinning pin.Pinner // the pinning manager - Mounts Mounts `optional:"true"` // current mount state, if any. - PrivateKey ic.PrivKey // the local node's private Key - PNetFingerprint PNetFingerprint `optional:"true"` // fingerprint of private network + Pinning pin.Pinner // the pinning manager + Mounts Mounts `optional:"true"` // current mount state, if any. + PrivateKey ic.PrivKey // the local node's private Key + PNetFingerprint node.PNetFingerprint `optional:"true"` // fingerprint of private network // Services Peerstore pstore.Peerstore `optional:"true"` // storage for other Peer instances Blockstore bstore.GCBlockstore // the block store (lower level) Filestore *filestore.Filestore // the filestore blockstore - BaseBlocks BaseBlocks // the raw blockstore, no filestore wrapping + BaseBlocks node.BaseBlocks // the raw blockstore, no filestore wrapping GCLocker bstore.GCLocker // the locker used to protect the blockstore during gc Blocks bserv.BlockService // the block service, get/add blocks. 
DAG ipld.DAGService // the merkle dag service, get/add objects. @@ -143,94 +121,6 @@ type Mounts struct { Ipns mount.Mount } -func makeAddrsFactory(cfg config.Addresses) (p2pbhost.AddrsFactory, error) { - var annAddrs []ma.Multiaddr - for _, addr := range cfg.Announce { - maddr, err := ma.NewMultiaddr(addr) - if err != nil { - return nil, err - } - annAddrs = append(annAddrs, maddr) - } - - filters := mafilter.NewFilters() - noAnnAddrs := map[string]bool{} - for _, addr := range cfg.NoAnnounce { - f, err := mamask.NewMask(addr) - if err == nil { - filters.AddDialFilter(f) - continue - } - maddr, err := ma.NewMultiaddr(addr) - if err != nil { - return nil, err - } - noAnnAddrs[maddr.String()] = true - } - - return func(allAddrs []ma.Multiaddr) []ma.Multiaddr { - var addrs []ma.Multiaddr - if len(annAddrs) > 0 { - addrs = annAddrs - } else { - addrs = allAddrs - } - - var out []ma.Multiaddr - for _, maddr := range addrs { - // check for exact matches - ok := noAnnAddrs[maddr.String()] - // check for /ipcidr matches - if !ok && !filters.AddrBlocked(maddr) { - out = append(out, maddr) - } - } - return out - }, nil -} - -func makeSmuxTransportOption(mplexExp bool) libp2p.Option { - const yamuxID = "/yamux/1.0.0" - const mplexID = "/mplex/6.7.0" - - ymxtpt := &yamux.Transport{ - AcceptBacklog: 512, - ConnectionWriteTimeout: time.Second * 10, - KeepAliveInterval: time.Second * 30, - EnableKeepAlive: true, - MaxStreamWindowSize: uint32(16 * 1024 * 1024), // 16MiB - LogOutput: ioutil.Discard, - } - - if os.Getenv("YAMUX_DEBUG") != "" { - ymxtpt.LogOutput = os.Stderr - } - - muxers := map[string]smux.Transport{yamuxID: ymxtpt} - if mplexExp { - muxers[mplexID] = mplex.DefaultTransport - } - - // Allow muxer preference order overriding - order := []string{yamuxID, mplexID} - if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" { - order = strings.Fields(prefs) - } - - opts := make([]libp2p.Option, 0, len(order)) - for _, id := range order { - tpt, ok := muxers[id] - if !ok 
{ - log.Warning("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id) - continue - } - delete(muxers, id) - opts = append(opts, libp2p.Muxer(id, tpt)) - } - - return libp2p.ChainOptions(opts...) -} - // Close calls Close() on the App object func (n *IpfsNode) Close() error { return n.app.Stop(n.ctx) @@ -245,7 +135,7 @@ func (n *IpfsNode) Context() context.Context { } // Bootstrap will set and call the IpfsNodes bootstrap function. -func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error { +func (n *IpfsNode) Bootstrap(cfg bootstrap.BootstrapConfig) error { // TODO what should return value be when in offlineMode? if n.Routing == nil { return nil @@ -269,7 +159,7 @@ func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error { } var err error - n.Bootstrapper, err = Bootstrap(n, cfg) + n.Bootstrapper, err = bootstrap.Bootstrap(n.Identity, n.PeerHost, n.Routing, cfg) return err } @@ -283,20 +173,7 @@ func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) { if err != nil { return nil, err } - return toPeerInfos(parsed), nil -} - -func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) { - var listen []ma.Multiaddr - for _, addr := range cfg.Addresses.Swarm { - maddr, err := ma.NewMultiaddr(addr) - if err != nil { - return nil, fmt.Errorf("failure to parse config.Addresses.Swarm: %s", cfg.Addresses.Swarm) - } - listen = append(listen, maddr) - } - - return listen, nil + return bootstrap.Peers.ToPeerInfos(parsed), nil } type ConstructPeerHostOpts struct { @@ -306,81 +183,3 @@ type ConstructPeerHostOpts struct { EnableRelayHop bool ConnectionManager ifconnmgr.ConnManager } - -type HostOption func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) - -var DefaultHostOption HostOption = constructPeerHost - -// isolates the complex initialization steps -func constructPeerHost(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) { - pkey := ps.PrivKey(id) - 
if pkey == nil { - return nil, fmt.Errorf("missing private key for node ID: %s", id.Pretty()) - } - options = append([]libp2p.Option{libp2p.Identity(pkey), libp2p.Peerstore(ps)}, options...) - return libp2p.New(ctx, options...) -} - -func filterRelayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { - var raddrs []ma.Multiaddr - for _, addr := range addrs { - _, err := addr.ValueForProtocol(circuit.P_CIRCUIT) - if err == nil { - continue - } - raddrs = append(raddrs, addr) - } - return raddrs -} - -func composeAddrsFactory(f, g p2pbhost.AddrsFactory) p2pbhost.AddrsFactory { - return func(addrs []ma.Multiaddr) []ma.Multiaddr { - return f(g(addrs)) - } -} - -// startListening on the network addresses -func startListening(host p2phost.Host, cfg *config.Config) error { - listenAddrs, err := listenAddresses(cfg) - if err != nil { - return err - } - - // Actually start listening: - if err := host.Network().Listen(listenAddrs...); err != nil { - return err - } - - // list out our addresses - addrs, err := host.Network().InterfaceListenAddresses() - if err != nil { - return err - } - log.Infof("Swarm listening at: %s", addrs) - return nil -} - -func constructDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) { - return dht.New( - ctx, host, - dhtopts.Datastore(dstore), - dhtopts.Validator(validator), - ) -} - -func constructClientDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) { - return dht.New( - ctx, host, - dhtopts.Client(true), - dhtopts.Datastore(dstore), - dhtopts.Validator(validator), - ) -} - -type RoutingOption func(context.Context, p2phost.Host, ds.Batching, record.Validator) (routing.IpfsRouting, error) - -type DiscoveryOption func(context.Context, p2phost.Host) (discovery.Service, error) - -var DHTOption RoutingOption = constructDHTRouting -var DHTClientOption RoutingOption = constructClientDHTRouting -var 
NilRouterOption RoutingOption = nilrouting.ConstructNilRouting diff --git a/core/coreapi/coreapi.go b/core/coreapi/coreapi.go index f22803f92..eaf870ec8 100644 --- a/core/coreapi/coreapi.go +++ b/core/coreapi/coreapi.go @@ -19,6 +19,7 @@ import ( "fmt" "github.com/ipfs/go-ipfs/core" + "github.com/ipfs/go-ipfs/core/node" "github.com/ipfs/go-ipfs/namesys" "github.com/ipfs/go-ipfs/pin" "github.com/ipfs/go-ipfs/provider" @@ -207,7 +208,7 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e cs := cfg.Ipns.ResolveCacheSize if cs == 0 { - cs = core.DefaultIpnsCacheSize + cs = node.DefaultIpnsCacheSize } if cs < 0 { return nil, fmt.Errorf("cannot specify negative resolve cache size") diff --git a/core/coreapi/test/api_test.go b/core/coreapi/test/api_test.go index 9ad164d8c..23b8b6289 100644 --- a/core/coreapi/test/api_test.go +++ b/core/coreapi/test/api_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "testing" + "github.com/ipfs/go-ipfs/core/bootstrap" "github.com/ipfs/go-ipfs/filestore" "github.com/ipfs/go-ipfs/core" @@ -101,7 +102,7 @@ func (NodeProvider) MakeAPISwarm(ctx context.Context, fullIdentity bool, n int) return nil, err } - bsinf := core.BootstrapConfigWithPeers( + bsinf := bootstrap.BootstrapConfigWithPeers( []pstore.PeerInfo{ nodes[0].Peerstore.PeerInfo(nodes[0].Identity), }, diff --git a/core/mock/mock.go b/core/mock/mock.go index dc47917aa..e759d2010 100644 --- a/core/mock/mock.go +++ b/core/mock/mock.go @@ -5,6 +5,7 @@ import ( commands "github.com/ipfs/go-ipfs/commands" core "github.com/ipfs/go-ipfs/core" + "github.com/ipfs/go-ipfs/core/node" "github.com/ipfs/go-ipfs/repo" datastore "github.com/ipfs/go-datastore" @@ -29,7 +30,7 @@ func NewMockNode() (*core.IpfsNode, error) { }) } -func MockHostOption(mn mocknet.Mocknet) core.HostOption { +func MockHostOption(mn mocknet.Mocknet) node.HostOption { return func(ctx context.Context, id peer.ID, ps pstore.Peerstore, _ ...libp2p.Option) (host.Host, error) { return 
mn.AddPeerWithPeerstore(id, ps) } diff --git a/core/ncore.go b/core/ncore.go index bcdc1d0bb..4704a71af 100644 --- a/core/ncore.go +++ b/core/ncore.go @@ -1,839 +1,21 @@ package core import ( - "bytes" "context" - "errors" - "fmt" - "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-bitswap/network" - bserv "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - "github.com/ipfs/go-ipfs-exchange-offline" - u "github.com/ipfs/go-ipfs-util" - rp "github.com/ipfs/go-ipfs/exchange/reprovide" - "github.com/ipfs/go-ipfs/filestore" - "github.com/ipfs/go-ipfs/namesys" - ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher" - "github.com/ipfs/go-ipfs/pin" - "github.com/ipfs/go-ipfs/provider" - "github.com/ipfs/go-ipfs/thirdparty/cidv0v1" - "github.com/ipfs/go-ipfs/thirdparty/verifbs" - "github.com/ipfs/go-ipld-format" - "github.com/ipfs/go-ipns" - merkledag "github.com/ipfs/go-merkledag" - "github.com/ipfs/go-mfs" - ft "github.com/ipfs/go-unixfs" + "github.com/jbenet/goprocess" - "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p-autonat-svc" - circuit "github.com/libp2p/go-libp2p-circuit" - connmgr "github.com/libp2p/go-libp2p-connmgr" - "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p-metrics" - pstore "github.com/libp2p/go-libp2p-peerstore" - "github.com/libp2p/go-libp2p-peerstore/pstoremem" - "github.com/libp2p/go-libp2p-pnet" - "github.com/libp2p/go-libp2p-pubsub" - psrouter "github.com/libp2p/go-libp2p-pubsub-router" - quic "github.com/libp2p/go-libp2p-quic-transport" - "github.com/libp2p/go-libp2p-record" - "github.com/libp2p/go-libp2p-routing" - rhelpers "github.com/libp2p/go-libp2p-routing-helpers" - "github.com/libp2p/go-libp2p/p2p/discovery" - rhost "github.com/libp2p/go-libp2p/p2p/host/routed" "go.uber.org/fx" - "time" - "github.com/ipfs/go-ipfs/repo" - - retry 
"github.com/ipfs/go-datastore/retrystore" iconfig "github.com/ipfs/go-ipfs-config" uio "github.com/ipfs/go-unixfs/io" - ic "github.com/libp2p/go-libp2p-crypto" - p2phost "github.com/libp2p/go-libp2p-host" - "github.com/libp2p/go-libp2p-peer" - mamask "github.com/whyrusleeping/multiaddr-filter" ) -func repoConfig(repo repo.Repo) (*iconfig.Config, error) { - return repo.Config() -} - -func identity(cfg *iconfig.Config) (peer.ID, error) { - cid := cfg.Identity.PeerID - if cid == "" { - return "", errors.New("identity was not set in config (was 'ipfs init' run?)") - } - if len(cid) == 0 { - return "", errors.New("no peer ID in config! (was 'ipfs init' run?)") - } - - id, err := peer.IDB58Decode(cid) - if err != nil { - return "", fmt.Errorf("peer ID invalid: %s", err) - } - - return id, nil -} - -func peerstore(id peer.ID, sk ic.PrivKey) pstore.Peerstore { - ps := pstoremem.NewPeerstore() - - if sk != nil { - ps.AddPrivKey(id, sk) - ps.AddPubKey(id, sk.GetPublic()) - } - - return ps -} - -func privateKey(cfg *iconfig.Config, id peer.ID) (ic.PrivKey, error) { - if cfg.Identity.PrivKey == "" { - return nil, nil - } - - sk, err := cfg.Identity.DecodePrivateKey("passphrase todo!") - if err != nil { - return nil, err - } - - id2, err := peer.IDFromPrivateKey(sk) - if err != nil { - return nil, err - } - - if id2 != id { - return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2) - } - return sk, nil -} - -func datastoreCtor(repo repo.Repo) ds.Datastore { - return repo.Datastore() -} - -type BaseBlocks bstore.Blockstore - -func baseBlockstoreCtor(mctx MetricsCtx, repo repo.Repo, cfg *iconfig.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs BaseBlocks, err error) { - rds := &retry.Datastore{ - Batching: repo.Datastore(), - Delay: time.Millisecond * 200, - Retries: 6, - TempErrFunc: isTooManyFDError, - } - // hash security - bs = bstore.NewBlockstore(rds) - bs = &verifbs.VerifBS{Blockstore: bs} - - opts := bstore.DefaultCacheOpts() - 
opts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize - if !bcfg.Permanent { - opts.HasBloomFilterSize = 0 - } - - if !bcfg.NilRepo { - ctx, cancel := context.WithCancel(mctx) - - lc.Append(fx.Hook{ - OnStop: func(context context.Context) error { - cancel() - return nil - }, - }) - bs, err = bstore.CachedBlockstore(ctx, bs, opts) - if err != nil { - return nil, err - } - } - - bs = bstore.NewIdStore(bs) - bs = cidv0v1.NewBlockstore(bs) - - if cfg.Datastore.HashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly? - bs.HashOnRead(true) - } - - return -} - -func gcBlockstoreCtor(lc fx.Lifecycle, repo repo.Repo, bb BaseBlocks, cfg *iconfig.Config) (gclocker bstore.GCLocker, gcbs bstore.GCBlockstore, bs bstore.Blockstore, fstore *filestore.Filestore) { - gclocker = bstore.NewGCLocker() - gcbs = bstore.NewGCBlockstore(bb, gclocker) - - if cfg.Experimental.FilestoreEnabled || cfg.Experimental.UrlstoreEnabled { - // hash security - fstore = filestore.NewFilestore(bb, repo.FileManager()) //TODO: mark optional - gcbs = bstore.NewGCBlockstore(fstore, gclocker) - gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs} - } - bs = gcbs - return -} - -func blockServiceCtor(lc fx.Lifecycle, bs bstore.Blockstore, rem exchange.Interface) bserv.BlockService { - bsvc := bserv.New(bs, rem) - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return bsvc.Close() - }, - }) - - return bsvc -} - -func recordValidator(ps pstore.Peerstore) record.Validator { - return record.NamespacedValidator{ - "pk": record.PublicKeyValidator{}, - "ipns": ipns.Validator{KeyBook: ps}, - } -} - -//////////////////// -// libp2p related - //////////////////// // libp2p -var ipfsp2p = fx.Options( - fx.Provide(p2pAddrFilters), - fx.Provide(p2pBandwidthCounter), - fx.Provide(p2pPNet), - fx.Provide(p2pAddrsFactory), - fx.Provide(p2pConnectionManager), - fx.Provide(p2pSmuxTransport), - fx.Provide(p2pNatPortMap), - fx.Provide(p2pRelay), - 
fx.Provide(p2pAutoRealy), - fx.Provide(p2pDefaultTransports), - fx.Provide(p2pQUIC), - - fx.Provide(p2pHostOption), - fx.Provide(p2pHost), - fx.Provide(p2pOnlineRouting), - - fx.Provide(pubsubCtor), - fx.Provide(newDiscoveryHandler), - - fx.Invoke(autoNATService), - fx.Invoke(p2pPNetChecker), - fx.Invoke(startListening), - fx.Invoke(setupDiscovery), -) - -func p2pHostOption(bcfg *BuildCfg) (hostOption HostOption, err error) { - hostOption = bcfg.Host - if bcfg.DisableEncryptedConnections { - innerHostOption := hostOption - hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) { - return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...) - } - // TODO: shouldn't this be Errorf to guarantee visibility? - log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS. - You will not be able to connect to any nodes configured to use encrypted connections`) - } - return hostOption, nil -} - -func p2pAddrFilters(cfg *iconfig.Config) (opts libp2pOpts, err error) { - for _, s := range cfg.Swarm.AddrFilters { - f, err := mamask.NewMask(s) - if err != nil { - return opts, fmt.Errorf("incorrectly formatted address filter in config: %s", s) - } - opts.Opts = append(opts.Opts, libp2p.FilterAddresses(f)) - } - return opts, nil -} - -func p2pBandwidthCounter(cfg *iconfig.Config) (opts libp2pOpts, reporter metrics.Reporter) { - reporter = metrics.NewBandwidthCounter() - - if !cfg.Swarm.DisableBandwidthMetrics { - opts.Opts = append(opts.Opts, libp2p.BandwidthReporter(reporter)) - } - return opts, reporter -} - -type libp2pOpts struct { - fx.Out - - Opts []libp2p.Option `group:"libp2p"` -} - -type PNetFingerprint []byte // TODO: find some better place -func p2pPNet(repo repo.Repo) (opts libp2pOpts, fp PNetFingerprint, err error) { - swarmkey, err := repo.SwarmKey() - if err != nil || swarmkey == nil { - return opts, nil, err - } - - protec, err := 
pnet.NewProtector(bytes.NewReader(swarmkey)) - if err != nil { - return opts, nil, fmt.Errorf("failed to configure private network: %s", err) - } - fp = protec.Fingerprint() - - opts.Opts = append(opts.Opts, libp2p.PrivateNetwork(protec)) - return opts, fp, nil -} - -func p2pPNetChecker(repo repo.Repo, ph p2phost.Host, lc fx.Lifecycle) error { - // TODO: better check? - swarmkey, err := repo.SwarmKey() - if err != nil || swarmkey == nil { - return err - } - - done := make(chan struct{}) - lc.Append(fx.Hook{ - OnStart: func(_ context.Context) error { - go func() { - t := time.NewTicker(30 * time.Second) - <-t.C // swallow one tick - for { - select { - case <-t.C: - if len(ph.Network().Peers()) == 0 { - log.Warning("We are in private network and have no peers.") - log.Warning("This might be configuration mistake.") - } - case <-done: - return - } - } - }() - return nil - }, - OnStop: func(_ context.Context) error { - close(done) - return nil - }, - }) - return nil -} - -func p2pAddrsFactory(cfg *iconfig.Config) (opts libp2pOpts, err error) { - addrsFactory, err := makeAddrsFactory(cfg.Addresses) - if err != nil { - return opts, err - } - if !cfg.Swarm.DisableRelay { - addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs) - } - opts.Opts = append(opts.Opts, libp2p.AddrsFactory(addrsFactory)) - return -} - -func p2pConnectionManager(cfg *iconfig.Config) (opts libp2pOpts, err error) { - grace := iconfig.DefaultConnMgrGracePeriod - low := iconfig.DefaultConnMgrHighWater - high := iconfig.DefaultConnMgrHighWater - - switch cfg.Swarm.ConnMgr.Type { - case "": - // 'default' value is the basic connection manager - return - case "none": - return opts, nil - case "basic": - grace, err = time.ParseDuration(cfg.Swarm.ConnMgr.GracePeriod) - if err != nil { - return opts, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err) - } - - low = cfg.Swarm.ConnMgr.LowWater - high = cfg.Swarm.ConnMgr.HighWater - default: - return opts, fmt.Errorf("unrecognized 
ConnMgr.Type: %q", cfg.Swarm.ConnMgr.Type) - } - - cm := connmgr.NewConnManager(low, high, grace) - opts.Opts = append(opts.Opts, libp2p.ConnectionManager(cm)) - return -} - -func p2pSmuxTransport(bcfg *BuildCfg) (opts libp2pOpts, err error) { - opts.Opts = append(opts.Opts, makeSmuxTransportOption(bcfg.getOpt("mplex"))) - return -} - -func p2pNatPortMap(cfg *iconfig.Config) (opts libp2pOpts, err error) { - if !cfg.Swarm.DisableNatPortMap { - opts.Opts = append(opts.Opts, libp2p.NATPortMap()) - } - return -} - -func p2pRelay(cfg *iconfig.Config) (opts libp2pOpts, err error) { - if cfg.Swarm.DisableRelay { - // Enabled by default. - opts.Opts = append(opts.Opts, libp2p.DisableRelay()) - } else { - relayOpts := []circuit.RelayOpt{circuit.OptDiscovery} - if cfg.Swarm.EnableRelayHop { - relayOpts = append(relayOpts, circuit.OptHop) - } - opts.Opts = append(opts.Opts, libp2p.EnableRelay(relayOpts...)) - } - return -} - -func p2pAutoRealy(cfg *iconfig.Config) (opts libp2pOpts, err error) { - // enable autorelay - if cfg.Swarm.EnableAutoRelay { - opts.Opts = append(opts.Opts, libp2p.EnableAutoRelay()) - } - return -} - -func p2pDefaultTransports() (opts libp2pOpts, err error) { - opts.Opts = append(opts.Opts, libp2p.DefaultTransports) - return -} - -func p2pQUIC(cfg *iconfig.Config) (opts libp2pOpts, err error) { - if cfg.Experimental.QUIC { - opts.Opts = append(opts.Opts, libp2p.Transport(quic.NewTransport)) - } - return -} - -type p2pHostIn struct { - fx.In - - BCfg *BuildCfg - Repo repo.Repo - Validator record.Validator - HostOption HostOption - ID peer.ID - Peerstore pstore.Peerstore - - Opts [][]libp2p.Option `group:"libp2p"` -} - -type BaseRouting routing.IpfsRouting -type p2pHostOut struct { - fx.Out - - Host p2phost.Host - Routing BaseRouting - IpfsDHT *dht.IpfsDHT -} - -// TODO: move some of this into params struct -func p2pHost(mctx MetricsCtx, lc fx.Lifecycle, params p2pHostIn) (out p2pHostOut, err error) { - opts := []libp2p.Option{libp2p.NoListenAddrs} - for 
_, o := range params.Opts { - opts = append(opts, o...) - } - - ctx, cancel := context.WithCancel(mctx) - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - cancel() - return nil - }, - }) - - opts = append(opts, libp2p.Routing(func(h p2phost.Host) (routing.PeerRouting, error) { - r, err := params.BCfg.Routing(ctx, h, params.Repo.Datastore(), params.Validator) - out.Routing = r - return r, err - })) - - out.Host, err = params.HostOption(ctx, params.ID, params.Peerstore, opts...) - if err != nil { - return p2pHostOut{}, err - } - - // this code is necessary just for tests: mock network constructions - // ignore the libp2p constructor options that actually construct the routing! - if out.Routing == nil { - r, err := params.BCfg.Routing(ctx, out.Host, params.Repo.Datastore(), params.Validator) - if err != nil { - return p2pHostOut{}, err - } - out.Routing = r - out.Host = rhost.Wrap(out.Host, out.Routing) - } - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return out.Host.Close() - }, - }) - - // TODO: break this up into more DI units - // TODO: I'm not a fan of type assertions like this but the - // `RoutingOption` system doesn't currently provide access to the - // IpfsNode. - // - // Ideally, we'd do something like: - // - // 1. Add some fancy method to introspect into tiered routers to extract - // things like the pubsub router or the DHT (complicated, messy, - // probably not worth it). - // 2. Pass the IpfsNode into the RoutingOption (would also remove the - // PSRouter case below. - // 3. Introduce some kind of service manager? (my personal favorite but - // that requires a fair amount of work). 
- if dht, ok := out.Routing.(*dht.IpfsDHT); ok { - out.IpfsDHT = dht - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return out.IpfsDHT.Close() - }, - }) - } - - return out, err -} - -type p2pRoutingIn struct { - fx.In - - BCfg *BuildCfg - Repo repo.Repo - Validator record.Validator - Host p2phost.Host - PubSub *pubsub.PubSub - - BaseRouting BaseRouting -} - -type p2pRoutingOut struct { - fx.Out - - IpfsRouting routing.IpfsRouting - PSRouter *psrouter.PubsubValueStore //TODO: optional -} - -func p2pOnlineRouting(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { - out.IpfsRouting = in.BaseRouting - - if in.BCfg.getOpt("ipnsps") { - out.PSRouter = psrouter.NewPubsubValueStore( - lifecycleCtx(mctx, lc), - in.Host, - in.BaseRouting, - in.PubSub, - in.Validator, - ) - - out.IpfsRouting = rhelpers.Tiered{ - Routers: []routing.IpfsRouting{ - // Always check pubsub first. - &rhelpers.Compose{ - ValueStore: &rhelpers.LimitedValueStore{ - ValueStore: out.PSRouter, - Namespaces: []string{"ipns"}, - }, - }, - in.BaseRouting, - }, - Validator: in.Validator, - } - } - return out -} - -//////////// -// P2P services - -func autoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host) error { - if !cfg.Swarm.EnableAutoNATService { - return nil - } - var opts []libp2p.Option - if cfg.Experimental.QUIC { - opts = append(opts, libp2p.DefaultTransports, libp2p.Transport(quic.NewTransport)) - } - - _, err := autonat.NewAutoNATService(lifecycleCtx(mctx, lc), host, opts...) 
- return err -} - -func pubsubCtor(mctx MetricsCtx, lc fx.Lifecycle, host p2phost.Host, bcfg *BuildCfg, cfg *iconfig.Config) (service *pubsub.PubSub, err error) { - if !(bcfg.getOpt("pubsub") || bcfg.getOpt("ipnsps")) { - return nil, nil // TODO: mark optional - } - - var pubsubOptions []pubsub.Option - if cfg.Pubsub.DisableSigning { - pubsubOptions = append(pubsubOptions, pubsub.WithMessageSigning(false)) - } - - if cfg.Pubsub.StrictSignatureVerification { - pubsubOptions = append(pubsubOptions, pubsub.WithStrictSignatureVerification(true)) - } - - switch cfg.Pubsub.Router { - case "": - fallthrough - case "floodsub": - service, err = pubsub.NewFloodSub(lifecycleCtx(mctx, lc), host, pubsubOptions...) - - case "gossipsub": - service, err = pubsub.NewGossipSub(lifecycleCtx(mctx, lc), host, pubsubOptions...) - - default: - err = fmt.Errorf("Unknown pubsub router %s", cfg.Pubsub.Router) - } - - return service, err -} - -//////////// -// Offline services - -// offline.Exchange -// offroute.NewOfflineRouter - -func offlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo) (namesys.NameSystem, error) { - return namesys.NewNameSystem(rt, repo.Datastore(), 0), nil -} - -//////////// -// IPFS services - -func pinning(bstore bstore.Blockstore, ds format.DAGService, repo repo.Repo) (pin.Pinner, error) { - internalDag := merkledag.NewDAGService(bserv.New(bstore, offline.Exchange(bstore))) - pinning, err := pin.LoadPinner(repo.Datastore(), ds, internalDag) - if err != nil { - // TODO: we should move towards only running 'NewPinner' explicitly on - // node init instead of implicitly here as a result of the pinner keys - // not being found in the datastore. 
- // this is kinda sketchy and could cause data loss - pinning = pin.NewPinner(repo.Datastore(), ds, internalDag) - } - - return pinning, nil -} - -func dagCtor(bs bserv.BlockService) format.DAGService { - return merkledag.NewDAGService(bs) -} - -func onlineExchangeCtor(mctx MetricsCtx, lc fx.Lifecycle, host p2phost.Host, rt routing.IpfsRouting, bs bstore.GCBlockstore) exchange.Interface { - bitswapNetwork := bsnet.NewFromIpfsHost(host, rt) - exch := bitswap.New(lifecycleCtx(mctx, lc), bitswapNetwork, bs) - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return exch.Close() - }, - }) - return exch -} - -func onlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *iconfig.Config) (namesys.NameSystem, error) { - cs := cfg.Ipns.ResolveCacheSize - if cs == 0 { - cs = DefaultIpnsCacheSize - } - if cs < 0 { - return nil, fmt.Errorf("cannot specify negative resolve cache size") - } - return namesys.NewNameSystem(rt, repo.Datastore(), cs), nil -} - -func ipnsRepublisher(lc lcProcess, cfg *iconfig.Config, namesys namesys.NameSystem, repo repo.Repo, privKey ic.PrivKey) error { - repub := ipnsrp.NewRepublisher(namesys, repo.Datastore(), privKey, repo.Keystore()) - - if cfg.Ipns.RepublishPeriod != "" { - d, err := time.ParseDuration(cfg.Ipns.RepublishPeriod) - if err != nil { - return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err) - } - - if !u.Debug && (d < time.Minute || d > (time.Hour*24)) { - return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d) - } - - repub.Interval = d - } - - if cfg.Ipns.RecordLifetime != "" { - d, err := time.ParseDuration(cfg.Ipns.RecordLifetime) - if err != nil { - return fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err) - } - - repub.RecordLifetime = d - } - - lc.Run(repub.Run) - return nil -} - -type discoveryHandler struct { - ctx context.Context - host p2phost.Host -} - -func (dh *discoveryHandler) HandlePeerFound(p 
pstore.PeerInfo) { - log.Warning("trying peer info: ", p) - ctx, cancel := context.WithTimeout(dh.ctx, discoveryConnTimeout) - defer cancel() - if err := dh.host.Connect(ctx, p); err != nil { - log.Warning("Failed to connect to peer found by discovery: ", err) - } -} - -func newDiscoveryHandler(mctx MetricsCtx, lc fx.Lifecycle, host p2phost.Host) *discoveryHandler { - return &discoveryHandler{ - ctx: lifecycleCtx(mctx, lc), - host: host, - } -} - -func setupDiscovery(mctx MetricsCtx, lc fx.Lifecycle, cfg *iconfig.Config, host p2phost.Host, handler *discoveryHandler) error { - if cfg.Discovery.MDNS.Enabled { - mdns := cfg.Discovery.MDNS - if mdns.Interval == 0 { - mdns.Interval = 5 - } - service, err := discovery.NewMdnsService(lifecycleCtx(mctx, lc), host, time.Duration(mdns.Interval)*time.Second, discovery.ServiceTag) - if err != nil { - log.Error("mdns error: ", err) - return nil - } - service.RegisterNotifee(handler) - } - return nil -} - -func providerQueue(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo) (*provider.Queue, error) { - return provider.NewQueue(lifecycleCtx(mctx, lc), "provider-v1", repo.Datastore()) -} - -func providerCtor(mctx MetricsCtx, lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting) provider.Provider { - return provider.NewProvider(lifecycleCtx(mctx, lc), queue, rt) -} - -func reproviderCtor(mctx MetricsCtx, lc fx.Lifecycle, cfg *iconfig.Config, bs BaseBlocks, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*rp.Reprovider, error) { - var keyProvider rp.KeyChanFunc - - switch cfg.Reprovider.Strategy { - case "all": - fallthrough - case "": - keyProvider = rp.NewBlockstoreProvider(bs) - case "roots": - keyProvider = rp.NewPinnedProvider(pinning, ds, true) - case "pinned": - keyProvider = rp.NewPinnedProvider(pinning, ds, false) - default: - return nil, fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy) - } - return rp.NewReprovider(lifecycleCtx(mctx, lc), rt, keyProvider), nil -} - -func 
reprovider(cfg *iconfig.Config, reprovider *rp.Reprovider) error { - reproviderInterval := kReprovideFrequency - if cfg.Reprovider.Interval != "" { - dur, err := time.ParseDuration(cfg.Reprovider.Interval) - if err != nil { - return err - } - - reproviderInterval = dur - } - - go reprovider.Run(reproviderInterval) - return nil -} - -func files(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, error) { - dsk := ds.NewKey("/local/filesroot") - pf := func(ctx context.Context, c cid.Cid) error { - return repo.Datastore().Put(dsk, c.Bytes()) - } - - var nd *merkledag.ProtoNode - val, err := repo.Datastore().Get(dsk) - ctx := lifecycleCtx(mctx, lc) - - switch { - case err == ds.ErrNotFound || val == nil: - nd = ft.EmptyDirNode() - err := dag.Add(ctx, nd) - if err != nil { - return nil, fmt.Errorf("failure writing to dagstore: %s", err) - } - case err == nil: - c, err := cid.Cast(val) - if err != nil { - return nil, err - } - - rnd, err := dag.Get(ctx, c) - if err != nil { - return nil, fmt.Errorf("error loading filesroot from DAG: %s", err) - } - - pbnd, ok := rnd.(*merkledag.ProtoNode) - if !ok { - return nil, merkledag.ErrNotProtobuf - } - - nd = pbnd - default: - return nil, err - } - - root, err := mfs.NewRoot(ctx, dag, nd, pf) - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return root.Close() - }, - }) - - return root, err -} - -//////////// -// Hacks - -// lifecycleCtx creates a context which will be cancelled when lifecycle stops -// -// This is a hack which we need because most of our services use contexts in a -// wrong way -func lifecycleCtx(mctx MetricsCtx, lc fx.Lifecycle) context.Context { - ctx, cancel := context.WithCancel(mctx) - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - cancel() - return nil - }, - }) - return ctx -} - -type lcProcess struct { - fx.In - - LC fx.Lifecycle - Proc goprocess.Process -} - -func (lp *lcProcess) Run(f goprocess.ProcessFunc) { - proc := make(chan 
goprocess.Process, 1) - lp.LC.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - proc <- lp.Proc.Go(f) - return nil - }, - OnStop: func(ctx context.Context) error { - return (<-proc).Close() // todo: respect ctx, somehow - }, - }) +func setupSharding(cfg *iconfig.Config) { + // TEMP: setting global sharding switch here + uio.UseHAMTSharding = cfg.Experimental.ShardingEnabled } func baseProcess(lc fx.Lifecycle) goprocess.Process { @@ -845,8 +27,3 @@ func baseProcess(lc fx.Lifecycle) goprocess.Process { }) return p } - -func setupSharding(cfg *iconfig.Config) { - // TEMP: setting global sharding switch here - uio.UseHAMTSharding = cfg.Experimental.ShardingEnabled -} diff --git a/core/node/builder.go b/core/node/builder.go new file mode 100644 index 000000000..112d1f7fe --- /dev/null +++ b/core/node/builder.go @@ -0,0 +1,36 @@ +package node + +import ( + "github.com/ipfs/go-ipfs/repo" +) + +type BuildCfg struct { + // If online is set, the node will have networking enabled + Online bool + + // ExtraOpts is a map of extra options used to configure the ipfs nodes creation + ExtraOpts map[string]bool + + // If permanent then node should run more expensive processes + // that will improve performance in long run + Permanent bool + + // DisableEncryptedConnections disables connection encryption *entirely*. + // DO NOT SET THIS UNLESS YOU'RE TESTING. 
+ DisableEncryptedConnections bool + + // If NilRepo is set, a Repo backed by a nil datastore will be constructed + NilRepo bool + + Routing RoutingOption + Host HostOption + Repo repo.Repo +} + +func (cfg *BuildCfg) getOpt(key string) bool { + if cfg.ExtraOpts == nil { + return false + } + + return cfg.ExtraOpts[key] +} diff --git a/core/node/core.go b/core/node/core.go new file mode 100644 index 000000000..160a833cf --- /dev/null +++ b/core/node/core.go @@ -0,0 +1,117 @@ +package node + +import ( + "context" + "fmt" + + "github.com/ipfs/go-bitswap" + "github.com/ipfs/go-bitswap/network" + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + blockstore "github.com/ipfs/go-ipfs-blockstore" + exchange "github.com/ipfs/go-ipfs-exchange-interface" + offline "github.com/ipfs/go-ipfs-exchange-offline" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipfs/go-mfs" + "github.com/ipfs/go-unixfs" + host "github.com/libp2p/go-libp2p-host" + routing "github.com/libp2p/go-libp2p-routing" + "go.uber.org/fx" + + "github.com/ipfs/go-ipfs/pin" + "github.com/ipfs/go-ipfs/repo" +) + +func BlockServiceCtor(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService { + bsvc := blockservice.New(bs, rem) + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return bsvc.Close() + }, + }) + + return bsvc +} + +func Pinning(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo) (pin.Pinner, error) { + internalDag := merkledag.NewDAGService(blockservice.New(bstore, offline.Exchange(bstore))) + pinning, err := pin.LoadPinner(repo.Datastore(), ds, internalDag) + if err != nil { + // TODO: we should move towards only running 'NewPinner' explicitly on + // node init instead of implicitly here as a result of the pinner keys + // not being found in the datastore. 
+ // this is kinda sketchy and could cause data loss + pinning = pin.NewPinner(repo.Datastore(), ds, internalDag) + } + + return pinning, nil +} + +func DagCtor(bs blockservice.BlockService) format.DAGService { + return merkledag.NewDAGService(bs) +} + +func OnlineExchangeCtor(mctx MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.IpfsRouting, bs blockstore.GCBlockstore) exchange.Interface { + bitswapNetwork := network.NewFromIpfsHost(host, rt) + exch := bitswap.New(lifecycleCtx(mctx, lc), bitswapNetwork, bs) + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return exch.Close() + }, + }) + return exch +} + +func Files(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, error) { + dsk := datastore.NewKey("/local/filesroot") + pf := func(ctx context.Context, c cid.Cid) error { + return repo.Datastore().Put(dsk, c.Bytes()) + } + + var nd *merkledag.ProtoNode + val, err := repo.Datastore().Get(dsk) + ctx := lifecycleCtx(mctx, lc) + + switch { + case err == datastore.ErrNotFound || val == nil: + nd = unixfs.EmptyDirNode() + err := dag.Add(ctx, nd) + if err != nil { + return nil, fmt.Errorf("failure writing to dagstore: %s", err) + } + case err == nil: + c, err := cid.Cast(val) + if err != nil { + return nil, err + } + + rnd, err := dag.Get(ctx, c) + if err != nil { + return nil, fmt.Errorf("error loading filesroot from DAG: %s", err) + } + + pbnd, ok := rnd.(*merkledag.ProtoNode) + if !ok { + return nil, merkledag.ErrNotProtobuf + } + + nd = pbnd + default: + return nil, err + } + + root, err := mfs.NewRoot(ctx, dag, nd, pf) + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return root.Close() + }, + }) + + return root, err +} + +type MetricsCtx context.Context diff --git a/core/node/discovery.go b/core/node/discovery.go new file mode 100644 index 000000000..f51e7593d --- /dev/null +++ b/core/node/discovery.go @@ -0,0 +1,51 @@ +package node + +import ( + "context" + "time" + + 
"github.com/ipfs/go-ipfs-config" + "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p/p2p/discovery" + "go.uber.org/fx" +) + +const discoveryConnTimeout = time.Second * 30 + +type discoveryHandler struct { + ctx context.Context + host host.Host +} + +func (dh *discoveryHandler) HandlePeerFound(p peerstore.PeerInfo) { + log.Warning("trying peer info: ", p) + ctx, cancel := context.WithTimeout(dh.ctx, discoveryConnTimeout) + defer cancel() + if err := dh.host.Connect(ctx, p); err != nil { + log.Warning("Failed to connect to peer found by discovery: ", err) + } +} + +func NewDiscoveryHandler(mctx MetricsCtx, lc fx.Lifecycle, host host.Host) *discoveryHandler { + return &discoveryHandler{ + ctx: lifecycleCtx(mctx, lc), + host: host, + } +} + +func SetupDiscovery(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, host host.Host, handler *discoveryHandler) error { + if cfg.Discovery.MDNS.Enabled { + mdns := cfg.Discovery.MDNS + if mdns.Interval == 0 { + mdns.Interval = 5 + } + service, err := discovery.NewMdnsService(lifecycleCtx(mctx, lc), host, time.Duration(mdns.Interval)*time.Second, discovery.ServiceTag) + if err != nil { + log.Error("mdns error: ", err) + return nil + } + service.RegisterNotifee(handler) + } + return nil +} diff --git a/core/node/groups.go b/core/node/groups.go new file mode 100644 index 000000000..202c0c4f2 --- /dev/null +++ b/core/node/groups.go @@ -0,0 +1,88 @@ +package node + +import ( + offline "github.com/ipfs/go-ipfs-exchange-offline" + "go.uber.org/fx" + + offroute "github.com/ipfs/go-ipfs-routing/offline" + "github.com/ipfs/go-ipfs/p2p" + "github.com/ipfs/go-ipfs/provider" +) + +var LibP2P = fx.Options( + fx.Provide(P2PAddrFilters), + fx.Provide(P2PBandwidthCounter), + fx.Provide(P2PPNet), + fx.Provide(P2PAddrsFactory), + fx.Provide(P2PConnectionManager), + fx.Provide(P2PSmuxTransport), + fx.Provide(P2PNatPortMap), + fx.Provide(P2PRelay), + fx.Provide(P2PAutoRealy), + 
fx.Provide(P2PDefaultTransports), + fx.Provide(P2PQUIC), + + fx.Provide(P2PHostOption), + fx.Provide(P2PHost), + fx.Provide(P2POnlineRouting), + + fx.Provide(Pubsub), + fx.Provide(NewDiscoveryHandler), + + fx.Invoke(AutoNATService), + fx.Invoke(P2PPNetChecker), + fx.Invoke(StartListening), + fx.Invoke(SetupDiscovery), +) + +var Storage = fx.Options( + fx.Provide(RepoConfig), + fx.Provide(DatastoreCtor), + fx.Provide(BaseBlockstoreCtor), + fx.Provide(GcBlockstoreCtor), +) + +var Identity = fx.Options( + fx.Provide(PeerID), + fx.Provide(PrivateKey), + fx.Provide(Peerstore), +) + +var IPNS = fx.Options( + fx.Provide(RecordValidator), +) + +var Providers = fx.Options( + fx.Provide(ProviderQueue), + fx.Provide(ProviderCtor), + fx.Provide(ReproviderCtor), + + fx.Invoke(Reprovider), + fx.Invoke(provider.Provider.Run), +) + +var Online = fx.Options( + fx.Provide(OnlineExchangeCtor), + fx.Provide(OnlineNamesysCtor), + + fx.Invoke(IpnsRepublisher), + + fx.Provide(p2p.NewP2P), + + LibP2P, + Providers, +) + +var Offline = fx.Options( + fx.Provide(offline.Exchange), + fx.Provide(OfflineNamesysCtor), + fx.Provide(offroute.NewOfflineRouter), + fx.Provide(provider.NewOfflineProvider), +) + +func Networked(online bool) fx.Option { + if online { + return Online + } + return Offline +} diff --git a/core/node/helpers.go b/core/node/helpers.go new file mode 100644 index 000000000..a785124a3 --- /dev/null +++ b/core/node/helpers.go @@ -0,0 +1,43 @@ +package node + +import ( + "context" + + "github.com/jbenet/goprocess" + "go.uber.org/fx" +) + +// lifecycleCtx creates a context which will be cancelled when lifecycle stops +// +// This is a hack which we need because most of our services use contexts in a +// wrong way +func lifecycleCtx(mctx MetricsCtx, lc fx.Lifecycle) context.Context { + ctx, cancel := context.WithCancel(mctx) + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + cancel() + return nil + }, + }) + return ctx +} + +type lcProcess struct { + fx.In + + LC 
fx.Lifecycle + Proc goprocess.Process +} + +func (lp *lcProcess) Run(f goprocess.ProcessFunc) { + proc := make(chan goprocess.Process, 1) + lp.LC.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + proc <- lp.Proc.Go(f) + return nil + }, + OnStop: func(ctx context.Context) error { + return (<-proc).Close() // todo: respect ctx, somehow + }, + }) +} diff --git a/core/node/identity.go b/core/node/identity.go new file mode 100644 index 000000000..eb3903098 --- /dev/null +++ b/core/node/identity.go @@ -0,0 +1,48 @@ +package node + +import ( + "errors" + "fmt" + + "github.com/ipfs/go-ipfs-config" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-peer" +) + +func PeerID(cfg *config.Config) (peer.ID, error) { + cid := cfg.Identity.PeerID + if cid == "" { + return "", errors.New("identity was not set in config (was 'ipfs init' run?)") + } + if len(cid) == 0 { + return "", errors.New("no peer ID in config! (was 'ipfs init' run?)") + } + + id, err := peer.IDB58Decode(cid) + if err != nil { + return "", fmt.Errorf("peer ID invalid: %s", err) + } + + return id, nil +} + +func PrivateKey(cfg *config.Config, id peer.ID) (crypto.PrivKey, error) { + if cfg.Identity.PrivKey == "" { + return nil, nil + } + + sk, err := cfg.Identity.DecodePrivateKey("passphrase todo!") + if err != nil { + return nil, err + } + + id2, err := peer.IDFromPrivateKey(sk) + if err != nil { + return nil, err + } + + if id2 != id { + return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2) + } + return sk, nil +} diff --git a/core/node/ipns.go b/core/node/ipns.go new file mode 100644 index 000000000..1e8511d2c --- /dev/null +++ b/core/node/ipns.go @@ -0,0 +1,71 @@ +package node + +import ( + "fmt" + "time" + + "github.com/ipfs/go-ipfs-config" + "github.com/ipfs/go-ipfs-util" + "github.com/ipfs/go-ipns" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-record" + 
"github.com/libp2p/go-libp2p-routing" + + "github.com/ipfs/go-ipfs/namesys" + "github.com/ipfs/go-ipfs/namesys/republisher" + "github.com/ipfs/go-ipfs/repo" +) + +const DefaultIpnsCacheSize = 128 + +func RecordValidator(ps peerstore.Peerstore) record.Validator { + return record.NamespacedValidator{ + "pk": record.PublicKeyValidator{}, + "ipns": ipns.Validator{KeyBook: ps}, + } +} + +func OfflineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo) (namesys.NameSystem, error) { + return namesys.NewNameSystem(rt, repo.Datastore(), 0), nil +} + +func OnlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *config.Config) (namesys.NameSystem, error) { + cs := cfg.Ipns.ResolveCacheSize + if cs == 0 { + cs = DefaultIpnsCacheSize + } + if cs < 0 { + return nil, fmt.Errorf("cannot specify negative resolve cache size") + } + return namesys.NewNameSystem(rt, repo.Datastore(), cs), nil +} + +func IpnsRepublisher(lc lcProcess, cfg *config.Config, namesys namesys.NameSystem, repo repo.Repo, privKey crypto.PrivKey) error { + repub := republisher.NewRepublisher(namesys, repo.Datastore(), privKey, repo.Keystore()) + + if cfg.Ipns.RepublishPeriod != "" { + d, err := time.ParseDuration(cfg.Ipns.RepublishPeriod) + if err != nil { + return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err) + } + + if !util.Debug && (d < time.Minute || d > (time.Hour*24)) { + return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d) + } + + repub.Interval = d + } + + if cfg.Ipns.RecordLifetime != "" { + d, err := time.ParseDuration(cfg.Ipns.RecordLifetime) + if err != nil { + return fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err) + } + + repub.RecordLifetime = d + } + + lc.Run(repub.Run) + return nil +} diff --git a/core/node/libp2p.go b/core/node/libp2p.go new file mode 100644 index 000000000..a2f7c4253 --- /dev/null +++ b/core/node/libp2p.go @@ -0,0 +1,597 @@ +package node + +import ( + "bytes" + "context" + 
"fmt" + "io/ioutil" + "os" + "strings" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-ipfs-config" + nilrouting "github.com/ipfs/go-ipfs-routing/none" + logging "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-autonat-svc" + "github.com/libp2p/go-libp2p-circuit" + circuit "github.com/libp2p/go-libp2p-circuit" + "github.com/libp2p/go-libp2p-connmgr" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-kad-dht" + dhtopts "github.com/libp2p/go-libp2p-kad-dht/opts" + "github.com/libp2p/go-libp2p-metrics" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-peerstore/pstoremem" + "github.com/libp2p/go-libp2p-pnet" + "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p-pubsub-router" + "github.com/libp2p/go-libp2p-quic-transport" + "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p-routing" + "github.com/libp2p/go-libp2p-routing-helpers" + p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/host/routed" + mafilter "github.com/libp2p/go-maddr-filter" + smux "github.com/libp2p/go-stream-muxer" + ma "github.com/multiformats/go-multiaddr" + mplex "github.com/whyrusleeping/go-smux-multiplex" + yamux "github.com/whyrusleeping/go-smux-yamux" + "github.com/whyrusleeping/multiaddr-filter" + mamask "github.com/whyrusleeping/multiaddr-filter" + "go.uber.org/fx" + + "github.com/ipfs/go-ipfs/repo" +) + +var log = logging.Logger("node") + +type HostOption func(ctx context.Context, id peer.ID, ps peerstore.Peerstore, options ...libp2p.Option) (host.Host, error) +type RoutingOption func(context.Context, host.Host, datastore.Batching, record.Validator) (routing.IpfsRouting, error) + +var DefaultHostOption HostOption = constructPeerHost + +// isolates the complex initialization steps +func constructPeerHost(ctx context.Context, id peer.ID, ps 
peerstore.Peerstore, options ...libp2p.Option) (host.Host, error) { + pkey := ps.PrivKey(id) + if pkey == nil { + return nil, fmt.Errorf("missing private key for node ID: %s", id.Pretty()) + } + options = append([]libp2p.Option{libp2p.Identity(pkey), libp2p.Peerstore(ps)}, options...) + return libp2p.New(ctx, options...) +} + +func constructDHTRouting(ctx context.Context, host host.Host, dstore datastore.Batching, validator record.Validator) (routing.IpfsRouting, error) { + return dht.New( + ctx, host, + dhtopts.Datastore(dstore), + dhtopts.Validator(validator), + ) +} + +func constructClientDHTRouting(ctx context.Context, host host.Host, dstore datastore.Batching, validator record.Validator) (routing.IpfsRouting, error) { + return dht.New( + ctx, host, + dhtopts.Client(true), + dhtopts.Datastore(dstore), + dhtopts.Validator(validator), + ) +} + +var DHTOption RoutingOption = constructDHTRouting +var DHTClientOption RoutingOption = constructClientDHTRouting +var NilRouterOption RoutingOption = nilrouting.ConstructNilRouting + +func Peerstore(id peer.ID, sk crypto.PrivKey) peerstore.Peerstore { + ps := pstoremem.NewPeerstore() + + if sk != nil { + ps.AddPrivKey(id, sk) + ps.AddPubKey(id, sk.GetPublic()) + } + + return ps +} + +func P2PAddrFilters(cfg *config.Config) (opts Libp2pOpts, err error) { + for _, s := range cfg.Swarm.AddrFilters { + f, err := mask.NewMask(s) + if err != nil { + return opts, fmt.Errorf("incorrectly formatted address filter in config: %s", s) + } + opts.Opts = append(opts.Opts, libp2p.FilterAddresses(f)) + } + return opts, nil +} + +func P2PBandwidthCounter(cfg *config.Config) (opts Libp2pOpts, reporter metrics.Reporter) { + reporter = metrics.NewBandwidthCounter() + + if !cfg.Swarm.DisableBandwidthMetrics { + opts.Opts = append(opts.Opts, libp2p.BandwidthReporter(reporter)) + } + return opts, reporter +} + +type Libp2pOpts struct { + fx.Out + + Opts []libp2p.Option `group:"libp2p"` +} + +type PNetFingerprint []byte // TODO: find some better 
place +func P2PPNet(repo repo.Repo) (opts Libp2pOpts, fp PNetFingerprint, err error) { + swarmkey, err := repo.SwarmKey() + if err != nil || swarmkey == nil { + return opts, nil, err + } + + protec, err := pnet.NewProtector(bytes.NewReader(swarmkey)) + if err != nil { + return opts, nil, fmt.Errorf("failed to configure private network: %s", err) + } + fp = protec.Fingerprint() + + opts.Opts = append(opts.Opts, libp2p.PrivateNetwork(protec)) + return opts, fp, nil +} + +func P2PPNetChecker(repo repo.Repo, ph host.Host, lc fx.Lifecycle) error { + // TODO: better check? + swarmkey, err := repo.SwarmKey() + if err != nil || swarmkey == nil { + return err + } + + done := make(chan struct{}) + lc.Append(fx.Hook{ + OnStart: func(_ context.Context) error { + go func() { + t := time.NewTicker(30 * time.Second) + <-t.C // swallow one tick + for { + select { + case <-t.C: + if len(ph.Network().Peers()) == 0 { + log.Warning("We are in private network and have no peers.") + log.Warning("This might be configuration mistake.") + } + case <-done: + return + } + } + }() + return nil + }, + OnStop: func(_ context.Context) error { + close(done) + return nil + }, + }) + return nil +} + +func makeAddrsFactory(cfg config.Addresses) (p2pbhost.AddrsFactory, error) { + var annAddrs []ma.Multiaddr + for _, addr := range cfg.Announce { + maddr, err := ma.NewMultiaddr(addr) + if err != nil { + return nil, err + } + annAddrs = append(annAddrs, maddr) + } + + filters := mafilter.NewFilters() + noAnnAddrs := map[string]bool{} + for _, addr := range cfg.NoAnnounce { + f, err := mamask.NewMask(addr) + if err == nil { + filters.AddDialFilter(f) + continue + } + maddr, err := ma.NewMultiaddr(addr) + if err != nil { + return nil, err + } + noAnnAddrs[maddr.String()] = true + } + + return func(allAddrs []ma.Multiaddr) []ma.Multiaddr { + var addrs []ma.Multiaddr + if len(annAddrs) > 0 { + addrs = annAddrs + } else { + addrs = allAddrs + } + + var out []ma.Multiaddr + for _, maddr := range addrs { + // 
check for exact matches + ok := noAnnAddrs[maddr.String()] + // check for /ipcidr matches + if !ok && !filters.AddrBlocked(maddr) { + out = append(out, maddr) + } + } + return out + }, nil +} + +func P2PAddrsFactory(cfg *config.Config) (opts Libp2pOpts, err error) { + addrsFactory, err := makeAddrsFactory(cfg.Addresses) + if err != nil { + return opts, err + } + if !cfg.Swarm.DisableRelay { + addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs) + } + opts.Opts = append(opts.Opts, libp2p.AddrsFactory(addrsFactory)) + return +} + +func filterRelayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { + var raddrs []ma.Multiaddr + for _, addr := range addrs { + _, err := addr.ValueForProtocol(circuit.P_CIRCUIT) + if err == nil { + continue + } + raddrs = append(raddrs, addr) + } + return raddrs +} + +func composeAddrsFactory(f, g p2pbhost.AddrsFactory) p2pbhost.AddrsFactory { + return func(addrs []ma.Multiaddr) []ma.Multiaddr { + return f(g(addrs)) + } +} + +func P2PConnectionManager(cfg *config.Config) (opts Libp2pOpts, err error) { + grace := config.DefaultConnMgrGracePeriod + low := config.DefaultConnMgrHighWater + high := config.DefaultConnMgrHighWater + + switch cfg.Swarm.ConnMgr.Type { + case "": + // 'default' value is the basic connection manager + return + case "none": + return opts, nil + case "basic": + grace, err = time.ParseDuration(cfg.Swarm.ConnMgr.GracePeriod) + if err != nil { + return opts, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err) + } + + low = cfg.Swarm.ConnMgr.LowWater + high = cfg.Swarm.ConnMgr.HighWater + default: + return opts, fmt.Errorf("unrecognized ConnMgr.Type: %q", cfg.Swarm.ConnMgr.Type) + } + + cm := connmgr.NewConnManager(low, high, grace) + opts.Opts = append(opts.Opts, libp2p.ConnectionManager(cm)) + return +} + +func makeSmuxTransportOption(mplexExp bool) libp2p.Option { + const yamuxID = "/yamux/1.0.0" + const mplexID = "/mplex/6.7.0" + + ymxtpt := &yamux.Transport{ + AcceptBacklog: 512, + ConnectionWriteTimeout: 
time.Second * 10, + KeepAliveInterval: time.Second * 30, + EnableKeepAlive: true, + MaxStreamWindowSize: uint32(16 * 1024 * 1024), // 16MiB + LogOutput: ioutil.Discard, + } + + if os.Getenv("YAMUX_DEBUG") != "" { + ymxtpt.LogOutput = os.Stderr + } + + muxers := map[string]smux.Transport{yamuxID: ymxtpt} + if mplexExp { + muxers[mplexID] = mplex.DefaultTransport + } + + // Allow muxer preference order overriding + order := []string{yamuxID, mplexID} + if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" { + order = strings.Fields(prefs) + } + + opts := make([]libp2p.Option, 0, len(order)) + for _, id := range order { + tpt, ok := muxers[id] + if !ok { + log.Warning("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id) + continue + } + delete(muxers, id) + opts = append(opts, libp2p.Muxer(id, tpt)) + } + + return libp2p.ChainOptions(opts...) +} + +func P2PSmuxTransport(bcfg *BuildCfg) (opts Libp2pOpts, err error) { + opts.Opts = append(opts.Opts, makeSmuxTransportOption(bcfg.getOpt("mplex"))) + return +} + +func P2PNatPortMap(cfg *config.Config) (opts Libp2pOpts, err error) { + if !cfg.Swarm.DisableNatPortMap { + opts.Opts = append(opts.Opts, libp2p.NATPortMap()) + } + return +} + +func P2PRelay(cfg *config.Config) (opts Libp2pOpts, err error) { + if cfg.Swarm.DisableRelay { + // Enabled by default. 
+ opts.Opts = append(opts.Opts, libp2p.DisableRelay()) + } else { + relayOpts := []relay.RelayOpt{relay.OptDiscovery} + if cfg.Swarm.EnableRelayHop { + relayOpts = append(relayOpts, relay.OptHop) + } + opts.Opts = append(opts.Opts, libp2p.EnableRelay(relayOpts...)) + } + return +} + +func P2PAutoRealy(cfg *config.Config) (opts Libp2pOpts, err error) { + // enable autorelay + if cfg.Swarm.EnableAutoRelay { + opts.Opts = append(opts.Opts, libp2p.EnableAutoRelay()) + } + return +} + +func P2PDefaultTransports() (opts Libp2pOpts, err error) { + opts.Opts = append(opts.Opts, libp2p.DefaultTransports) + return +} + +func P2PQUIC(cfg *config.Config) (opts Libp2pOpts, err error) { + if cfg.Experimental.QUIC { + opts.Opts = append(opts.Opts, libp2p.Transport(libp2pquic.NewTransport)) + } + return +} + +type P2PHostIn struct { + fx.In + + BCfg *BuildCfg + Repo repo.Repo + Validator record.Validator + HostOption HostOption + ID peer.ID + Peerstore peerstore.Peerstore + + Opts [][]libp2p.Option `group:"libp2p"` +} + +type BaseRouting routing.IpfsRouting +type P2PHostOut struct { + fx.Out + + Host host.Host + Routing BaseRouting + IpfsDHT *dht.IpfsDHT +} + +// TODO: move some of this into params struct +func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut, err error) { + opts := []libp2p.Option{libp2p.NoListenAddrs} + for _, o := range params.Opts { + opts = append(opts, o...) + } + + ctx, cancel := context.WithCancel(mctx) + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + cancel() + return nil + }, + }) + + opts = append(opts, libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { + r, err := params.BCfg.Routing(ctx, h, params.Repo.Datastore(), params.Validator) + out.Routing = r + return r, err + })) + + out.Host, err = params.HostOption(ctx, params.ID, params.Peerstore, opts...) 
+ if err != nil { + return P2PHostOut{}, err + } + + // this code is necessary just for tests: mock network constructions + // ignore the libp2p constructor options that actually construct the routing! + if out.Routing == nil { + r, err := params.BCfg.Routing(ctx, out.Host, params.Repo.Datastore(), params.Validator) + if err != nil { + return P2PHostOut{}, err + } + out.Routing = r + out.Host = routedhost.Wrap(out.Host, out.Routing) + } + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return out.Host.Close() + }, + }) + + // TODO: break this up into more DI units + // TODO: I'm not a fan of type assertions like this but the + // `RoutingOption` system doesn't currently provide access to the + // IpfsNode. + // + // Ideally, we'd do something like: + // + // 1. Add some fancy method to introspect into tiered routers to extract + // things like the pubsub router or the DHT (complicated, messy, + // probably not worth it). + // 2. Pass the IpfsNode into the RoutingOption (would also remove the + // PSRouter case below. + // 3. Introduce some kind of service manager? (my personal favorite but + // that requires a fair amount of work). 
+ if dht, ok := out.Routing.(*dht.IpfsDHT); ok { + out.IpfsDHT = dht + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return out.IpfsDHT.Close() + }, + }) + } + + return out, err +} + +type p2pRoutingIn struct { + fx.In + + BCfg *BuildCfg + Repo repo.Repo + Validator record.Validator + Host host.Host + PubSub *pubsub.PubSub + + BaseRouting BaseRouting +} + +type p2pRoutingOut struct { + fx.Out + + IpfsRouting routing.IpfsRouting + PSRouter *namesys.PubsubValueStore // TODO: optional +} + +func P2POnlineRouting(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { + out.IpfsRouting = in.BaseRouting + + if in.BCfg.getOpt("ipnsps") { + out.PSRouter = namesys.NewPubsubValueStore( + lifecycleCtx(mctx, lc), + in.Host, + in.BaseRouting, + in.PubSub, + in.Validator, + ) + + out.IpfsRouting = routinghelpers.Tiered{ + Routers: []routing.IpfsRouting{ + // Always check pubsub first. + &routinghelpers.Compose{ + ValueStore: &routinghelpers.LimitedValueStore{ + ValueStore: out.PSRouter, + Namespaces: []string{"ipns"}, + }, + }, + in.BaseRouting, + }, + Validator: in.Validator, + } + } + return out +} + +func AutoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, host host.Host) error { + if !cfg.Swarm.EnableAutoNATService { + return nil + } + var opts []libp2p.Option + if cfg.Experimental.QUIC { + opts = append(opts, libp2p.DefaultTransports, libp2p.Transport(libp2pquic.NewTransport)) + } + + _, err := autonat.NewAutoNATService(lifecycleCtx(mctx, lc), host, opts...) 
+ return err +} + +func Pubsub(mctx MetricsCtx, lc fx.Lifecycle, host host.Host, bcfg *BuildCfg, cfg *config.Config) (service *pubsub.PubSub, err error) { + if !(bcfg.getOpt("pubsub") || bcfg.getOpt("ipnsps")) { + return nil, nil // TODO: mark optional + } + + var pubsubOptions []pubsub.Option + if cfg.Pubsub.DisableSigning { + pubsubOptions = append(pubsubOptions, pubsub.WithMessageSigning(false)) + } + + if cfg.Pubsub.StrictSignatureVerification { + pubsubOptions = append(pubsubOptions, pubsub.WithStrictSignatureVerification(true)) + } + + switch cfg.Pubsub.Router { + case "": + fallthrough + case "floodsub": + service, err = pubsub.NewFloodSub(lifecycleCtx(mctx, lc), host, pubsubOptions...) + + case "gossipsub": + service, err = pubsub.NewGossipSub(lifecycleCtx(mctx, lc), host, pubsubOptions...) + + default: + err = fmt.Errorf("Unknown pubsub router %s", cfg.Pubsub.Router) + } + + return service, err +} + +func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) { + var listen []ma.Multiaddr + for _, addr := range cfg.Addresses.Swarm { + maddr, err := ma.NewMultiaddr(addr) + if err != nil { + return nil, fmt.Errorf("failure to parse config.Addresses.Swarm: %s", cfg.Addresses.Swarm) + } + listen = append(listen, maddr) + } + + return listen, nil +} + +func StartListening(host host.Host, cfg *config.Config) error { + listenAddrs, err := listenAddresses(cfg) + if err != nil { + return err + } + + // Actually start listening: + if err := host.Network().Listen(listenAddrs...); err != nil { + return err + } + + // list out our addresses + addrs, err := host.Network().InterfaceListenAddresses() + if err != nil { + return err + } + log.Infof("Swarm listening at: %s", addrs) + return nil +} + +func P2PHostOption(bcfg *BuildCfg) (hostOption HostOption, err error) { + hostOption = bcfg.Host + if bcfg.DisableEncryptedConnections { + innerHostOption := hostOption + hostOption = func(ctx context.Context, id peer.ID, ps peerstore.Peerstore, options ...libp2p.Option) 
(host.Host, error) { + return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...) + } + // TODO: shouldn't this be Errorf to guarantee visibility? + log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS. + You will not be able to connect to any nodes configured to use encrypted connections`) + } + return hostOption, nil +} diff --git a/core/node/provider.go b/core/node/provider.go new file mode 100644 index 000000000..8c6792440 --- /dev/null +++ b/core/node/provider.go @@ -0,0 +1,59 @@ +package node + +import ( + "fmt" + "time" + + "github.com/ipfs/go-ipfs-config" + "github.com/ipfs/go-ipld-format" + "github.com/libp2p/go-libp2p-routing" + "go.uber.org/fx" + + "github.com/ipfs/go-ipfs/exchange/reprovide" + "github.com/ipfs/go-ipfs/pin" + "github.com/ipfs/go-ipfs/provider" + "github.com/ipfs/go-ipfs/repo" +) + +const kReprovideFrequency = time.Hour * 12 + +func ProviderQueue(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo) (*provider.Queue, error) { + return provider.NewQueue(lifecycleCtx(mctx, lc), "provider-v1", repo.Datastore()) +} + +func ProviderCtor(mctx MetricsCtx, lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting) provider.Provider { + return provider.NewProvider(lifecycleCtx(mctx, lc), queue, rt) +} + +func ReproviderCtor(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, bs BaseBlocks, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*reprovide.Reprovider, error) { + var keyProvider reprovide.KeyChanFunc + + switch cfg.Reprovider.Strategy { + case "all": + fallthrough + case "": + keyProvider = reprovide.NewBlockstoreProvider(bs) + case "roots": + keyProvider = reprovide.NewPinnedProvider(pinning, ds, true) + case "pinned": + keyProvider = reprovide.NewPinnedProvider(pinning, ds, false) + default: + return nil, fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy) + } + return reprovide.NewReprovider(lifecycleCtx(mctx, lc), rt, keyProvider), nil +} + +func 
Reprovider(cfg *config.Config, reprovider *reprovide.Reprovider) error { + reproviderInterval := kReprovideFrequency + if cfg.Reprovider.Interval != "" { + dur, err := time.ParseDuration(cfg.Reprovider.Interval) + if err != nil { + return err + } + + reproviderInterval = dur + } + + go reprovider.Run(reproviderInterval) + return nil +} diff --git a/core/node/storage.go b/core/node/storage.go new file mode 100644 index 000000000..e956293c6 --- /dev/null +++ b/core/node/storage.go @@ -0,0 +1,94 @@ +package node + +import ( + "context" + "os" + "syscall" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/retrystore" + blockstore "github.com/ipfs/go-ipfs-blockstore" + config "github.com/ipfs/go-ipfs-config" + "go.uber.org/fx" + + "github.com/ipfs/go-ipfs/filestore" + "github.com/ipfs/go-ipfs/repo" + "github.com/ipfs/go-ipfs/thirdparty/cidv0v1" + "github.com/ipfs/go-ipfs/thirdparty/verifbs" +) + +func isTooManyFDError(err error) bool { + perr, ok := err.(*os.PathError) + if ok && perr.Err == syscall.EMFILE { + return true + } + + return false +} + +func RepoConfig(repo repo.Repo) (*config.Config, error) { + return repo.Config() +} + +func DatastoreCtor(repo repo.Repo) datastore.Datastore { + return repo.Datastore() +} + +type BaseBlocks blockstore.Blockstore + +func BaseBlockstoreCtor(mctx MetricsCtx, repo repo.Repo, cfg *config.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs BaseBlocks, err error) { + rds := &retrystore.Datastore{ + Batching: repo.Datastore(), + Delay: time.Millisecond * 200, + Retries: 6, + TempErrFunc: isTooManyFDError, + } + // hash security + bs = blockstore.NewBlockstore(rds) + bs = &verifbs.VerifBS{Blockstore: bs} + + opts := blockstore.DefaultCacheOpts() + opts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize + if !bcfg.Permanent { + opts.HasBloomFilterSize = 0 + } + + if !bcfg.NilRepo { + ctx, cancel := context.WithCancel(mctx) + + lc.Append(fx.Hook{ + OnStop: func(context context.Context) error { + cancel() + return nil 
+ }, + }) + bs, err = blockstore.CachedBlockstore(ctx, bs, opts) + if err != nil { + return nil, err + } + } + + bs = blockstore.NewIdStore(bs) + bs = cidv0v1.NewBlockstore(bs) + + if cfg.Datastore.HashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly? + bs.HashOnRead(true) + } + + return +} + +func GcBlockstoreCtor(repo repo.Repo, bb BaseBlocks, cfg *config.Config) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) { + gclocker = blockstore.NewGCLocker() + gcbs = blockstore.NewGCBlockstore(bb, gclocker) + + if cfg.Experimental.FilestoreEnabled || cfg.Experimental.UrlstoreEnabled { + // hash security + fstore = filestore.NewFilestore(bb, repo.FileManager()) // TODO: mark optional + gcbs = blockstore.NewGCBlockstore(fstore, gclocker) + gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs} + } + bs = gcbs + return +} diff --git a/namesys/republisher/repub_test.go b/namesys/republisher/repub_test.go index 8f0048c4c..48a0b086f 100644 --- a/namesys/republisher/repub_test.go +++ b/namesys/republisher/repub_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/ipfs/go-ipfs/core" + "github.com/ipfs/go-ipfs/core/bootstrap" mock "github.com/ipfs/go-ipfs/core/mock" namesys "github.com/ipfs/go-ipfs/namesys" . 
"github.com/ipfs/go-ipfs/namesys/republisher" @@ -45,7 +46,7 @@ func TestRepublish(t *testing.T) { t.Fatal(err) } - bsinf := core.BootstrapConfigWithPeers( + bsinf := bootstrap.BootstrapConfigWithPeers( []pstore.PeerInfo{ nodes[0].Peerstore.PeerInfo(nodes[0].Identity), }, diff --git a/test/integration/addcat_test.go b/test/integration/addcat_test.go index b0def746d..98e6936ee 100644 --- a/test/integration/addcat_test.go +++ b/test/integration/addcat_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/ipfs/go-ipfs/core" + "github.com/ipfs/go-ipfs/core/bootstrap" "github.com/ipfs/go-ipfs/core/coreapi" mock "github.com/ipfs/go-ipfs/core/mock" "github.com/ipfs/go-ipfs/thirdparty/unit" @@ -140,10 +141,10 @@ func DirectAddCat(data []byte, conf testutil.LatencyConfig) error { bs1 := []pstore.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)} bs2 := []pstore.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)} - if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); err != nil { + if err := catter.Bootstrap(bootstrap.BootstrapConfigWithPeers(bs1)); err != nil { return err } - if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil { + if err := adder.Bootstrap(bootstrap.BootstrapConfigWithPeers(bs2)); err != nil { return err } diff --git a/test/integration/bench_cat_test.go b/test/integration/bench_cat_test.go index e8a2322de..a40fcfe37 100644 --- a/test/integration/bench_cat_test.go +++ b/test/integration/bench_cat_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/ipfs/go-ipfs/core" + "github.com/ipfs/go-ipfs/core/bootstrap" "github.com/ipfs/go-ipfs/core/coreapi" mock "github.com/ipfs/go-ipfs/core/mock" "github.com/ipfs/go-ipfs/thirdparty/unit" @@ -83,10 +84,10 @@ func benchCat(b *testing.B, data []byte, conf testutil.LatencyConfig) error { bs1 := []pstore.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)} bs2 := []pstore.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)} - if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); 
err != nil { + if err := catter.Bootstrap(bootstrap.BootstrapConfigWithPeers(bs1)); err != nil { return err } - if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil { + if err := adder.Bootstrap(bootstrap.BootstrapConfigWithPeers(bs2)); err != nil { return err } diff --git a/test/integration/bitswap_wo_routing_test.go b/test/integration/bitswap_wo_routing_test.go index af4406633..30b8ce30d 100644 --- a/test/integration/bitswap_wo_routing_test.go +++ b/test/integration/bitswap_wo_routing_test.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-block-format" "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/core/mock" + "github.com/ipfs/go-ipfs/core/node" cid "github.com/ipfs/go-cid" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -26,7 +27,7 @@ func TestBitswapWithoutRouting(t *testing.T) { n, err := core.NewNode(ctx, &core.BuildCfg{ Online: true, Host: coremock.MockHostOption(mn), - Routing: core.NilRouterOption, // no routing + Routing: node.NilRouterOption, // no routing }) if err != nil { t.Fatal(err) diff --git a/test/integration/three_legged_cat_test.go b/test/integration/three_legged_cat_test.go index 953c7e370..1fc0e7bf2 100644 --- a/test/integration/three_legged_cat_test.go +++ b/test/integration/three_legged_cat_test.go @@ -10,6 +10,7 @@ import ( "time" core "github.com/ipfs/go-ipfs/core" + bootstrap2 "github.com/ipfs/go-ipfs/core/bootstrap" "github.com/ipfs/go-ipfs/core/coreapi" mock "github.com/ipfs/go-ipfs/core/mock" "github.com/ipfs/go-ipfs/thirdparty/unit" @@ -118,7 +119,7 @@ func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error { } bis := bootstrap.Peerstore.PeerInfo(bootstrap.PeerHost.ID()) - bcfg := core.BootstrapConfigWithPeers([]pstore.PeerInfo{bis}) + bcfg := bootstrap2.BootstrapConfigWithPeers([]pstore.PeerInfo{bis}) if err := adder.Bootstrap(bcfg); err != nil { return err } From 7046626eccfb1d8e1c500a637c4f85dbef373972 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 3 
Apr 2019 03:51:45 +0200 Subject: [PATCH 16/27] Move pathresolve MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/commands/refs.go | 3 ++- core/commands/tar.go | 4 +-- core/coreapi/path.go | 6 ++--- core/corehttp/gateway_handler.go | 3 ++- fuse/ipns/ipns_unix.go | 4 ++- .../resolve}/pathresolver_test.go | 11 ++++---- .../resolve/resolve.go | 27 ++++++++++--------- 7 files changed, 33 insertions(+), 25 deletions(-) rename {core => namesys/resolve}/pathresolver_test.go (63%) rename core/pathresolver.go => namesys/resolve/resolve.go (74%) diff --git a/core/commands/refs.go b/core/commands/refs.go index d63786f87..9a006cb8d 100644 --- a/core/commands/refs.go +++ b/core/commands/refs.go @@ -9,6 +9,7 @@ import ( core "github.com/ipfs/go-ipfs/core" cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv" + "github.com/ipfs/go-ipfs/namesys/resolve" cid "github.com/ipfs/go-cid" cidenc "github.com/ipfs/go-cidutil/cidenc" @@ -173,7 +174,7 @@ func objectsForPaths(ctx context.Context, n *core.IpfsNode, paths []string) ([]i return nil, err } - o, err := core.Resolve(ctx, n.Namesys, n.Resolver, p) + o, err := resolve.Resolve(ctx, n.Namesys, n.Resolver, p) if err != nil { return nil, err } diff --git a/core/commands/tar.go b/core/commands/tar.go index 488193f18..d10c97fdd 100644 --- a/core/commands/tar.go +++ b/core/commands/tar.go @@ -4,8 +4,8 @@ import ( "fmt" "io" - "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/core/commands/cmdenv" + "github.com/ipfs/go-ipfs/namesys/resolve" tar "github.com/ipfs/go-ipfs/tar" "github.com/ipfs/go-ipfs-cmdkit" @@ -97,7 +97,7 @@ var tarCatCmd = &cmds.Command{ return err } - root, err := core.Resolve(req.Context, nd.Namesys, nd.Resolver, p) + root, err := resolve.Resolve(req.Context, nd.Namesys, nd.Resolver, p) if err != nil { return err } diff --git a/core/coreapi/path.go b/core/coreapi/path.go index 4c7837f36..d78650278 100644 --- 
a/core/coreapi/path.go +++ b/core/coreapi/path.go @@ -5,7 +5,7 @@ import ( "fmt" gopath "path" - "github.com/ipfs/go-ipfs/core" + node2 "github.com/ipfs/go-ipfs/namesys/resolve" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" @@ -42,8 +42,8 @@ func (api *CoreAPI) ResolvePath(ctx context.Context, p path.Path) (path.Resolved } ipath := ipfspath.Path(p.String()) - ipath, err := core.ResolveIPNS(ctx, api.namesys, ipath) - if err == core.ErrNoNamesys { + ipath, err := node2.ResolveIPNS(ctx, api.namesys, ipath) + if err == node2.ErrNoNamesys { return nil, coreiface.ErrOffline } else if err != nil { return nil, err diff --git a/core/corehttp/gateway_handler.go b/core/corehttp/gateway_handler.go index cdbcce594..72566930b 100644 --- a/core/corehttp/gateway_handler.go +++ b/core/corehttp/gateway_handler.go @@ -14,6 +14,7 @@ import ( "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/dagutils" + "github.com/ipfs/go-ipfs/namesys/resolve" "github.com/dustin/go-humanize" "github.com/ipfs/go-cid" @@ -423,7 +424,7 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) { } var newcid cid.Cid - rnode, err := core.Resolve(ctx, i.node.Namesys, i.node.Resolver, rootPath) + rnode, err := resolve.Resolve(ctx, i.node.Namesys, i.node.Resolver, rootPath) switch ev := err.(type) { case resolver.ErrNoLink: // ev.Node < node where resolve failed diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index d9f6c6740..fb56d8353 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -13,6 +13,8 @@ import ( core "github.com/ipfs/go-ipfs/core" namesys "github.com/ipfs/go-ipfs/namesys" + node2 "github.com/ipfs/go-ipfs/namesys/resolve" + dag "github.com/ipfs/go-merkledag" path "github.com/ipfs/go-path" ft "github.com/ipfs/go-unixfs" @@ -96,7 +98,7 @@ func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string return nil, err } - node, err := core.Resolve(ctx, ipfs.Namesys, ipfs.Resolver, p) + node, err := 
node2.Resolve(ctx, ipfs.Namesys, ipfs.Resolver, p) switch err { case nil: case namesys.ErrResolveFailed: diff --git a/core/pathresolver_test.go b/namesys/resolve/pathresolver_test.go similarity index 63% rename from core/pathresolver_test.go rename to namesys/resolve/pathresolver_test.go index 92318f275..fe578b5d3 100644 --- a/core/pathresolver_test.go +++ b/namesys/resolve/pathresolver_test.go @@ -1,10 +1,11 @@ -package core_test +package resolve_test import ( "testing" - core "github.com/ipfs/go-ipfs/core" coremock "github.com/ipfs/go-ipfs/core/mock" + "github.com/ipfs/go-ipfs/namesys/resolve" + path "github.com/ipfs/go-path" ) @@ -14,17 +15,17 @@ func TestResolveNoComponents(t *testing.T) { t.Fatal("Should have constructed a mock node", err) } - _, err = core.Resolve(n.Context(), n.Namesys, n.Resolver, path.Path("/ipns/")) + _, err = resolve.Resolve(n.Context(), n.Namesys, n.Resolver, path.Path("/ipns/")) if err != path.ErrNoComponents { t.Fatal("Should error with no components (/ipns/).", err) } - _, err = core.Resolve(n.Context(), n.Namesys, n.Resolver, path.Path("/ipfs/")) + _, err = resolve.Resolve(n.Context(), n.Namesys, n.Resolver, path.Path("/ipfs/")) if err != path.ErrNoComponents { t.Fatal("Should error with no components (/ipfs/).", err) } - _, err = core.Resolve(n.Context(), n.Namesys, n.Resolver, path.Path("/../..")) + _, err = resolve.Resolve(n.Context(), n.Namesys, n.Resolver, path.Path("/../..")) if err != path.ErrBadPath { t.Fatal("Should error with invalid path.", err) } diff --git a/core/pathresolver.go b/namesys/resolve/resolve.go similarity index 74% rename from core/pathresolver.go rename to namesys/resolve/resolve.go index 21c2a84be..bd1667fa4 100644 --- a/core/pathresolver.go +++ b/namesys/resolve/resolve.go @@ -1,18 +1,21 @@ -package core +package resolve import ( "context" "errors" "strings" - namesys "github.com/ipfs/go-ipfs/namesys" - - ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-ipld-format" + log2 
"github.com/ipfs/go-log" logging "github.com/ipfs/go-log" - path "github.com/ipfs/go-path" - resolver "github.com/ipfs/go-path/resolver" + "github.com/ipfs/go-path" + "github.com/ipfs/go-path/resolver" + + "github.com/ipfs/go-ipfs/namesys" ) +var log = logging.Logger("nsresolv") + // ErrNoNamesys is an explicit error for when an IPFS node doesn't // (yet) have a name system var ErrNoNamesys = errors.New( @@ -27,34 +30,34 @@ func ResolveIPNS(ctx context.Context, nsys namesys.NameSystem, p path.Path) (pat // TODO(cryptix): we should be able to query the local cache for the path if nsys == nil { - evt.Append(logging.LoggableMap{"error": ErrNoNamesys.Error()}) + evt.Append(log2.LoggableMap{"error": ErrNoNamesys.Error()}) return "", ErrNoNamesys } seg := p.Segments() if len(seg) < 2 || seg[1] == "" { // just "/" without further segments - evt.Append(logging.LoggableMap{"error": path.ErrNoComponents.Error()}) + evt.Append(log2.LoggableMap{"error": path.ErrNoComponents.Error()}) return "", path.ErrNoComponents } extensions := seg[2:] resolvable, err := path.FromSegments("/", seg[0], seg[1]) if err != nil { - evt.Append(logging.LoggableMap{"error": err.Error()}) + evt.Append(log2.LoggableMap{"error": err.Error()}) return "", err } respath, err := nsys.Resolve(ctx, resolvable.String()) if err != nil { - evt.Append(logging.LoggableMap{"error": err.Error()}) + evt.Append(log2.LoggableMap{"error": err.Error()}) return "", err } segments := append(respath.Segments(), extensions...) p, err = path.FromSegments("/", segments...) if err != nil { - evt.Append(logging.LoggableMap{"error": err.Error()}) + evt.Append(log2.LoggableMap{"error": err.Error()}) return "", err } } @@ -64,7 +67,7 @@ func ResolveIPNS(ctx context.Context, nsys namesys.NameSystem, p path.Path) (pat // Resolve resolves the given path by parsing out protocol-specific // entries (e.g. /ipns/) and then going through the /ipfs/ // entries and returning the final node. 
-func Resolve(ctx context.Context, nsys namesys.NameSystem, r *resolver.Resolver, p path.Path) (ipld.Node, error) { +func Resolve(ctx context.Context, nsys namesys.NameSystem, r *resolver.Resolver, p path.Path) (format.Node, error) { p, err := ResolveIPNS(ctx, nsys, p) if err != nil { return nil, err From fd0c06a825f52be86f4b8a9f9ed2a1b6ac68920e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 3 Apr 2019 16:13:28 +0200 Subject: [PATCH 17/27] Remove DI module dependency on BuildCfg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 20 +++++---- core/node/groups.go | 64 ++++++++++++++++---------- core/node/libp2p.go | 105 ++++++++++++++++++++----------------------- core/node/storage.go | 78 ++++++++++++++++---------------- 4 files changed, 142 insertions(+), 125 deletions(-) diff --git a/core/builder.go b/core/builder.go index 02b772e70..1c1bf880c 100644 --- a/core/builder.go +++ b/core/builder.go @@ -105,18 +105,22 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { return cfg.Repo }) - // TODO: Remove this, use only for passing node config - cfgOption := fx.Provide(func() *node.BuildCfg { - return (*node.BuildCfg)(cfg) - }) - metricsCtx := fx.Provide(func() node.MetricsCtx { return node.MetricsCtx(ctx) }) + hostOption := fx.Provide(func() node.HostOption { + return cfg.Host + }) + + routingOption := fx.Provide(func() node.RoutingOption { + return cfg.Routing + }) + params := fx.Options( repoOption, - cfgOption, + hostOption, + routingOption, metricsCtx, ) @@ -137,10 +141,10 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { fx.Provide(baseProcess), params, - node.Storage, + node.Storage((*node.BuildCfg)(cfg)), node.Identity, node.IPNS, - node.Networked(cfg.Online), + node.Networked((*node.BuildCfg)(cfg)), fx.Invoke(setupSharding), diff --git a/core/node/groups.go b/core/node/groups.go index 
202c0c4f2..083d69d6b 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -9,24 +9,20 @@ import ( "github.com/ipfs/go-ipfs/provider" ) -var LibP2P = fx.Options( +var BaseLibP2P = fx.Options( fx.Provide(P2PAddrFilters), fx.Provide(P2PBandwidthCounter), fx.Provide(P2PPNet), fx.Provide(P2PAddrsFactory), fx.Provide(P2PConnectionManager), - fx.Provide(P2PSmuxTransport), fx.Provide(P2PNatPortMap), fx.Provide(P2PRelay), fx.Provide(P2PAutoRealy), fx.Provide(P2PDefaultTransports), fx.Provide(P2PQUIC), - fx.Provide(P2PHostOption), fx.Provide(P2PHost), - fx.Provide(P2POnlineRouting), - fx.Provide(Pubsub), fx.Provide(NewDiscoveryHandler), fx.Invoke(AutoNATService), @@ -35,12 +31,26 @@ var LibP2P = fx.Options( fx.Invoke(SetupDiscovery), ) -var Storage = fx.Options( - fx.Provide(RepoConfig), - fx.Provide(DatastoreCtor), - fx.Provide(BaseBlockstoreCtor), - fx.Provide(GcBlockstoreCtor), -) +func LibP2P(cfg *BuildCfg) fx.Option { + return fx.Options( + BaseLibP2P, + + MaybeProvide(P2PNoSecurity, cfg.DisableEncryptedConnections), + MaybeProvide(Pubsub, cfg.getOpt("pubsub") || cfg.getOpt("ipnsps")), + + fx.Provide(P2PSmuxTransport(cfg.getOpt("mplex"))), + fx.Provide(P2POnlineRouting(cfg.getOpt("ipnsps"))), + ) +} + +func Storage(cfg *BuildCfg) fx.Option { + return fx.Options( + fx.Provide(RepoConfig), + fx.Provide(DatastoreCtor), + fx.Provide(BaseBlockstoreCtor(cfg.Permanent, cfg.NilRepo)), + fx.Provide(GcBlockstoreCtor), + ) +} var Identity = fx.Options( fx.Provide(PeerID), @@ -61,18 +71,19 @@ var Providers = fx.Options( fx.Invoke(provider.Provider.Run), ) -var Online = fx.Options( - fx.Provide(OnlineExchangeCtor), - fx.Provide(OnlineNamesysCtor), +func Online(cfg *BuildCfg) fx.Option { + return fx.Options( + fx.Provide(OnlineExchangeCtor), + fx.Provide(OnlineNamesysCtor), - fx.Invoke(IpnsRepublisher), + fx.Invoke(IpnsRepublisher), - fx.Provide(p2p.NewP2P), - - LibP2P, - Providers, -) + fx.Provide(p2p.NewP2P), + LibP2P(cfg), + Providers, + ) +} var Offline = fx.Options( 
fx.Provide(offline.Exchange), fx.Provide(OfflineNamesysCtor), @@ -80,9 +91,16 @@ var Offline = fx.Options( fx.Provide(provider.NewOfflineProvider), ) -func Networked(online bool) fx.Option { - if online { - return Online +func Networked(cfg *BuildCfg) fx.Option { + if cfg.Online { + return Online(cfg) } return Offline } + +func MaybeProvide(opt interface{}, enable bool) fx.Option { + if enable { + return fx.Provide(opt) + } + return fx.Options() +} diff --git a/core/node/libp2p.go b/core/node/libp2p.go index a2f7c4253..8405b8cc9 100644 --- a/core/node/libp2p.go +++ b/core/node/libp2p.go @@ -320,9 +320,11 @@ func makeSmuxTransportOption(mplexExp bool) libp2p.Option { return libp2p.ChainOptions(opts...) } -func P2PSmuxTransport(bcfg *BuildCfg) (opts Libp2pOpts, err error) { - opts.Opts = append(opts.Opts, makeSmuxTransportOption(bcfg.getOpt("mplex"))) - return +func P2PSmuxTransport(mplex bool) func() (opts Libp2pOpts, err error) { + return func() (opts Libp2pOpts, err error) { + opts.Opts = append(opts.Opts, makeSmuxTransportOption(mplex)) + return + } } func P2PNatPortMap(cfg *config.Config) (opts Libp2pOpts, err error) { @@ -366,15 +368,23 @@ func P2PQUIC(cfg *config.Config) (opts Libp2pOpts, err error) { return } +func P2PNoSecurity() (opts Libp2pOpts) { + opts.Opts = append(opts.Opts, libp2p.NoSecurity) + // TODO: shouldn't this be Errorf to guarantee visibility? + log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS. 
+ You will not be able to connect to any nodes configured to use encrypted connections`) + return opts +} + type P2PHostIn struct { fx.In - BCfg *BuildCfg - Repo repo.Repo - Validator record.Validator - HostOption HostOption - ID peer.ID - Peerstore peerstore.Peerstore + Repo repo.Repo + Validator record.Validator + HostOption HostOption + RoutingOption RoutingOption + ID peer.ID + Peerstore peerstore.Peerstore Opts [][]libp2p.Option `group:"libp2p"` } @@ -404,7 +414,7 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut }) opts = append(opts, libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { - r, err := params.BCfg.Routing(ctx, h, params.Repo.Datastore(), params.Validator) + r, err := params.RoutingOption(ctx, h, params.Repo.Datastore(), params.Validator) out.Routing = r return r, err })) @@ -417,7 +427,7 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut // this code is necessary just for tests: mock network constructions // ignore the libp2p constructor options that actually construct the routing! 
if out.Routing == nil { - r, err := params.BCfg.Routing(ctx, out.Host, params.Repo.Datastore(), params.Validator) + r, err := params.RoutingOption(ctx, out.Host, params.Repo.Datastore(), params.Validator) if err != nil { return P2PHostOut{}, err } @@ -461,11 +471,10 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut type p2pRoutingIn struct { fx.In - BCfg *BuildCfg Repo repo.Repo Validator record.Validator Host host.Host - PubSub *pubsub.PubSub + PubSub *pubsub.PubSub `optional:"true"` BaseRouting BaseRouting } @@ -474,36 +483,38 @@ type p2pRoutingOut struct { fx.Out IpfsRouting routing.IpfsRouting - PSRouter *namesys.PubsubValueStore // TODO: optional + PSRouter *namesys.PubsubValueStore } -func P2POnlineRouting(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { - out.IpfsRouting = in.BaseRouting +func P2POnlineRouting(ipnsps bool) func(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { + return func(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { + out.IpfsRouting = in.BaseRouting - if in.BCfg.getOpt("ipnsps") { - out.PSRouter = namesys.NewPubsubValueStore( - lifecycleCtx(mctx, lc), - in.Host, - in.BaseRouting, - in.PubSub, - in.Validator, - ) - - out.IpfsRouting = routinghelpers.Tiered{ - Routers: []routing.IpfsRouting{ - // Always check pubsub first. - &routinghelpers.Compose{ - ValueStore: &routinghelpers.LimitedValueStore{ - ValueStore: out.PSRouter, - Namespaces: []string{"ipns"}, - }, - }, + if ipnsps { + out.PSRouter = namesys.NewPubsubValueStore( + lifecycleCtx(mctx, lc), + in.Host, in.BaseRouting, - }, - Validator: in.Validator, + in.PubSub, + in.Validator, + ) + + out.IpfsRouting = routinghelpers.Tiered{ + Routers: []routing.IpfsRouting{ + // Always check pubsub first. 
+ &routinghelpers.Compose{ + ValueStore: &routinghelpers.LimitedValueStore{ + ValueStore: out.PSRouter, + Namespaces: []string{"ipns"}, + }, + }, + in.BaseRouting, + }, + Validator: in.Validator, + } } + return out } - return out } func AutoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, host host.Host) error { @@ -519,11 +530,7 @@ func AutoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, host h return err } -func Pubsub(mctx MetricsCtx, lc fx.Lifecycle, host host.Host, bcfg *BuildCfg, cfg *config.Config) (service *pubsub.PubSub, err error) { - if !(bcfg.getOpt("pubsub") || bcfg.getOpt("ipnsps")) { - return nil, nil // TODO: mark optional - } - +func Pubsub(mctx MetricsCtx, lc fx.Lifecycle, host host.Host, cfg *config.Config) (service *pubsub.PubSub, err error) { var pubsubOptions []pubsub.Option if cfg.Pubsub.DisableSigning { pubsubOptions = append(pubsubOptions, pubsub.WithMessageSigning(false)) @@ -581,17 +588,3 @@ func StartListening(host host.Host, cfg *config.Config) error { log.Infof("Swarm listening at: %s", addrs) return nil } - -func P2PHostOption(bcfg *BuildCfg) (hostOption HostOption, err error) { - hostOption = bcfg.Host - if bcfg.DisableEncryptedConnections { - innerHostOption := hostOption - hostOption = func(ctx context.Context, id peer.ID, ps peerstore.Peerstore, options ...libp2p.Option) (host.Host, error) { - return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...) - } - // TODO: shouldn't this be Errorf to guarantee visibility? - log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS. 
- You will not be able to connect to any nodes configured to use encrypted connections`) - } - return hostOption, nil -} diff --git a/core/node/storage.go b/core/node/storage.go index e956293c6..69c912609 100644 --- a/core/node/storage.go +++ b/core/node/storage.go @@ -37,46 +37,48 @@ func DatastoreCtor(repo repo.Repo) datastore.Datastore { type BaseBlocks blockstore.Blockstore -func BaseBlockstoreCtor(mctx MetricsCtx, repo repo.Repo, cfg *config.Config, bcfg *BuildCfg, lc fx.Lifecycle) (bs BaseBlocks, err error) { - rds := &retrystore.Datastore{ - Batching: repo.Datastore(), - Delay: time.Millisecond * 200, - Retries: 6, - TempErrFunc: isTooManyFDError, - } - // hash security - bs = blockstore.NewBlockstore(rds) - bs = &verifbs.VerifBS{Blockstore: bs} - - opts := blockstore.DefaultCacheOpts() - opts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize - if !bcfg.Permanent { - opts.HasBloomFilterSize = 0 - } - - if !bcfg.NilRepo { - ctx, cancel := context.WithCancel(mctx) - - lc.Append(fx.Hook{ - OnStop: func(context context.Context) error { - cancel() - return nil - }, - }) - bs, err = blockstore.CachedBlockstore(ctx, bs, opts) - if err != nil { - return nil, err +func BaseBlockstoreCtor(permanent bool, nilRepo bool) func(mctx MetricsCtx, repo repo.Repo, cfg *config.Config, lc fx.Lifecycle) (bs BaseBlocks, err error) { + return func(mctx MetricsCtx, repo repo.Repo, cfg *config.Config, lc fx.Lifecycle) (bs BaseBlocks, err error) { + rds := &retrystore.Datastore{ + Batching: repo.Datastore(), + Delay: time.Millisecond * 200, + Retries: 6, + TempErrFunc: isTooManyFDError, } + // hash security + bs = blockstore.NewBlockstore(rds) + bs = &verifbs.VerifBS{Blockstore: bs} + + opts := blockstore.DefaultCacheOpts() + opts.HasBloomFilterSize = cfg.Datastore.BloomFilterSize + if !permanent { + opts.HasBloomFilterSize = 0 + } + + if !nilRepo { + ctx, cancel := context.WithCancel(mctx) + + lc.Append(fx.Hook{ + OnStop: func(context context.Context) error { + cancel() + return nil 
+ }, + }) + bs, err = blockstore.CachedBlockstore(ctx, bs, opts) + if err != nil { + return nil, err + } + } + + bs = blockstore.NewIdStore(bs) + bs = cidv0v1.NewBlockstore(bs) + + if cfg.Datastore.HashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly? + bs.HashOnRead(true) + } + + return } - - bs = blockstore.NewIdStore(bs) - bs = cidv0v1.NewBlockstore(bs) - - if cfg.Datastore.HashOnRead { // TODO: review: this is how it was done originally, is there a reason we can't just pass this directly? - bs.HashOnRead(true) - } - - return } func GcBlockstoreCtor(repo repo.Repo, bb BaseBlocks, cfg *config.Config) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) { From 3ac605744fc41c29e4e4fc85d3dec395df9f7f48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 3 Apr 2019 16:34:14 +0200 Subject: [PATCH 18/27] Separate function to parse BuildCfg into Options MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/builder.go | 135 +------------------------------------------ core/ncore.go | 29 ---------- core/node/builder.go | 68 ++++++++++++++++++++++ core/node/groups.go | 74 ++++++++++++++++++++++-- core/node/helpers.go | 24 ++++++++ core/node/libp2p.go | 2 +- 6 files changed, 164 insertions(+), 168 deletions(-) delete mode 100644 core/ncore.go diff --git a/core/builder.go b/core/builder.go index 1c1bf880c..64b4acee6 100644 --- a/core/builder.go +++ b/core/builder.go @@ -2,154 +2,25 @@ package core import ( "context" - "crypto/rand" - "encoding/base64" - "errors" "go.uber.org/fx" "github.com/ipfs/go-ipfs/core/bootstrap" "github.com/ipfs/go-ipfs/core/node" - - repo "github.com/ipfs/go-ipfs/repo" - - ds "github.com/ipfs/go-datastore" - dsync "github.com/ipfs/go-datastore/sync" - cfg "github.com/ipfs/go-ipfs-config" - metrics 
"github.com/ipfs/go-metrics-interface" - resolver "github.com/ipfs/go-path/resolver" - ci "github.com/libp2p/go-libp2p-crypto" - peer "github.com/libp2p/go-libp2p-peer" ) -type BuildCfg node.BuildCfg - -func (cfg *BuildCfg) fillDefaults() error { - if cfg.Repo != nil && cfg.NilRepo { - return errors.New("cannot set a Repo and specify nilrepo at the same time") - } - - if cfg.Repo == nil { - var d ds.Datastore - if cfg.NilRepo { - d = ds.NewNullDatastore() - } else { - d = ds.NewMapDatastore() - } - r, err := defaultRepo(dsync.MutexWrap(d)) - if err != nil { - return err - } - cfg.Repo = r - } - - if cfg.Routing == nil { - cfg.Routing = node.DHTOption - } - - if cfg.Host == nil { - cfg.Host = node.DefaultHostOption - } - - return nil -} - -func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { - c := cfg.Config{} - priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader) - if err != nil { - return nil, err - } - - pid, err := peer.IDFromPublicKey(pub) - if err != nil { - return nil, err - } - - privkeyb, err := priv.Bytes() - if err != nil { - return nil, err - } - - c.Bootstrap = cfg.DefaultBootstrapAddresses - c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"} - c.Identity.PeerID = pid.Pretty() - c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb) - - return &repo.Mock{ - D: dstore, - C: c, - }, nil -} +type BuildCfg = node.BuildCfg // Alias for compatibility until we properly refactor the constructor interface // NewNode constructs and returns an IpfsNode using the given cfg. 
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { - if cfg == nil { - cfg = new(BuildCfg) - } - - err := cfg.fillDefaults() - if err != nil { - return nil, err - } - - ctx = metrics.CtxScope(ctx, "ipfs") - - repoOption := fx.Provide(func(lc fx.Lifecycle) repo.Repo { - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return cfg.Repo.Close() - }, - }) - - return cfg.Repo - }) - - metricsCtx := fx.Provide(func() node.MetricsCtx { - return node.MetricsCtx(ctx) - }) - - hostOption := fx.Provide(func() node.HostOption { - return cfg.Host - }) - - routingOption := fx.Provide(func() node.RoutingOption { - return cfg.Routing - }) - - params := fx.Options( - repoOption, - hostOption, - routingOption, - metricsCtx, - ) - - core := fx.Options( - fx.Provide(node.BlockServiceCtor), - fx.Provide(node.DagCtor), - fx.Provide(resolver.NewBasicResolver), - fx.Provide(node.Pinning), - fx.Provide(node.Files), - ) - n := &IpfsNode{ ctx: ctx, } app := fx.New( + node.IPFS(ctx, cfg), + fx.NopLogger, - fx.Provide(baseProcess), - - params, - node.Storage((*node.BuildCfg)(cfg)), - node.Identity, - node.IPNS, - node.Networked((*node.BuildCfg)(cfg)), - - fx.Invoke(setupSharding), - - core, - fx.Extract(n), ) diff --git a/core/ncore.go b/core/ncore.go deleted file mode 100644 index 4704a71af..000000000 --- a/core/ncore.go +++ /dev/null @@ -1,29 +0,0 @@ -package core - -import ( - "context" - - "github.com/jbenet/goprocess" - "go.uber.org/fx" - - iconfig "github.com/ipfs/go-ipfs-config" - uio "github.com/ipfs/go-unixfs/io" -) - -//////////////////// -// libp2p - -func setupSharding(cfg *iconfig.Config) { - // TEMP: setting global sharding switch here - uio.UseHAMTSharding = cfg.Experimental.ShardingEnabled -} - -func baseProcess(lc fx.Lifecycle) goprocess.Process { - p := goprocess.WithParent(goprocess.Background()) - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - return p.Close() - }, - }) - return p -} diff --git a/core/node/builder.go 
b/core/node/builder.go index 112d1f7fe..267aac895 100644 --- a/core/node/builder.go +++ b/core/node/builder.go @@ -1,6 +1,16 @@ package node import ( + "crypto/rand" + "encoding/base64" + "errors" + + ds "github.com/ipfs/go-datastore" + dsync "github.com/ipfs/go-datastore/sync" + cfg "github.com/ipfs/go-ipfs-config" + ci "github.com/libp2p/go-libp2p-crypto" + peer "github.com/libp2p/go-libp2p-peer" + "github.com/ipfs/go-ipfs/repo" ) @@ -34,3 +44,61 @@ func (cfg *BuildCfg) getOpt(key string) bool { return cfg.ExtraOpts[key] } + +func (cfg *BuildCfg) fillDefaults() error { + if cfg.Repo != nil && cfg.NilRepo { + return errors.New("cannot set a Repo and specify nilrepo at the same time") + } + + if cfg.Repo == nil { + var d ds.Datastore + if cfg.NilRepo { + d = ds.NewNullDatastore() + } else { + d = ds.NewMapDatastore() + } + r, err := defaultRepo(dsync.MutexWrap(d)) + if err != nil { + return err + } + cfg.Repo = r + } + + if cfg.Routing == nil { + cfg.Routing = DHTOption + } + + if cfg.Host == nil { + cfg.Host = DefaultHostOption + } + + return nil +} + +func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { + c := cfg.Config{} + priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader) + if err != nil { + return nil, err + } + + pid, err := peer.IDFromPublicKey(pub) + if err != nil { + return nil, err + } + + privkeyb, err := priv.Bytes() + if err != nil { + return nil, err + } + + c.Bootstrap = cfg.DefaultBootstrapAddresses + c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"} + c.Identity.PeerID = pid.Pretty() + c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb) + + return &repo.Mock{ + D: dstore, + C: c, + }, nil +} diff --git a/core/node/groups.go b/core/node/groups.go index 083d69d6b..7bcbbfb4b 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -1,12 +1,17 @@ package node import ( + "context" + offline "github.com/ipfs/go-ipfs-exchange-offline" + "github.com/ipfs/go-metrics-interface" + 
"github.com/ipfs/go-path/resolver" "go.uber.org/fx" offroute "github.com/ipfs/go-ipfs-routing/offline" "github.com/ipfs/go-ipfs/p2p" "github.com/ipfs/go-ipfs/provider" + "github.com/ipfs/go-ipfs/repo" ) var BaseLibP2P = fx.Options( @@ -35,8 +40,8 @@ func LibP2P(cfg *BuildCfg) fx.Option { return fx.Options( BaseLibP2P, - MaybeProvide(P2PNoSecurity, cfg.DisableEncryptedConnections), - MaybeProvide(Pubsub, cfg.getOpt("pubsub") || cfg.getOpt("ipnsps")), + maybeProvide(P2PNoSecurity, cfg.DisableEncryptedConnections), + maybeProvide(Pubsub, cfg.getOpt("pubsub") || cfg.getOpt("ipnsps")), fx.Provide(P2PSmuxTransport(cfg.getOpt("mplex"))), fx.Provide(P2POnlineRouting(cfg.getOpt("ipnsps"))), @@ -84,6 +89,7 @@ func Online(cfg *BuildCfg) fx.Option { Providers, ) } + var Offline = fx.Options( fx.Provide(offline.Exchange), fx.Provide(OfflineNamesysCtor), @@ -91,6 +97,14 @@ var Offline = fx.Options( fx.Provide(provider.NewOfflineProvider), ) +var Core = fx.Options( + fx.Provide(BlockServiceCtor), + fx.Provide(DagCtor), + fx.Provide(resolver.NewBasicResolver), + fx.Provide(Pinning), + fx.Provide(Files), +) + func Networked(cfg *BuildCfg) fx.Option { if cfg.Online { return Online(cfg) @@ -98,9 +112,57 @@ func Networked(cfg *BuildCfg) fx.Option { return Offline } -func MaybeProvide(opt interface{}, enable bool) fx.Option { - if enable { - return fx.Provide(opt) +func IPFS(ctx context.Context, cfg *BuildCfg) fx.Option { + if cfg == nil { + cfg = new(BuildCfg) } - return fx.Options() + + err := cfg.fillDefaults() + if err != nil { + return fx.Error(err) + } + + ctx = metrics.CtxScope(ctx, "ipfs") + + repoOption := fx.Provide(func(lc fx.Lifecycle) repo.Repo { + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return cfg.Repo.Close() + }, + }) + + return cfg.Repo + }) + + metricsCtx := fx.Provide(func() MetricsCtx { + return MetricsCtx(ctx) + }) + + hostOption := fx.Provide(func() HostOption { + return cfg.Host + }) + + routingOption := fx.Provide(func() RoutingOption { 
+ return cfg.Routing + }) + + params := fx.Options( + repoOption, + hostOption, + routingOption, + metricsCtx, + ) + + return fx.Options( + params, + fx.Provide(baseProcess), + fx.Invoke(setupSharding), + + Storage(cfg), + Identity, + IPNS, + Networked(cfg), + + Core, + ) } diff --git a/core/node/helpers.go b/core/node/helpers.go index a785124a3..bca088f70 100644 --- a/core/node/helpers.go +++ b/core/node/helpers.go @@ -3,6 +3,8 @@ package node import ( "context" + config "github.com/ipfs/go-ipfs-config" + uio "github.com/ipfs/go-unixfs/io" "github.com/jbenet/goprocess" "go.uber.org/fx" ) @@ -41,3 +43,25 @@ func (lp *lcProcess) Run(f goprocess.ProcessFunc) { }, }) } + +func maybeProvide(opt interface{}, enable bool) fx.Option { + if enable { + return fx.Provide(opt) + } + return fx.Options() +} + +func setupSharding(cfg *config.Config) { + // TEMP: setting global sharding switch here + uio.UseHAMTSharding = cfg.Experimental.ShardingEnabled +} + +func baseProcess(lc fx.Lifecycle) goprocess.Process { + p := goprocess.WithParent(goprocess.Background()) + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return p.Close() + }, + }) + return p +} diff --git a/core/node/libp2p.go b/core/node/libp2p.go index 8405b8cc9..5fa22d155 100644 --- a/core/node/libp2p.go +++ b/core/node/libp2p.go @@ -122,7 +122,7 @@ type Libp2pOpts struct { Opts []libp2p.Option `group:"libp2p"` } -type PNetFingerprint []byte // TODO: find some better place +type PNetFingerprint []byte func P2PPNet(repo repo.Repo) (opts Libp2pOpts, fp PNetFingerprint, err error) { swarmkey, err := repo.SwarmKey() if err != nil || swarmkey == nil { From e4cf66008f82243df91df3ec414b152c958fa0f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 3 Apr 2019 16:56:45 +0200 Subject: [PATCH 19/27] Move option parsing to BuildCfg; fix imports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- 
core/builder.go | 3 +++ core/coreapi/path.go | 6 ++--- core/node/builder.go | 43 ++++++++++++++++++++++++++++++-- core/node/core.go | 18 +++++++------- core/node/groups.go | 51 ++++++-------------------------------- core/node/libp2p.go | 1 + fuse/ipns/ipns_unix.go | 4 +-- namesys/resolve/resolve.go | 11 ++++---- 8 files changed, 71 insertions(+), 66 deletions(-) diff --git a/core/builder.go b/core/builder.go index 64b4acee6..60cf2cb23 100644 --- a/core/builder.go +++ b/core/builder.go @@ -3,6 +3,7 @@ package core import ( "context" + "github.com/ipfs/go-metrics-interface" "go.uber.org/fx" "github.com/ipfs/go-ipfs/core/bootstrap" @@ -13,6 +14,8 @@ type BuildCfg = node.BuildCfg // Alias for compatibility until we properly refac // NewNode constructs and returns an IpfsNode using the given cfg. func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) { + ctx = metrics.CtxScope(ctx, "ipfs") + n := &IpfsNode{ ctx: ctx, } diff --git a/core/coreapi/path.go b/core/coreapi/path.go index d78650278..314d1b5fd 100644 --- a/core/coreapi/path.go +++ b/core/coreapi/path.go @@ -5,7 +5,7 @@ import ( "fmt" gopath "path" - node2 "github.com/ipfs/go-ipfs/namesys/resolve" + "github.com/ipfs/go-ipfs/namesys/resolve" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" @@ -42,8 +42,8 @@ func (api *CoreAPI) ResolvePath(ctx context.Context, p path.Path) (path.Resolved } ipath := ipfspath.Path(p.String()) - ipath, err := node2.ResolveIPNS(ctx, api.namesys, ipath) - if err == node2.ErrNoNamesys { + ipath, err := resolve.ResolveIPNS(ctx, api.namesys, ipath) + if err == resolve.ErrNoNamesys { return nil, coreiface.ErrOffline } else if err != nil { return nil, err diff --git a/core/node/builder.go b/core/node/builder.go index 267aac895..e83fa764e 100644 --- a/core/node/builder.go +++ b/core/node/builder.go @@ -1,17 +1,20 @@ package node import ( + "context" "crypto/rand" "encoding/base64" "errors" + "go.uber.org/fx" + + "github.com/ipfs/go-ipfs/repo" + ds 
"github.com/ipfs/go-datastore" dsync "github.com/ipfs/go-datastore/sync" cfg "github.com/ipfs/go-ipfs-config" ci "github.com/libp2p/go-libp2p-crypto" peer "github.com/libp2p/go-libp2p-peer" - - "github.com/ipfs/go-ipfs/repo" ) type BuildCfg struct { @@ -75,6 +78,42 @@ func (cfg *BuildCfg) fillDefaults() error { return nil } +func (cfg *BuildCfg) options(ctx context.Context) fx.Option { + err := cfg.fillDefaults() + if err != nil { + return fx.Error(err) + } + + repoOption := fx.Provide(func(lc fx.Lifecycle) repo.Repo { + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return cfg.Repo.Close() + }, + }) + + return cfg.Repo + }) + + metricsCtx := fx.Provide(func() MetricsCtx { + return MetricsCtx(ctx) + }) + + hostOption := fx.Provide(func() HostOption { + return cfg.Host + }) + + routingOption := fx.Provide(func() RoutingOption { + return cfg.Routing + }) + + return fx.Options( + repoOption, + hostOption, + routingOption, + metricsCtx, + ) +} + func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { c := cfg.Config{} priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader) diff --git a/core/node/core.go b/core/node/core.go index 160a833cf..7bba419e8 100644 --- a/core/node/core.go +++ b/core/node/core.go @@ -4,24 +4,24 @@ import ( "context" "fmt" + "github.com/ipfs/go-ipfs/pin" + "github.com/ipfs/go-ipfs/repo" + "github.com/ipfs/go-bitswap" "github.com/ipfs/go-bitswap/network" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - offline "github.com/ipfs/go-ipfs-exchange-offline" - format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipfs/go-ipfs-exchange-interface" + "github.com/ipfs/go-ipfs-exchange-offline" + "github.com/ipfs/go-ipld-format" "github.com/ipfs/go-merkledag" "github.com/ipfs/go-mfs" "github.com/ipfs/go-unixfs" - host 
"github.com/libp2p/go-libp2p-host" - routing "github.com/libp2p/go-libp2p-routing" + "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-routing" "go.uber.org/fx" - - "github.com/ipfs/go-ipfs/pin" - "github.com/ipfs/go-ipfs/repo" ) func BlockServiceCtor(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService { diff --git a/core/node/groups.go b/core/node/groups.go index 7bcbbfb4b..398d48169 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -3,15 +3,13 @@ package node import ( "context" - offline "github.com/ipfs/go-ipfs-exchange-offline" - "github.com/ipfs/go-metrics-interface" - "github.com/ipfs/go-path/resolver" - "go.uber.org/fx" - - offroute "github.com/ipfs/go-ipfs-routing/offline" "github.com/ipfs/go-ipfs/p2p" "github.com/ipfs/go-ipfs/provider" - "github.com/ipfs/go-ipfs/repo" + + offline "github.com/ipfs/go-ipfs-exchange-offline" + offroute "github.com/ipfs/go-ipfs-routing/offline" + "github.com/ipfs/go-path/resolver" + "go.uber.org/fx" ) var BaseLibP2P = fx.Options( @@ -117,44 +115,9 @@ func IPFS(ctx context.Context, cfg *BuildCfg) fx.Option { cfg = new(BuildCfg) } - err := cfg.fillDefaults() - if err != nil { - return fx.Error(err) - } - - ctx = metrics.CtxScope(ctx, "ipfs") - - repoOption := fx.Provide(func(lc fx.Lifecycle) repo.Repo { - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return cfg.Repo.Close() - }, - }) - - return cfg.Repo - }) - - metricsCtx := fx.Provide(func() MetricsCtx { - return MetricsCtx(ctx) - }) - - hostOption := fx.Provide(func() HostOption { - return cfg.Host - }) - - routingOption := fx.Provide(func() RoutingOption { - return cfg.Routing - }) - - params := fx.Options( - repoOption, - hostOption, - routingOption, - metricsCtx, - ) - return fx.Options( - params, + cfg.options(ctx), + fx.Provide(baseProcess), fx.Invoke(setupSharding), diff --git a/core/node/libp2p.go b/core/node/libp2p.go index 5fa22d155..467f3eaab 100644 --- a/core/node/libp2p.go +++ 
b/core/node/libp2p.go @@ -123,6 +123,7 @@ type Libp2pOpts struct { } type PNetFingerprint []byte + func P2PPNet(repo repo.Repo) (opts Libp2pOpts, fp PNetFingerprint, err error) { swarmkey, err := repo.SwarmKey() if err != nil || swarmkey == nil { diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index fb56d8353..a662ab22e 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -13,7 +13,7 @@ import ( core "github.com/ipfs/go-ipfs/core" namesys "github.com/ipfs/go-ipfs/namesys" - node2 "github.com/ipfs/go-ipfs/namesys/resolve" + resolve "github.com/ipfs/go-ipfs/namesys/resolve" dag "github.com/ipfs/go-merkledag" path "github.com/ipfs/go-path" @@ -98,7 +98,7 @@ func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string return nil, err } - node, err := node2.Resolve(ctx, ipfs.Namesys, ipfs.Resolver, p) + node, err := resolve.Resolve(ctx, ipfs.Namesys, ipfs.Resolver, p) switch err { case nil: case namesys.ErrResolveFailed: diff --git a/namesys/resolve/resolve.go b/namesys/resolve/resolve.go index bd1667fa4..128619c65 100644 --- a/namesys/resolve/resolve.go +++ b/namesys/resolve/resolve.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/ipfs/go-ipld-format" - log2 "github.com/ipfs/go-log" logging "github.com/ipfs/go-log" "github.com/ipfs/go-path" "github.com/ipfs/go-path/resolver" @@ -30,34 +29,34 @@ func ResolveIPNS(ctx context.Context, nsys namesys.NameSystem, p path.Path) (pat // TODO(cryptix): we should be able to query the local cache for the path if nsys == nil { - evt.Append(log2.LoggableMap{"error": ErrNoNamesys.Error()}) + evt.Append(logging.LoggableMap{"error": ErrNoNamesys.Error()}) return "", ErrNoNamesys } seg := p.Segments() if len(seg) < 2 || seg[1] == "" { // just "/" without further segments - evt.Append(log2.LoggableMap{"error": path.ErrNoComponents.Error()}) + evt.Append(logging.LoggableMap{"error": path.ErrNoComponents.Error()}) return "", path.ErrNoComponents } extensions := seg[2:] resolvable, err := 
path.FromSegments("/", seg[0], seg[1]) if err != nil { - evt.Append(log2.LoggableMap{"error": err.Error()}) + evt.Append(logging.LoggableMap{"error": err.Error()}) return "", err } respath, err := nsys.Resolve(ctx, resolvable.String()) if err != nil { - evt.Append(log2.LoggableMap{"error": err.Error()}) + evt.Append(logging.LoggableMap{"error": err.Error()}) return "", err } segments := append(respath.Segments(), extensions...) p, err = path.FromSegments("/", segments...) if err != nil { - evt.Append(log2.LoggableMap{"error": err.Error()}) + evt.Append(logging.LoggableMap{"error": err.Error()}) return "", err } } From 0e6f8d4cc19ef5bb5d9c47e357a3ffb862b8b067 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Apr 2019 15:36:25 +0200 Subject: [PATCH 20/27] bootstrap: cleanup randomSubsetOfPeers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/bootstrap/bootstrap.go | 16 +++++++--------- thirdparty/math2/math2.go | 9 --------- 2 files changed, 7 insertions(+), 18 deletions(-) delete mode 100644 thirdparty/math2/math2.go diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go index e6b4f826d..d7c107690 100644 --- a/core/bootstrap/bootstrap.go +++ b/core/bootstrap/bootstrap.go @@ -20,8 +20,6 @@ import ( "github.com/libp2p/go-libp2p-peer" "github.com/libp2p/go-libp2p-peerstore" "github.com/libp2p/go-libp2p-routing" - - "github.com/ipfs/go-ipfs/thirdparty/math2" ) var log = logging.Logger("bootstrap") @@ -208,13 +206,13 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []peerstore.PeerI } func randomSubsetOfPeers(in []peerstore.PeerInfo, max int) []peerstore.PeerInfo { - n := math2.IntMin(max, len(in)) - var out []peerstore.PeerInfo - for _, val := range rand.Perm(len(in)) { - out = append(out, in[val]) - if len(out) >= n { - break - } + if max > len(in) { + max = len(in) + } + + out := make([]peerstore.PeerInfo, max) + for 
i, val := range rand.Perm(len(in))[:max] { + out[i] = in[val] } return out } diff --git a/thirdparty/math2/math2.go b/thirdparty/math2/math2.go deleted file mode 100644 index e8a75b5f7..000000000 --- a/thirdparty/math2/math2.go +++ /dev/null @@ -1,9 +0,0 @@ -package math2 - -// IntMin returns the smaller of x or y. -func IntMin(x, y int) int { - if x < y { - return x - } - return y -} From 803512e46cc451c54d3a710d7471402d8aa0d5d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Apr 2019 15:46:28 +0200 Subject: [PATCH 21/27] Invoke Provider.Run in lc.OnStart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/node/groups.go | 1 - core/node/provider.go | 12 +++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/core/node/groups.go b/core/node/groups.go index 398d48169..e85438435 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -71,7 +71,6 @@ var Providers = fx.Options( fx.Provide(ReproviderCtor), fx.Invoke(Reprovider), - fx.Invoke(provider.Provider.Run), ) func Online(cfg *BuildCfg) fx.Option { diff --git a/core/node/provider.go b/core/node/provider.go index 8c6792440..78b7b8036 100644 --- a/core/node/provider.go +++ b/core/node/provider.go @@ -1,6 +1,7 @@ package node import ( + "context" "fmt" "time" @@ -22,7 +23,16 @@ func ProviderQueue(mctx MetricsCtx, lc fx.Lifecycle, repo repo.Repo) (*provider. 
} func ProviderCtor(mctx MetricsCtx, lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting) provider.Provider { - return provider.NewProvider(lifecycleCtx(mctx, lc), queue, rt) + p := provider.NewProvider(lifecycleCtx(mctx, lc), queue, rt) + + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + p.Run() + return nil + }, + }) + + return p } func ReproviderCtor(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, bs BaseBlocks, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*reprovide.Reprovider, error) { From bb4b99ef4751bc9d9e4a4d423e2adc302a63edf0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Apr 2019 15:57:20 +0200 Subject: [PATCH 22/27] Rename lcProcess.Run to Append, add docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/node/helpers.go | 17 +++++++++++++---- core/node/ipns.go | 2 +- go.mod | 1 + 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/core/node/helpers.go b/core/node/helpers.go index bca088f70..4fced7fc6 100644 --- a/core/node/helpers.go +++ b/core/node/helpers.go @@ -6,6 +6,7 @@ import ( config "github.com/ipfs/go-ipfs-config" uio "github.com/ipfs/go-unixfs/io" "github.com/jbenet/goprocess" + "github.com/pkg/errors" "go.uber.org/fx" ) @@ -31,15 +32,23 @@ type lcProcess struct { Proc goprocess.Process } -func (lp *lcProcess) Run(f goprocess.ProcessFunc) { - proc := make(chan goprocess.Process, 1) +// Append wraps ProcessFunc into a goprocess, and appends it to the lifecycle +func (lp *lcProcess) Append(f goprocess.ProcessFunc) { + // Hooks are guaranteed to run in sequence. If a hook fails to start, its + // OnStop won't be executed. 
+ var proc goprocess.Process + lp.LC.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - proc <- lp.Proc.Go(f) + proc = lp.Proc.Go(f) return nil }, OnStop: func(ctx context.Context) error { - return (<-proc).Close() // todo: respect ctx, somehow + if proc == nil { // Theoretically this shouldn't ever happen + return errors.New("lcProcess: proc was nil") + } + + return proc.Close() // todo: respect ctx, somehow }, }) } diff --git a/core/node/ipns.go b/core/node/ipns.go index 1e8511d2c..afd6e678d 100644 --- a/core/node/ipns.go +++ b/core/node/ipns.go @@ -66,6 +66,6 @@ func IpnsRepublisher(lc lcProcess, cfg *config.Config, namesys namesys.NameSyste repub.RecordLifetime = d } - lc.Run(repub.Run) + lc.Append(repub.Run) return nil } diff --git a/go.mod b/go.mod index 21dbcd646..2c32a77f9 100644 --- a/go.mod +++ b/go.mod @@ -100,6 +100,7 @@ require ( github.com/multiformats/go-multibase v0.0.1 github.com/multiformats/go-multihash v0.0.1 github.com/opentracing/opentracing-go v1.0.2 + github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v0.9.2 github.com/syndtr/goleveldb v1.0.0 github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc From 23f50ab03e3bfe9e8d669075050c743ad2cb619e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Apr 2019 16:00:48 +0200 Subject: [PATCH 23/27] Minor cleanups in libp2p construction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/node/libp2p.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/core/node/libp2p.go b/core/node/libp2p.go index 467f3eaab..20de7f43a 100644 --- a/core/node/libp2p.go +++ b/core/node/libp2p.go @@ -152,6 +152,8 @@ func P2PPNetChecker(repo repo.Repo, ph host.Host, lc fx.Lifecycle) error { OnStart: func(_ context.Context) error { go func() { t := time.NewTicker(30 * time.Second) + defer t.Stop() + <-t.C // swallow one tick for { select { @@ 
-406,13 +408,7 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut opts = append(opts, o...) } - ctx, cancel := context.WithCancel(mctx) - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - cancel() - return nil - }, - }) + ctx := lifecycleCtx(mctx, lc) opts = append(opts, libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { r, err := params.RoutingOption(ctx, h, params.Repo.Datastore(), params.Validator) From bf380b873811e06a7e54c91dc541f7e122bff2dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Apr 2019 17:06:39 +0200 Subject: [PATCH 24/27] Cleanup routing related units MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/node/groups.go | 8 ++- core/node/libp2p.go | 130 ++++++++++++++++++++++++-------------------- 2 files changed, 78 insertions(+), 60 deletions(-) diff --git a/core/node/groups.go b/core/node/groups.go index e85438435..45d91471a 100644 --- a/core/node/groups.go +++ b/core/node/groups.go @@ -35,15 +35,19 @@ var BaseLibP2P = fx.Options( ) func LibP2P(cfg *BuildCfg) fx.Option { - return fx.Options( + opts := fx.Options( BaseLibP2P, maybeProvide(P2PNoSecurity, cfg.DisableEncryptedConnections), maybeProvide(Pubsub, cfg.getOpt("pubsub") || cfg.getOpt("ipnsps")), fx.Provide(P2PSmuxTransport(cfg.getOpt("mplex"))), - fx.Provide(P2POnlineRouting(cfg.getOpt("ipnsps"))), + fx.Provide(P2PRouting), + fx.Provide(P2PBaseRouting), + maybeProvide(P2PPubsubRouter, cfg.getOpt("ipnsps")), ) + + return opts } func Storage(cfg *BuildCfg) fx.Option { diff --git a/core/node/libp2p.go b/core/node/libp2p.go index 20de7f43a..4153b6a08 100644 --- a/core/node/libp2p.go +++ b/core/node/libp2p.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "os" + "sort" "strings" "time" @@ -398,10 +399,8 @@ type P2PHostOut struct { Host host.Host Routing BaseRouting - IpfsDHT *dht.IpfsDHT } -// TODO: move some of this into 
params struct func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut, err error) { opts := []libp2p.Option{libp2p.NoListenAddrs} for _, o := range params.Opts { @@ -438,80 +437,95 @@ func P2PHost(mctx MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHostOut }, }) - // TODO: break this up into more DI units - // TODO: I'm not a fan of type assertions like this but the - // `RoutingOption` system doesn't currently provide access to the - // IpfsNode. - // - // Ideally, we'd do something like: - // - // 1. Add some fancy method to introspect into tiered routers to extract - // things like the pubsub router or the DHT (complicated, messy, - // probably not worth it). - // 2. Pass the IpfsNode into the RoutingOption (would also remove the - // PSRouter case below. - // 3. Introduce some kind of service manager? (my personal favorite but - // that requires a fair amount of work). - if dht, ok := out.Routing.(*dht.IpfsDHT); ok { - out.IpfsDHT = dht + return out, err +} + +type Router struct { + routing.IpfsRouting + + Priority int // less = more important +} + +type p2pRouterOut struct { + fx.Out + + Router Router `group:"routers"` +} + +func P2PBaseRouting(lc fx.Lifecycle, in BaseRouting) (out p2pRouterOut, dr *dht.IpfsDHT) { + if dht, ok := in.(*dht.IpfsDHT); ok { + dr = dht lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { - return out.IpfsDHT.Close() + return dr.Close() }, }) } - return out, err + return p2pRouterOut{ + Router: Router{ + Priority: 1000, + IpfsRouting: in, + }, + }, dr } -type p2pRoutingIn struct { +type p2pOnlineRoutingIn struct { fx.In - Repo repo.Repo + Routers []Router `group:"routers"` Validator record.Validator - Host host.Host - PubSub *pubsub.PubSub `optional:"true"` +} + +func P2PRouting(in p2pOnlineRoutingIn) routing.IpfsRouting { + routers := in.Routers + + sort.SliceStable(routers, func(i, j int) bool { + return routers[i].Priority < routers[j].Priority + }) + + irouters := 
make([]routing.IpfsRouting, len(routers)) + for i, v := range routers { + irouters[i] = v.IpfsRouting + } + + return routinghelpers.Tiered{ + Routers: irouters, + Validator: in.Validator, + } +} + +type p2pPSRoutingIn struct { + fx.In BaseRouting BaseRouting + Repo repo.Repo + Validator record.Validator + Host host.Host + PubSub *pubsub.PubSub `optional:"true"` } -type p2pRoutingOut struct { - fx.Out +func P2PPubsubRouter(mctx MetricsCtx, lc fx.Lifecycle, in p2pPSRoutingIn) (p2pRouterOut, *namesys.PubsubValueStore) { + psRouter := namesys.NewPubsubValueStore( + lifecycleCtx(mctx, lc), + in.Host, + in.BaseRouting, + in.PubSub, + in.Validator, + ) - IpfsRouting routing.IpfsRouting - PSRouter *namesys.PubsubValueStore -} - -func P2POnlineRouting(ipnsps bool) func(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { - return func(mctx MetricsCtx, lc fx.Lifecycle, in p2pRoutingIn) (out p2pRoutingOut) { - out.IpfsRouting = in.BaseRouting - - if ipnsps { - out.PSRouter = namesys.NewPubsubValueStore( - lifecycleCtx(mctx, lc), - in.Host, - in.BaseRouting, - in.PubSub, - in.Validator, - ) - - out.IpfsRouting = routinghelpers.Tiered{ - Routers: []routing.IpfsRouting{ - // Always check pubsub first. 
- &routinghelpers.Compose{ - ValueStore: &routinghelpers.LimitedValueStore{ - ValueStore: out.PSRouter, - Namespaces: []string{"ipns"}, - }, - }, - in.BaseRouting, + return p2pRouterOut{ + Router: Router{ + IpfsRouting: &routinghelpers.Compose{ + ValueStore: &routinghelpers.LimitedValueStore{ + ValueStore: psRouter, + Namespaces: []string{"ipns"}, }, - Validator: in.Validator, - } - } - return out - } + }, + Priority: 100, + }, + }, psRouter } func AutoNATService(mctx MetricsCtx, lc fx.Lifecycle, cfg *config.Config, host host.Host) error { From d5746584cafbbba940e96ff47d896cf065202c90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Apr 2019 17:10:20 +0200 Subject: [PATCH 25/27] Handle Provider closing in lifecycle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/node/provider.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/node/provider.go b/core/node/provider.go index 78b7b8036..8518ca70e 100644 --- a/core/node/provider.go +++ b/core/node/provider.go @@ -30,6 +30,9 @@ func ProviderCtor(mctx MetricsCtx, lc fx.Lifecycle, queue *provider.Queue, rt ro p.Run() return nil }, + OnStop: func(ctx context.Context) error { + return p.Close() + }, }) return p From 19cc7c9297d73fffc92f8ce2488e3ae4169c90cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Apr 2019 17:16:22 +0200 Subject: [PATCH 26/27] Remove relay address filtering logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera --- core/node/libp2p.go | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/core/node/libp2p.go b/core/node/libp2p.go index 4153b6a08..261e1cc2d 100644 --- a/core/node/libp2p.go +++ b/core/node/libp2p.go @@ -17,7 +17,6 @@ import ( "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p-autonat-svc" 
"github.com/libp2p/go-libp2p-circuit" - circuit "github.com/libp2p/go-libp2p-circuit" "github.com/libp2p/go-libp2p-connmgr" "github.com/libp2p/go-libp2p-crypto" "github.com/libp2p/go-libp2p-host" @@ -229,31 +228,10 @@ func P2PAddrsFactory(cfg *config.Config) (opts Libp2pOpts, err error) { if err != nil { return opts, err } - if !cfg.Swarm.DisableRelay { - addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs) - } opts.Opts = append(opts.Opts, libp2p.AddrsFactory(addrsFactory)) return } -func filterRelayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { - var raddrs []ma.Multiaddr - for _, addr := range addrs { - _, err := addr.ValueForProtocol(circuit.P_CIRCUIT) - if err == nil { - continue - } - raddrs = append(raddrs, addr) - } - return raddrs -} - -func composeAddrsFactory(f, g p2pbhost.AddrsFactory) p2pbhost.AddrsFactory { - return func(addrs []ma.Multiaddr) []ma.Multiaddr { - return f(g(addrs)) - } -} - func P2PConnectionManager(cfg *config.Config) (opts Libp2pOpts, err error) { grace := config.DefaultConnMgrGracePeriod low := config.DefaultConnMgrHighWater From c9ea728880a976fe7f01f0b51b855ff139142ddb Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 17 Apr 2019 16:55:18 -0700 Subject: [PATCH 27/27] chore: remove duplicate import License: MIT Signed-off-by: Steven Allen --- core/node/libp2p.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/node/libp2p.go b/core/node/libp2p.go index 261e1cc2d..be1b4861a 100644 --- a/core/node/libp2p.go +++ b/core/node/libp2p.go @@ -40,7 +40,6 @@ import ( ma "github.com/multiformats/go-multiaddr" mplex "github.com/whyrusleeping/go-smux-multiplex" yamux "github.com/whyrusleeping/go-smux-yamux" - "github.com/whyrusleeping/multiaddr-filter" mamask "github.com/whyrusleeping/multiaddr-filter" "go.uber.org/fx" @@ -98,7 +97,7 @@ func Peerstore(id peer.ID, sk crypto.PrivKey) peerstore.Peerstore { func P2PAddrFilters(cfg *config.Config) (opts Libp2pOpts, err error) { for _, s := range 
cfg.Swarm.AddrFilters { - f, err := mask.NewMask(s) + f, err := mamask.NewMask(s) if err != nil { return opts, fmt.Errorf("incorrectly formatted address filter in config: %s", s) }