mirror of
https://github.com/ipfs/kubo.git
synced 2026-02-22 02:47:48 +08:00
* plumb through go-datastore context changes * update go-libp2p to v0.16.0 * use LIBP2P_TCP_REUSEPORT instead of IPFS_REUSEPORT * use relay config * making deprecation notice match the go-ipfs-config key * docs(config): circuit relay v2 * docs(config): fix links and headers * feat(config): Internal.Libp2pForceReachability This switches to config that supports setting and reading Internal.Libp2pForceReachability OptionalString flag * use configuration option for static relays * chore: go-ipfs-config v0.18.0 https://github.com/ipfs/go-ipfs-config/releases/tag/v0.18.0 * feat: circuit v1 migration prompt when Swarm.EnableRelayHop is set (#8559) * exit when Swarm.EnableRelayHop is set * docs: Experimental.ShardingEnabled migration This ensures existing users of global sharding experiment get notified that the flag no longer works + that autosharding happens automatically. For people who NEED to keep the old behavior (eg. have no time to migrate today) there is a note about restoring it with `UnixFSShardingSizeThreshold`. * chore: add dag-jose code to the cid command output * add support for setting automatic unixfs sharding threshold from the config * test: have tests use low cutoff for sharding to mimic old behavior * test: change error message to match the current error * test: Add automatic sharding/unsharding tests (#8547) * test: refactored naming in the sharding sharness tests to make more sense * ci: set interop test executor to convenience image for Go1.16 + Node * ci: use interop master Co-authored-by: Marcin Rataj <lidel@lidel.org> Co-authored-by: Marten Seemann <martenseemann@gmail.com> Co-authored-by: Marcin Rataj <lidel@lidel.org> Co-authored-by: Gus Eggert <gus@gus.dev> Co-authored-by: Lucas Molas <schomatis@gmail.com>
142 lines
3.0 KiB
Go
142 lines
3.0 KiB
Go
package coreapi
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
|
|
blockservice "github.com/ipfs/go-blockservice"
|
|
cid "github.com/ipfs/go-cid"
|
|
cidutil "github.com/ipfs/go-cidutil"
|
|
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
|
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
dag "github.com/ipfs/go-merkledag"
|
|
coreiface "github.com/ipfs/interface-go-ipfs-core"
|
|
caopts "github.com/ipfs/interface-go-ipfs-core/options"
|
|
path "github.com/ipfs/interface-go-ipfs-core/path"
|
|
peer "github.com/libp2p/go-libp2p-core/peer"
|
|
routing "github.com/libp2p/go-libp2p-core/routing"
|
|
)
|
|
|
|
// DhtAPI exposes routing/DHT operations (peer lookup, provider lookup and
// content providing) on top of a CoreAPI node.
type DhtAPI CoreAPI
|
|
|
|
func (api *DhtAPI) FindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
|
|
err := api.checkOnline(false)
|
|
if err != nil {
|
|
return peer.AddrInfo{}, err
|
|
}
|
|
|
|
pi, err := api.routing.FindPeer(ctx, peer.ID(p))
|
|
if err != nil {
|
|
return peer.AddrInfo{}, err
|
|
}
|
|
|
|
return pi, nil
|
|
}
|
|
|
|
func (api *DhtAPI) FindProviders(ctx context.Context, p path.Path, opts ...caopts.DhtFindProvidersOption) (<-chan peer.AddrInfo, error) {
|
|
settings, err := caopts.DhtFindProvidersOptions(opts...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
err = api.checkOnline(false)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
rp, err := api.core().ResolvePath(ctx, p)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
numProviders := settings.NumProviders
|
|
if numProviders < 1 {
|
|
return nil, fmt.Errorf("number of providers must be greater than 0")
|
|
}
|
|
|
|
pchan := api.routing.FindProvidersAsync(ctx, rp.Cid(), numProviders)
|
|
return pchan, nil
|
|
}
|
|
|
|
func (api *DhtAPI) Provide(ctx context.Context, path path.Path, opts ...caopts.DhtProvideOption) error {
|
|
settings, err := caopts.DhtProvideOptions(opts...)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
err = api.checkOnline(false)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
rp, err := api.core().ResolvePath(ctx, path)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
c := rp.Cid()
|
|
|
|
has, err := api.blockstore.Has(ctx, c)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if !has {
|
|
return fmt.Errorf("block %s not found locally, cannot provide", c)
|
|
}
|
|
|
|
if settings.Recursive {
|
|
err = provideKeysRec(ctx, api.routing, api.blockstore, []cid.Cid{c})
|
|
} else {
|
|
err = provideKeys(ctx, api.routing, []cid.Cid{c})
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func provideKeys(ctx context.Context, r routing.Routing, cids []cid.Cid) error {
|
|
for _, c := range cids {
|
|
err := r.Provide(ctx, c, true)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func provideKeysRec(ctx context.Context, r routing.Routing, bs blockstore.Blockstore, cids []cid.Cid) error {
|
|
provided := cidutil.NewStreamingSet()
|
|
|
|
errCh := make(chan error)
|
|
go func() {
|
|
dserv := dag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
|
|
for _, c := range cids {
|
|
err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, provided.Visitor(ctx))
|
|
if err != nil {
|
|
errCh <- err
|
|
}
|
|
}
|
|
}()
|
|
|
|
for {
|
|
select {
|
|
case k := <-provided.New:
|
|
err := r.Provide(ctx, k, true)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
case err := <-errCh:
|
|
return err
|
|
case <-ctx.Done():
|
|
return ctx.Err()
|
|
}
|
|
}
|
|
}
|
|
|
|
// core converts back to the underlying *CoreAPI so DhtAPI methods can reuse
// shared functionality such as path resolution.
func (api *DhtAPI) core() coreiface.CoreAPI {
	return (*CoreAPI)(api)
}
|