client/rpc: bring up to speed with streaming pins

Jorropo 2023-06-02 16:52:13 +02:00
commit 9b63ab6da6
80 changed files with 4763 additions and 383 deletions

4
.github/pull_request_template.md vendored Normal file

@ -0,0 +1,4 @@
<!--
PR Creation Checklist
- [ ] Update Changelog
-->


@ -3,6 +3,8 @@ name: Interop
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
@ -14,6 +16,10 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
interop-prep:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
@ -44,7 +50,7 @@ jobs:
path: cmd/ipfs/ipfs
interop:
needs: [interop-prep]
runs-on: ubuntu-latest
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
defaults:
run:
@ -75,7 +81,7 @@ jobs:
npm install ipfs-interop@^10.0.1
working-directory: interop
# Run the interop tests while ignoring the js-js interop test cases
- run: npx ipfs-interop -- -t node --grep '^(?!.*(js\d? -> js\d?|js-js-js))'
- run: npx ipfs-interop -- -t node --grep '^(?!.*(js\d? -> js\d?|js-js-js))' --parallel
env:
LIBP2P_TCP_REUSEPORT: false
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
@ -121,42 +127,9 @@ jobs:
working-directory: go-ipfs-api
- run: cmd/ipfs/ipfs shutdown
if: always()
go-ipfs-http-client:
needs: [interop-prep]
runs-on: ubuntu-latest
timeout-minutes: 5
env:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/download-artifact@v3
with:
name: kubo
path: cmd/ipfs
- run: chmod +x cmd/ipfs/ipfs
- uses: actions/checkout@v3
with:
repository: ipfs/go-ipfs-http-client
path: go-ipfs-http-client
- uses: protocol/cache-go-action@v1
with:
name: ${{ github.job }}
- run: echo '${{ github.workspace }}/cmd/ipfs' >> $GITHUB_PATH
- run: go test -count=1 -v ./...
working-directory: go-ipfs-http-client
ipfs-webui:
needs: [interop-prep]
runs-on: ubuntu-latest
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
env:
NO_SANDBOX: true
@ -191,14 +164,26 @@ jobs:
key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-${{ github.job }}-
- run: |
npm ci --prefer-offline --no-audit --progress=false
npx playwright install
- env:
NPM_CACHE_DIR: ${{ steps.npm-cache-dir.outputs.dir }}
run: |
npm ci --prefer-offline --no-audit --progress=false --cache "$NPM_CACHE_DIR"
npx playwright install --with-deps
working-directory: ipfs-webui
- name: Run ipfs-webui@main build and smoke-test to confirm the upstream repo is not broken
run: npm test
- id: ref
run: echo "ref=$(git rev-parse --short HEAD)" | tee -a $GITHUB_OUTPUT
working-directory: ipfs-webui
- name: Test ipfs-webui@main E2E against the locally built Kubo binary
- id: state
env:
GITHUB_REPOSITORY: ipfs/ipfs-webui
GITHUB_REF: ${{ steps.ref.outputs.ref }}
GITHUB_TOKEN: ${{ github.token }}
run: |
echo "state=$(curl -L -H "Authorization: Bearer $GITHUB_TOKEN" "https://api.github.com/repos/$GITHUB_REPOSITORY/commits/$GITHUB_REF/status" --jq '.state')" | tee -a $GITHUB_OUTPUT
- name: Build ipfs-webui@main (state=${{ steps.state.outputs.state }})
run: npm run test:build
working-directory: ipfs-webui
- name: Test ipfs-webui@main (state=${{ steps.state.outputs.state }}) E2E against the locally built Kubo binary
run: npm run test:e2e
env:
IPFS_GO_EXEC: ${{ github.workspace }}/cmd/ipfs/ipfs


@ -8,6 +8,8 @@ on:
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
paths-ignore:
- '**/*.md'
schedule:
- cron: '30 12 * * 2'


@ -3,6 +3,8 @@ name: Docker Build
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'


@ -5,6 +5,8 @@ on:
branches:
- master
pull_request:
paths-ignore:
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}


@ -3,6 +3,8 @@ name: Go Build
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
@ -12,12 +14,9 @@ concurrency:
cancel-in-progress: true
jobs:
go-build-runner:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
uses: ipfs/kubo/.github/workflows/runner.yml@master
go-build:
needs: [go-build-runner]
runs-on: ${{ fromJSON(needs.go-build-runner.outputs.config).labels }}
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
env:
TEST_DOCKER: 0


@ -3,6 +3,8 @@ name: Go Check
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'


@ -3,6 +3,8 @@ name: Go Lint
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'


@ -3,6 +3,8 @@ name: Go Test
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
@ -12,12 +14,9 @@ concurrency:
cancel-in-progress: true
jobs:
go-test-runner:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
uses: ipfs/kubo/.github/workflows/runner.yml@master
go-test:
needs: [go-test-runner]
runs-on: ${{ fromJSON(needs.go-test-runner.outputs.config).labels }}
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
env:
TEST_DOCKER: 0
@ -41,8 +40,11 @@ jobs:
with:
name: ${{ github.job }}
- name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
env:
# increasing parallelism beyond 2 doesn't speed up the tests much
PARALLEL: 2
run: |
make -j 2 test/unit/gotest.junit.xml &&
make -j "$PARALLEL" test/unit/gotest.junit.xml &&
[[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- name: Upload coverage to Codecov
uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # v3.1.0


@ -1,34 +0,0 @@
name: Runner
on:
workflow_call:
outputs:
config:
description: "The runner's configuration"
value: ${{ jobs.choose.outputs.config }}
jobs:
choose:
runs-on: ubuntu-latest
timeout-minutes: 1
outputs:
config: ${{ steps.config.outputs.result }}
steps:
- uses: actions/github-script@v6
id: config
with:
script: |
if (`${context.repo.owner}/${context.repo.repo}` === 'ipfs/kubo') {
return {
labels: ['self-hosted', 'linux', 'x64', 'kubo'],
parallel: 10,
aws: true
}
} else {
return {
labels: ['ubuntu-latest'],
parallel: 3,
aws: false
}
}
- run: echo ${{ steps.config.outputs.result }}


@ -3,6 +3,8 @@ name: Sharness
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
@ -12,12 +14,9 @@ concurrency:
cancel-in-progress: true
jobs:
sharness-runner:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
uses: ipfs/kubo/.github/workflows/runner.yml@master
sharness-test:
needs: [sharness-runner]
runs-on: ${{ fromJSON(needs.sharness-runner.outputs.config).labels }}
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
defaults:
run:
@ -57,7 +56,8 @@ jobs:
TEST_EXPENSIVE: 1
IPFS_CHECK_RCMGR_DEFAULTS: 1
CONTINUE_ON_S_FAILURE: 1
PARALLEL: ${{ fromJSON(needs.sharness-runner.outputs.config).parallel }}
# increasing parallelism beyond 10 doesn't speed up the tests much
PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
- name: Upload coverage report
uses: codecov/codecov-action@81cd2dc8148241f03f5839d295e000b8f761e378 # v3.1.0
if: failure() || success()
@ -86,12 +86,12 @@ jobs:
- name: Upload one-page HTML report to S3
id: one-page
uses: pl-strflt/tf-aws-gh-runner/.github/actions/upload-artifact@main
if: fromJSON(needs.sharness-runner.outputs.config).aws && (failure() || success())
if: github.repository == 'ipfs/kubo' && (failure() || success())
with:
source: kubo/test/sharness/test-results/sharness.html
destination: sharness.html
- name: Upload one-page HTML report
if: (! fromJSON(needs.sharness-runner.outputs.config).aws) && (failure() || success())
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v3
with:
name: sharness.html
@ -106,18 +106,18 @@ jobs:
- name: Upload full HTML report to S3
id: full
uses: pl-strflt/tf-aws-gh-runner/.github/actions/upload-artifact@main
if: fromJSON(needs.sharness-runner.outputs.config).aws && (failure() || success())
if: github.repository == 'ipfs/kubo' && (failure() || success())
with:
source: kubo/test/sharness/test-results/sharness-html
destination: sharness-html/
- name: Upload full HTML report
if: (! fromJSON(needs.sharness-runner.outputs.config).aws) && (failure() || success())
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v3
with:
name: sharness-html
path: kubo/test/sharness/test-results/sharness-html
- name: Add S3 links to the summary
if: fromJSON(needs.sharness-runner.outputs.config).aws && (failure() || success())
if: github.repository == 'ipfs/kubo' && (failure() || success())
run: echo "$MD" >> $GITHUB_STEP_SUMMARY
env:
MD: |


@ -4,5 +4,8 @@ linters:
linters-settings:
stylecheck:
checks:
- all
- '-ST1003'
dot-import-whitelist:
- github.com/ipfs/kubo/test/cli/testutils


@ -1,5 +1,6 @@
# Kubo Changelogs
- [v0.21](docs/changelogs/v0.21.md)
- [v0.20](docs/changelogs/v0.20.md)
- [v0.19](docs/changelogs/v0.19.md)
- [v0.18](docs/changelogs/v0.18.md)


@ -187,10 +187,10 @@ $ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo (go-ipfs) like this:
```
$ nix-env -i ipfs
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `ipfs`.
You can also install the Package by using its attribute name, which is also `kubo`.
#### Solus
@ -251,10 +251,10 @@ $ sudo port install ipfs
In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
```
$ nix-env -i ipfs
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `ipfs`.
You can also install the Package by using its attribute name, which is also `kubo`.
#### Homebrew

44
client/rpc/README.md Normal file

@ -0,0 +1,44 @@
# `coreiface.CoreAPI` over http `rpc`
> IPFS CoreAPI implementation using the HTTP API
This package implements [`coreiface.CoreAPI`](https://pkg.go.dev/github.com/ipfs/boxo/coreiface#CoreAPI) over the HTTP API.
## Documentation
https://pkg.go.dev/github.com/ipfs/kubo/client/rpc
### Example
Pin a file on your local IPFS node by its CID:
```go
package main
import (
"context"
"fmt"
"github.com/ipfs/kubo/client/rpc"
path "github.com/ipfs/boxo/coreiface/path"
)
func main() {
// "Connect" to local node
node, err := rpc.NewLocalApi()
if err != nil {
fmt.Println(err)
return
}
// Pin a given file by its CID
ctx := context.Background()
cid := "bafkreidtuosuw37f5xmn65b3ksdiikajy7pwjjslzj2lxxz2vc4wdy3zku"
p := path.New(cid)
err = node.Pin().Add(ctx, p)
if err != nil {
fmt.Println(err)
return
}
}
```
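
Listings in this API are channel-based. As an illustrative sketch (not part of the README above; it assumes a reachable local daemon), enumerating pins iterates over the channel returned by `Pin().Ls`, with each streamed entry carrying its own error:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	node, err := rpc.NewLocalApi()
	if err != nil {
		fmt.Println(err)
		return
	}

	ctx := context.Background()
	// Pins are delivered over a channel that is closed when the listing ends.
	pins, err := node.Pin().Ls(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	for p := range pins {
		if p.Err() != nil {
			fmt.Println(p.Err())
			return
		}
		fmt.Println(p.Path(), p.Type())
	}
}
```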

215
client/rpc/api.go Normal file

@ -0,0 +1,215 @@
package rpc
import (
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/mitchellh/go-homedir"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
const (
DefaultPathName = ".ipfs"
DefaultPathRoot = "~/" + DefaultPathName
DefaultApiFile = "api"
EnvDir = "IPFS_PATH"
)
// ErrApiNotFound is returned when we fail to find a running daemon.
var ErrApiNotFound = errors.New("ipfs api address could not be found")
// HttpApi implements github.com/ipfs/interface-go-ipfs-core/CoreAPI using the
// IPFS HTTP API.
//
// For interface docs see
// https://godoc.org/github.com/ipfs/interface-go-ipfs-core#CoreAPI
type HttpApi struct {
url string
httpcli http.Client
Headers http.Header
applyGlobal func(*requestBuilder)
}
// NewLocalApi tries to construct a new HttpApi instance communicating with the
// local IPFS daemon.
//
// The daemon API address is pulled from the $IPFS_PATH/api file.
// If the $IPFS_PATH env var is not present, it defaults to ~/.ipfs.
func NewLocalApi() (*HttpApi, error) {
baseDir := os.Getenv(EnvDir)
if baseDir == "" {
baseDir = DefaultPathRoot
}
return NewPathApi(baseDir)
}
// NewPathApi constructs a new HttpApi by pulling the API address from the
// specified ipfspath. The api file should be located at $ipfspath/api.
func NewPathApi(ipfspath string) (*HttpApi, error) {
a, err := ApiAddr(ipfspath)
if err != nil {
if os.IsNotExist(err) {
err = ErrApiNotFound
}
return nil, err
}
return NewApi(a)
}
// ApiAddr reads the api file in the specified ipfs path.
func ApiAddr(ipfspath string) (ma.Multiaddr, error) {
baseDir, err := homedir.Expand(ipfspath)
if err != nil {
return nil, err
}
apiFile := filepath.Join(baseDir, DefaultApiFile)
api, err := os.ReadFile(apiFile)
if err != nil {
return nil, err
}
return ma.NewMultiaddr(strings.TrimSpace(string(api)))
}
// NewApi constructs an HttpApi with the specified endpoint.
func NewApi(a ma.Multiaddr) (*HttpApi, error) {
c := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DisableKeepAlives: true,
},
}
return NewApiWithClient(a, c)
}
// NewApiWithClient constructs an HttpApi with the specified endpoint and a custom http client.
func NewApiWithClient(a ma.Multiaddr, c *http.Client) (*HttpApi, error) {
_, url, err := manet.DialArgs(a)
if err != nil {
return nil, err
}
if a, err := ma.NewMultiaddr(url); err == nil {
_, host, err := manet.DialArgs(a)
if err == nil {
url = host
}
}
proto := "http://"
// By default, DialArgs is going to provide details suitable for connecting
// a socket to, but not really suitable for making an informed choice of http
// protocol. For multiaddresses specifying tls and/or https we want to make
// an https request instead of an http request.
protocols := a.Protocols()
for _, p := range protocols {
if p.Code == ma.P_HTTPS || p.Code == ma.P_TLS {
proto = "https://"
break
}
}
return NewURLApiWithClient(proto+url, c)
}
func NewURLApiWithClient(url string, c *http.Client) (*HttpApi, error) {
api := &HttpApi{
url: url,
httpcli: *c,
Headers: make(map[string][]string),
applyGlobal: func(*requestBuilder) {},
}
// We don't support redirects.
api.httpcli.CheckRedirect = func(_ *http.Request, _ []*http.Request) error {
return fmt.Errorf("unexpected redirect")
}
return api, nil
}
func (api *HttpApi) WithOptions(opts ...caopts.ApiOption) (iface.CoreAPI, error) {
options, err := caopts.ApiOptions(opts...)
if err != nil {
return nil, err
}
subApi := *api
subApi.applyGlobal = func(req *requestBuilder) {
if options.Offline {
req.Option("offline", options.Offline)
}
}
return &subApi, nil
}
func (api *HttpApi) Request(command string, args ...string) RequestBuilder {
headers := make(map[string]string)
if api.Headers != nil {
for k := range api.Headers {
headers[k] = api.Headers.Get(k)
}
}
return &requestBuilder{
command: command,
args: args,
shell: api,
headers: headers,
}
}
func (api *HttpApi) Unixfs() iface.UnixfsAPI {
return (*UnixfsAPI)(api)
}
func (api *HttpApi) Block() iface.BlockAPI {
return (*BlockAPI)(api)
}
func (api *HttpApi) Dag() iface.APIDagService {
return (*HttpDagServ)(api)
}
func (api *HttpApi) Name() iface.NameAPI {
return (*NameAPI)(api)
}
func (api *HttpApi) Key() iface.KeyAPI {
return (*KeyAPI)(api)
}
func (api *HttpApi) Pin() iface.PinAPI {
return (*PinAPI)(api)
}
func (api *HttpApi) Object() iface.ObjectAPI {
return (*ObjectAPI)(api)
}
func (api *HttpApi) Dht() iface.DhtAPI {
return (*DhtAPI)(api)
}
func (api *HttpApi) Swarm() iface.SwarmAPI {
return (*SwarmAPI)(api)
}
func (api *HttpApi) PubSub() iface.PubSubAPI {
return (*PubsubAPI)(api)
}
func (api *HttpApi) Routing() iface.RoutingAPI {
return (*RoutingAPI)(api)
}
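
As a hedged usage sketch of the constructors above (the endpoint URL and the header value are assumptions, not anything defined in this file): build a client against an explicit RPC address with a custom `http.Client`, attach a header that every `Request` will carry, and call one of the typed sub-APIs.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	// Hypothetical endpoint; adjust to your daemon's RPC address.
	api, err := rpc.NewURLApiWithClient("http://127.0.0.1:5001", &http.Client{})
	if err != nil {
		fmt.Println(err)
		return
	}
	// Headers are copied into every request built through Request().
	api.Headers.Set("Authorization", "Bearer example-token")

	key, err := api.Key().Self(context.Background())
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("connected as", key.ID())
}
```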

162
client/rpc/api_test.go Normal file

@ -0,0 +1,162 @@
package rpc
import (
"context"
"net/http"
"net/http/httptest"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
iface "github.com/ipfs/boxo/coreiface"
"github.com/ipfs/boxo/coreiface/path"
"github.com/ipfs/boxo/coreiface/tests"
"github.com/ipfs/kubo/test/cli/harness"
ma "github.com/multiformats/go-multiaddr"
"go.uber.org/multierr"
)
type NodeProvider struct{}
func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity, online bool, n int) ([]iface.CoreAPI, error) {
h := harness.NewT(t)
apis := make([]iface.CoreAPI, n)
nodes := h.NewNodes(n)
var wg, zero sync.WaitGroup
zeroNode := nodes[0]
wg.Add(len(apis))
zero.Add(1)
var errs []error
var errsLk sync.Mutex
for i, n := range nodes {
go func(i int, n *harness.Node) {
if err := func() error {
defer wg.Done()
var err error
n.Init("--empty-repo")
c := n.ReadConfig()
c.Experimental.FilestoreEnabled = true
n.WriteConfig(c)
n.StartDaemon("--enable-pubsub-experiment", "--offline="+strconv.FormatBool(!online))
if online {
if i > 0 {
zero.Wait()
n.Connect(zeroNode)
} else {
zero.Done()
}
}
apiMaddr, err := n.TryAPIAddr()
if err != nil {
return err
}
api, err := NewApi(apiMaddr)
if err != nil {
return err
}
apis[i] = api
// empty node is pinned even with --empty-repo, we don't want that
emptyNode := path.New("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn")
if err := api.Pin().Rm(ctx, emptyNode); err != nil {
return err
}
return nil
}(); err != nil {
errsLk.Lock()
errs = append(errs, err)
errsLk.Unlock()
}
}(i, n)
}
wg.Wait()
return apis, multierr.Combine(errs...)
}
func TestHttpApi(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
t.Skip("skipping due to #9905")
}
tests.TestApi(NodeProvider{})(t)
}
func Test_NewURLApiWithClient_With_Headers(t *testing.T) {
t.Parallel()
var (
headerToTest = "Test-Header"
expectedHeaderValue = "thisisaheadertest"
)
ts := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
val := r.Header.Get(headerToTest)
if val != expectedHeaderValue {
w.WriteHeader(400)
return
}
http.ServeContent(w, r, "", time.Now(), strings.NewReader("test"))
}),
)
defer ts.Close()
api, err := NewURLApiWithClient(ts.URL, &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DisableKeepAlives: true,
},
})
if err != nil {
t.Fatal(err)
}
api.Headers.Set(headerToTest, expectedHeaderValue)
if err := api.Pin().Rm(context.Background(), path.New("/ipfs/QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv")); err != nil {
t.Fatal(err)
}
}
func Test_NewURLApiWithClient_HTTP_Variant(t *testing.T) {
t.Parallel()
testcases := []struct {
address string
expected string
}{
{address: "/ip4/127.0.0.1/tcp/80", expected: "http://127.0.0.1:80"},
{address: "/ip4/127.0.0.1/tcp/443/tls", expected: "https://127.0.0.1:443"},
{address: "/ip4/127.0.0.1/tcp/443/https", expected: "https://127.0.0.1:443"},
{address: "/ip4/127.0.0.1/tcp/443/tls/http", expected: "https://127.0.0.1:443"},
}
for _, tc := range testcases {
address, err := ma.NewMultiaddr(tc.address)
if err != nil {
t.Fatal(err)
}
api, err := NewApiWithClient(address, &http.Client{})
if err != nil {
t.Fatal(err)
}
if api.url != tc.expected {
t.Errorf("Expected = %s; got %s", tc.expected, api.url)
}
}
}

270
client/rpc/apifile.go Normal file

@ -0,0 +1,270 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"io"
"github.com/ipfs/boxo/coreiface/path"
"github.com/ipfs/boxo/files"
unixfs "github.com/ipfs/boxo/ipld/unixfs"
"github.com/ipfs/go-cid"
)
const forwardSeekLimit = 1 << 14 //16k
func (api *UnixfsAPI) Get(ctx context.Context, p path.Path) (files.Node, error) {
if p.Mutable() { // use resolved path in case we are dealing with IPNS / MFS
var err error
p, err = api.core().ResolvePath(ctx, p)
if err != nil {
return nil, err
}
}
var stat struct {
Hash string
Type string
Size int64 // unixfs size
}
err := api.core().Request("files/stat", p.String()).Exec(ctx, &stat)
if err != nil {
return nil, err
}
switch stat.Type {
case "file":
return api.getFile(ctx, p, stat.Size)
case "directory":
return api.getDir(ctx, p, stat.Size)
default:
return nil, fmt.Errorf("unsupported file type '%s'", stat.Type)
}
}
type apiFile struct {
ctx context.Context
core *HttpApi
size int64
path path.Path
r *Response
at int64
}
func (f *apiFile) reset() error {
if f.r != nil {
_ = f.r.Cancel()
f.r = nil
}
req := f.core.Request("cat", f.path.String())
if f.at != 0 {
req.Option("offset", f.at)
}
resp, err := req.Send(f.ctx)
if err != nil {
return err
}
if resp.Error != nil {
return resp.Error
}
f.r = resp
return nil
}
func (f *apiFile) Read(p []byte) (int, error) {
n, err := f.r.Output.Read(p)
if n > 0 {
f.at += int64(n)
}
return n, err
}
func (f *apiFile) ReadAt(p []byte, off int64) (int, error) {
// Always make a new request. This method should be parallel-safe.
resp, err := f.core.Request("cat", f.path.String()).
Option("offset", off).Option("length", len(p)).Send(f.ctx)
if err != nil {
return 0, err
}
if resp.Error != nil {
return 0, resp.Error
}
defer resp.Output.Close()
n, err := io.ReadFull(resp.Output, p)
if err == io.ErrUnexpectedEOF {
err = io.EOF
}
return n, err
}
func (f *apiFile) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekEnd:
offset = f.size + offset
case io.SeekCurrent:
offset = f.at + offset
}
if f.at == offset { //noop
return offset, nil
}
if f.at < offset && offset-f.at < forwardSeekLimit { //forward skip
r, err := io.CopyN(io.Discard, f.r.Output, offset-f.at)
f.at += r
return f.at, err
}
f.at = offset
return f.at, f.reset()
}
func (f *apiFile) Close() error {
if f.r != nil {
return f.r.Cancel()
}
return nil
}
func (f *apiFile) Size() (int64, error) {
return f.size, nil
}
func (api *UnixfsAPI) getFile(ctx context.Context, p path.Path, size int64) (files.Node, error) {
f := &apiFile{
ctx: ctx,
core: api.core(),
size: size,
path: p,
}
return f, f.reset()
}
type apiIter struct {
ctx context.Context
core *UnixfsAPI
err error
dec *json.Decoder
curFile files.Node
cur lsLink
}
func (it *apiIter) Err() error {
return it.err
}
func (it *apiIter) Name() string {
return it.cur.Name
}
func (it *apiIter) Next() bool {
if it.ctx.Err() != nil {
it.err = it.ctx.Err()
return false
}
var out lsOutput
if err := it.dec.Decode(&out); err != nil {
if err != io.EOF {
it.err = err
}
return false
}
if len(out.Objects) != 1 {
it.err = fmt.Errorf("ls returned more objects than expected (%d)", len(out.Objects))
return false
}
if len(out.Objects[0].Links) != 1 {
it.err = fmt.Errorf("ls returned more links than expected (%d)", len(out.Objects[0].Links))
return false
}
it.cur = out.Objects[0].Links[0]
c, err := cid.Parse(it.cur.Hash)
if err != nil {
it.err = err
return false
}
switch it.cur.Type {
case unixfs.THAMTShard, unixfs.TMetadata, unixfs.TDirectory:
it.curFile, err = it.core.getDir(it.ctx, path.IpfsPath(c), int64(it.cur.Size))
if err != nil {
it.err = err
return false
}
case unixfs.TFile:
it.curFile, err = it.core.getFile(it.ctx, path.IpfsPath(c), int64(it.cur.Size))
if err != nil {
it.err = err
return false
}
default:
it.err = fmt.Errorf("file type %d not supported", it.cur.Type)
return false
}
return true
}
func (it *apiIter) Node() files.Node {
return it.curFile
}
type apiDir struct {
ctx context.Context
core *UnixfsAPI
size int64
path path.Path
dec *json.Decoder
}
func (d *apiDir) Close() error {
return nil
}
func (d *apiDir) Size() (int64, error) {
return d.size, nil
}
func (d *apiDir) Entries() files.DirIterator {
return &apiIter{
ctx: d.ctx,
core: d.core,
dec: d.dec,
}
}
func (api *UnixfsAPI) getDir(ctx context.Context, p path.Path, size int64) (files.Node, error) {
resp, err := api.core().Request("ls", p.String()).
Option("resolve-size", true).
Option("stream", true).Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
d := &apiDir{
ctx: ctx,
core: api,
size: size,
path: p,
dec: json.NewDecoder(resp.Output),
}
return d, nil
}
var _ files.File = &apiFile{}
var _ files.Directory = &apiDir{}
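
A minimal sketch of how the adapters above surface UnixFS content to callers (the CID is only an example; a reachable local daemon is assumed): `Unixfs().Get` returns a `files.Node`, which for regular files is the `apiFile` defined here and can be read like any `files.File`.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/ipfs/boxo/coreiface/path"
	"github.com/ipfs/boxo/files"
	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	api, err := rpc.NewLocalApi()
	if err != nil {
		fmt.Println(err)
		return
	}

	// Example CID of a UnixFS file.
	nd, err := api.Unixfs().Get(context.Background(), path.New("/ipfs/bafkreidtuosuw37f5xmn65b3ksdiikajy7pwjjslzj2lxxz2vc4wdy3zku"))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer nd.Close()

	// Regular files come back as an apiFile, which satisfies files.File.
	f, ok := nd.(files.File)
	if !ok {
		fmt.Println("path does not resolve to a regular file")
		return
	}
	if _, err := io.Copy(os.Stdout, f); err != nil {
		fmt.Println(err)
	}
}
```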

134
client/rpc/block.go Normal file

@ -0,0 +1,134 @@
package rpc
import (
"bytes"
"context"
"fmt"
"io"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
"github.com/ipfs/go-cid"
mc "github.com/multiformats/go-multicodec"
mh "github.com/multiformats/go-multihash"
)
type BlockAPI HttpApi
type blockStat struct {
Key string
BSize int `json:"Size"`
cid cid.Cid
}
func (s *blockStat) Size() int {
return s.BSize
}
func (s *blockStat) Path() path.Resolved {
return path.IpldPath(s.cid)
}
func (api *BlockAPI) Put(ctx context.Context, r io.Reader, opts ...caopts.BlockPutOption) (iface.BlockStat, error) {
options, err := caopts.BlockPutOptions(opts...)
px := options.CidPrefix
if err != nil {
return nil, err
}
mht, ok := mh.Codes[px.MhType]
if !ok {
return nil, fmt.Errorf("unknowm mhType %d", px.MhType)
}
var cidOptKey, cidOptVal string
switch {
case px.Version == 0 && px.Codec == cid.DagProtobuf:
// ensure legacy --format=v0 passes as BlockPutOption still works
cidOptKey = "format"
cidOptVal = "v0"
default:
// pass codec as string
cidOptKey = "cid-codec"
cidOptVal = mc.Code(px.Codec).String()
}
req := api.core().Request("block/put").
Option("mhtype", mht).
Option("mhlen", px.MhLength).
Option(cidOptKey, cidOptVal).
Option("pin", options.Pin).
FileBody(r)
var out blockStat
if err := req.Exec(ctx, &out); err != nil {
return nil, err
}
out.cid, err = cid.Parse(out.Key)
if err != nil {
return nil, err
}
return &out, nil
}
func (api *BlockAPI) Get(ctx context.Context, p path.Path) (io.Reader, error) {
resp, err := api.core().Request("block/get", p.String()).Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, parseErrNotFoundWithFallbackToError(resp.Error)
}
//TODO: make get return ReadCloser to avoid copying
defer resp.Close()
b := new(bytes.Buffer)
if _, err := io.Copy(b, resp.Output); err != nil {
return nil, err
}
return b, nil
}
func (api *BlockAPI) Rm(ctx context.Context, p path.Path, opts ...caopts.BlockRmOption) error {
options, err := caopts.BlockRmOptions(opts...)
if err != nil {
return err
}
removedBlock := struct {
Hash string `json:",omitempty"`
Error string `json:",omitempty"`
}{}
req := api.core().Request("block/rm").
Option("force", options.Force).
Arguments(p.String())
if err := req.Exec(ctx, &removedBlock); err != nil {
return err
}
return parseErrNotFoundWithFallbackToMSG(removedBlock.Error)
}
func (api *BlockAPI) Stat(ctx context.Context, p path.Path) (iface.BlockStat, error) {
var out blockStat
err := api.core().Request("block/stat", p.String()).Exec(ctx, &out)
if err != nil {
return nil, parseErrNotFoundWithFallbackToError(err)
}
out.cid, err = cid.Parse(out.Key)
if err != nil {
return nil, err
}
return &out, nil
}
func (api *BlockAPI) core() *HttpApi {
return (*HttpApi)(api)
}
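
For illustration, a small sketch (local daemon assumed) of a block put/stat round trip using the option constructors this method understands:

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/ipfs/boxo/coreiface/options"
	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	api, err := rpc.NewLocalApi()
	if err != nil {
		fmt.Println(err)
		return
	}

	ctx := context.Background()
	// Put raw bytes as a block; the daemon answers with the resulting CID.
	st, err := api.Block().Put(ctx, bytes.NewReader([]byte("hello block")),
		options.Block.CidCodec("raw"),
		options.Block.Pin(true))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("stored", st.Path().Cid(), "size", st.Size())

	// Stat round-trips the same information from the daemon.
	if _, err := api.Block().Stat(ctx, st.Path()); err != nil {
		fmt.Println(err)
	}
}
```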

136
client/rpc/dag.go Normal file

@ -0,0 +1,136 @@
package rpc
import (
"bytes"
"context"
"fmt"
"io"
"github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
"github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
format "github.com/ipfs/go-ipld-format"
multicodec "github.com/multiformats/go-multicodec"
)
type httpNodeAdder HttpApi
type HttpDagServ httpNodeAdder
type pinningHttpNodeAdder httpNodeAdder
func (api *HttpDagServ) Get(ctx context.Context, c cid.Cid) (format.Node, error) {
r, err := api.core().Block().Get(ctx, path.IpldPath(c))
if err != nil {
return nil, err
}
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
blk, err := blocks.NewBlockWithCid(data, c)
if err != nil {
return nil, err
}
return format.DefaultBlockDecoder.Decode(blk)
}
func (api *HttpDagServ) GetMany(ctx context.Context, cids []cid.Cid) <-chan *format.NodeOption {
out := make(chan *format.NodeOption)
for _, c := range cids {
// TODO: Consider limiting concurrency of this somehow
go func(c cid.Cid) {
n, err := api.Get(ctx, c)
select {
case out <- &format.NodeOption{Node: n, Err: err}:
case <-ctx.Done():
}
}(c)
}
return out
}
func (api *httpNodeAdder) add(ctx context.Context, nd format.Node, pin bool) error {
c := nd.Cid()
prefix := c.Prefix()
// preserve 'cid-codec' when sent over HTTP
cidCodec := multicodec.Code(prefix.Codec).String()
// 'format' got replaced by 'cid-codec' in https://github.com/ipfs/interface-go-ipfs-core/pull/80
// but we still support it here for backward-compatibility with use of CIDv0
format := ""
if prefix.Version == 0 {
cidCodec = ""
format = "v0"
}
stat, err := api.core().Block().Put(ctx, bytes.NewReader(nd.RawData()),
options.Block.Hash(prefix.MhType, prefix.MhLength),
options.Block.CidCodec(cidCodec),
options.Block.Format(format),
options.Block.Pin(pin))
if err != nil {
return err
}
if !stat.Path().Cid().Equals(c) {
return fmt.Errorf("cids didn't match - local %s, remote %s", c.String(), stat.Path().Cid().String())
}
return nil
}
func (api *httpNodeAdder) addMany(ctx context.Context, nds []format.Node, pin bool) error {
for _, nd := range nds {
// TODO: optimize
if err := api.add(ctx, nd, pin); err != nil {
return err
}
}
return nil
}
func (api *HttpDagServ) AddMany(ctx context.Context, nds []format.Node) error {
return (*httpNodeAdder)(api).addMany(ctx, nds, false)
}
func (api *HttpDagServ) Add(ctx context.Context, nd format.Node) error {
return (*httpNodeAdder)(api).add(ctx, nd, false)
}
func (api *pinningHttpNodeAdder) Add(ctx context.Context, nd format.Node) error {
return (*httpNodeAdder)(api).add(ctx, nd, true)
}
func (api *pinningHttpNodeAdder) AddMany(ctx context.Context, nds []format.Node) error {
return (*httpNodeAdder)(api).addMany(ctx, nds, true)
}
func (api *HttpDagServ) Pinning() format.NodeAdder {
return (*pinningHttpNodeAdder)(api)
}
func (api *HttpDagServ) Remove(ctx context.Context, c cid.Cid) error {
return api.core().Block().Rm(ctx, path.IpldPath(c)) //TODO: should we force rm?
}
func (api *HttpDagServ) RemoveMany(ctx context.Context, cids []cid.Cid) error {
for _, c := range cids {
// TODO: optimize
if err := api.Remove(ctx, c); err != nil {
return err
}
}
return nil
}
func (api *httpNodeAdder) core() *HttpApi {
return (*HttpApi)(api)
}
func (api *HttpDagServ) core() *HttpApi {
return (*HttpApi)(api)
}
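
An illustrative sketch of fetching a single node through this DAG adapter, using the well-known empty-directory CID that also appears in the tests (a running local daemon is assumed):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	api, err := rpc.NewLocalApi()
	if err != nil {
		fmt.Println(err)
		return
	}

	// The empty unixfs directory (dag-pb); the block is fetched over HTTP
	// and decoded locally by format.DefaultBlockDecoder.
	c, err := cid.Decode("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn")
	if err != nil {
		fmt.Println(err)
		return
	}
	nd, err := api.Dag().Get(context.Background(), c)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(nd.Cid(), "has", len(nd.Links()), "links")
}
```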

113
client/rpc/dht.go Normal file

@ -0,0 +1,113 @@
package rpc
import (
"context"
"encoding/json"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/routing"
)
type DhtAPI HttpApi
func (api *DhtAPI) FindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
var out struct {
Type routing.QueryEventType
Responses []peer.AddrInfo
}
resp, err := api.core().Request("dht/findpeer", p.Pretty()).Send(ctx)
if err != nil {
return peer.AddrInfo{}, err
}
if resp.Error != nil {
return peer.AddrInfo{}, resp.Error
}
defer resp.Close()
dec := json.NewDecoder(resp.Output)
for {
if err := dec.Decode(&out); err != nil {
return peer.AddrInfo{}, err
}
if out.Type == routing.FinalPeer {
return out.Responses[0], nil
}
}
}
func (api *DhtAPI) FindProviders(ctx context.Context, p path.Path, opts ...caopts.DhtFindProvidersOption) (<-chan peer.AddrInfo, error) {
options, err := caopts.DhtFindProvidersOptions(opts...)
if err != nil {
return nil, err
}
rp, err := api.core().ResolvePath(ctx, p)
if err != nil {
return nil, err
}
resp, err := api.core().Request("dht/findprovs", rp.Cid().String()).
Option("num-providers", options.NumProviders).
Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
res := make(chan peer.AddrInfo)
go func() {
defer resp.Close()
defer close(res)
dec := json.NewDecoder(resp.Output)
for {
var out struct {
Extra string
Type routing.QueryEventType
Responses []peer.AddrInfo
}
if err := dec.Decode(&out); err != nil {
return // todo: handle this somehow
}
if out.Type == routing.QueryError {
return // usually a 'not found' error
// todo: handle other errors
}
if out.Type == routing.Provider {
for _, pi := range out.Responses {
select {
case res <- pi:
case <-ctx.Done():
return
}
}
}
}
}()
return res, nil
}
func (api *DhtAPI) Provide(ctx context.Context, p path.Path, opts ...caopts.DhtProvideOption) error {
options, err := caopts.DhtProvideOptions(opts...)
if err != nil {
return err
}
rp, err := api.core().ResolvePath(ctx, p)
if err != nil {
return err
}
return api.core().Request("dht/provide", rp.Cid().String()).
Option("recursive", options.Recursive).
Exec(ctx, nil)
}
func (api *DhtAPI) core() *HttpApi {
return (*HttpApi)(api)
}
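
A hedged sketch of consuming the provider stream returned by `FindProviders` (the CID is reused from the README example; a local daemon with DHT access is assumed):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ipfs/boxo/coreiface/path"
	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	api, err := rpc.NewLocalApi()
	if err != nil {
		fmt.Println(err)
		return
	}

	// Bound the search; the channel is closed when the request ends.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	p := path.New("/ipfs/bafkreidtuosuw37f5xmn65b3ksdiikajy7pwjjslzj2lxxz2vc4wdy3zku")
	provs, err := api.Dht().FindProviders(ctx, p)
	if err != nil {
		fmt.Println(err)
		return
	}
	for pi := range provs {
		fmt.Println("provider:", pi.ID)
	}
}
```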

166
client/rpc/errors.go Normal file

@ -0,0 +1,166 @@
package rpc
import (
"errors"
"strings"
"unicode/utf8"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
mbase "github.com/multiformats/go-multibase"
)
// This file handles parsing error messages and returning the correct ABI-based errors.
type prePostWrappedNotFoundError struct {
pre string
post string
wrapped ipld.ErrNotFound
}
func (e prePostWrappedNotFoundError) String() string {
return e.Error()
}
func (e prePostWrappedNotFoundError) Error() string {
return e.pre + e.wrapped.Error() + e.post
}
func (e prePostWrappedNotFoundError) Unwrap() error {
return e.wrapped
}
func parseErrNotFoundWithFallbackToMSG(msg string) error {
err, handled := parseErrNotFound(msg)
if handled {
return err
}
return errors.New(msg)
}
func parseErrNotFoundWithFallbackToError(msg error) error {
err, handled := parseErrNotFound(msg.Error())
if handled {
return err
}
return msg
}
func parseErrNotFound(msg string) (error, bool) {
if msg == "" {
return nil, true // Fast path
}
if err, handled := parseIPLDErrNotFound(msg); handled {
return err, true
}
if err, handled := parseBlockstoreNotFound(msg); handled {
return err, true
}
return nil, false
}
// Assume CIDs break on:
// - Whitespaces: " \t\n\r\v\f"
// - Semicolon: ";" this is to parse ipld.ErrNotFound wrapped in multierr
// - Double Quotes: "\"" this is for parsing %q and %#v formatting
const cidBreakSet = " \t\n\r\v\f;\""
func parseIPLDErrNotFound(msg string) (error, bool) {
// The pattern we search for is:
const ipldErrNotFoundKey = "ipld: could not find " /*CID*/
// We try to parse the CID, if it's invalid we give up and return a simple text error.
// We also accept "node" in place of the CID because that means it's an Undefined CID.
keyIndex := strings.Index(msg, ipldErrNotFoundKey)
if keyIndex < 0 { // Unknown error
return nil, false
}
cidStart := keyIndex + len(ipldErrNotFoundKey)
msgPostKey := msg[cidStart:]
var c cid.Cid
var postIndex int
if strings.HasPrefix(msgPostKey, "node") {
// Fallback case
c = cid.Undef
postIndex = len("node")
} else {
postIndex = strings.IndexFunc(msgPostKey, func(r rune) bool {
return strings.ContainsAny(string(r), cidBreakSet)
})
if postIndex < 0 {
// no break found, meaning the message looks like: something + "ipld: could not find bafy..."
postIndex = len(msgPostKey)
}
cidStr := msgPostKey[:postIndex]
var err error
c, err = cid.Decode(cidStr)
if err != nil {
// failed to decode the CID, give up
return nil, false
}
// check that the CID is either a CIDv0 or a base32 multibase
// because that's what ipld.ErrNotFound.Error() -> cid.Cid.String() does currently
if c.Version() != 0 {
baseRune, _ := utf8.DecodeRuneInString(cidStr)
if baseRune == utf8.RuneError || baseRune != mbase.Base32 {
// not a multibase we expect, give up
return nil, false
}
}
}
err := ipld.ErrNotFound{Cid: c}
pre := msg[:keyIndex]
post := msgPostKey[postIndex:]
if len(pre) > 0 || len(post) > 0 {
return prePostWrappedNotFoundError{
pre: pre,
post: post,
wrapped: err,
}, true
}
return err, true
}
// This is a simple error type that just returns msg as Error().
// But it also matches ipld.ErrNotFound when checked with Is(err).
// That is needed to keep compatibility with code that uses strings.Contains(err.Error(), "blockstore: block not found")
// and code using ipld.ErrNotFound.
type blockstoreNotFoundMatchingIPLDErrNotFound struct {
msg string
}
func (e blockstoreNotFoundMatchingIPLDErrNotFound) String() string {
return e.Error()
}
func (e blockstoreNotFoundMatchingIPLDErrNotFound) Error() string {
return e.msg
}
func (e blockstoreNotFoundMatchingIPLDErrNotFound) Is(err error) bool {
_, ok := err.(ipld.ErrNotFound)
return ok
}
func parseBlockstoreNotFound(msg string) (error, bool) {
if !strings.Contains(msg, "blockstore: block not found") {
return nil, false
}
return blockstoreNotFoundMatchingIPLDErrNotFound{msg: msg}, true
}
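
For illustration, a sketch of what the helpers above give callers. It is written as if it lived inside this package, since the parse functions are unexported, and the wrapped message is an assumption:

```go
package rpc

import (
	"fmt"

	ipld "github.com/ipfs/go-ipld-format"
)

// exampleNotFoundRoundTrip shows that a plain error string received over HTTP
// is rebuilt into an error that still satisfies ipld.IsNotFound while keeping
// the original wrapping text intact.
func exampleNotFoundRoundTrip() {
	msg := "merkledag: ipld: could not find QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn"
	err := parseErrNotFoundWithFallbackToMSG(msg)

	fmt.Println(ipld.IsNotFound(err)) // true
	fmt.Println(err.Error() == msg)   // true: the message is preserved verbatim
}
```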

99
client/rpc/errors_test.go Normal file

@ -0,0 +1,99 @@
package rpc
import (
"errors"
"fmt"
"testing"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
mbase "github.com/multiformats/go-multibase"
mh "github.com/multiformats/go-multihash"
)
var randomSha256MH = mh.Multihash{0x12, 0x20, 0x88, 0x82, 0x73, 0x37, 0x7c, 0xc1, 0xc9, 0x96, 0xad, 0xee, 0xd, 0x26, 0x84, 0x2, 0xc9, 0xc9, 0x5c, 0xf9, 0x5c, 0x4d, 0x9b, 0xc3, 0x3f, 0xfb, 0x4a, 0xd8, 0xaf, 0x28, 0x6b, 0xca, 0x1a, 0xf2}
func doParseIpldNotFoundTest(t *testing.T, original error) {
originalMsg := original.Error()
rebuilt := parseErrNotFoundWithFallbackToMSG(originalMsg)
rebuiltMsg := rebuilt.Error()
if originalMsg != rebuiltMsg {
t.Errorf("expected message to be %q; got %q", originalMsg, rebuiltMsg)
}
originalNotFound := ipld.IsNotFound(original)
rebuiltNotFound := ipld.IsNotFound(rebuilt)
if originalNotFound != rebuiltNotFound {
t.Errorf("for %q expected Ipld.IsNotFound to be %t; got %t", originalMsg, originalNotFound, rebuiltNotFound)
}
}
func TestParseIPLDNotFound(t *testing.T) {
t.Parallel()
if err := parseErrNotFoundWithFallbackToMSG(""); err != nil {
t.Errorf("expected empty string to give no error; got %T %q", err, err.Error())
}
cidBreaks := make([]string, len(cidBreakSet))
for i, v := range cidBreakSet {
cidBreaks[i] = "%w" + string(v)
}
base58BTCEncoder, err := mbase.NewEncoder(mbase.Base58BTC)
if err != nil {
t.Fatalf("expected to find Base58BTC encoder; got error %q", err.Error())
}
for _, wrap := range append(cidBreaks,
"",
"merkledag: %w",
"testing: %w the test",
"%w is wrong",
) {
for _, err := range [...]error{
errors.New("ipld: could not find "),
errors.New("ipld: could not find Bad_CID"),
errors.New("ipld: could not find " + cid.NewCidV1(cid.Raw, randomSha256MH).Encode(base58BTCEncoder)), // Test that we only accept CIDv0 and base32 CIDs
errors.New("network connection timeout"),
ipld.ErrNotFound{Cid: cid.Undef},
ipld.ErrNotFound{Cid: cid.NewCidV0(randomSha256MH)},
ipld.ErrNotFound{Cid: cid.NewCidV1(cid.Raw, randomSha256MH)},
} {
if wrap != "" {
err = fmt.Errorf(wrap, err)
}
doParseIpldNotFoundTest(t, err)
}
}
}
func TestBlockstoreNotFoundMatchingIPLDErrNotFound(t *testing.T) {
t.Parallel()
if !ipld.IsNotFound(blockstoreNotFoundMatchingIPLDErrNotFound{}) {
t.Fatalf("expected blockstoreNotFoundMatchingIPLDErrNotFound to match ipld.IsNotFound; got false")
}
for _, wrap := range [...]string{
"",
"merkledag: %w",
"testing: %w the test",
"%w is wrong",
} {
for _, err := range [...]error{
errors.New("network connection timeout"),
blockstoreNotFoundMatchingIPLDErrNotFound{"blockstore: block not found"},
} {
if wrap != "" {
err = fmt.Errorf(wrap, err)
}
doParseIpldNotFoundTest(t, err)
}
}
}

123
client/rpc/key.go Normal file

@ -0,0 +1,123 @@
package rpc
import (
"context"
"errors"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
"github.com/libp2p/go-libp2p/core/peer"
)
type KeyAPI HttpApi
type keyOutput struct {
JName string `json:"Name"`
Id string
pid peer.ID
}
func (k *keyOutput) Name() string {
return k.JName
}
func (k *keyOutput) Path() path.Path {
return path.New("/ipns/" + k.Id)
}
func (k *keyOutput) ID() peer.ID {
return k.pid
}
func (api *KeyAPI) Generate(ctx context.Context, name string, opts ...caopts.KeyGenerateOption) (iface.Key, error) {
options, err := caopts.KeyGenerateOptions(opts...)
if err != nil {
return nil, err
}
var out keyOutput
err = api.core().Request("key/gen", name).
Option("type", options.Algorithm).
Option("size", options.Size).
Exec(ctx, &out)
if err != nil {
return nil, err
}
out.pid, err = peer.Decode(out.Id)
return &out, err
}
func (api *KeyAPI) Rename(ctx context.Context, oldName string, newName string, opts ...caopts.KeyRenameOption) (iface.Key, bool, error) {
options, err := caopts.KeyRenameOptions(opts...)
if err != nil {
return nil, false, err
}
var out struct {
Was string
Now string
Id string
Overwrite bool
}
err = api.core().Request("key/rename", oldName, newName).
Option("force", options.Force).
Exec(ctx, &out)
if err != nil {
return nil, false, err
}
id := &keyOutput{JName: out.Now, Id: out.Id}
id.pid, err = peer.Decode(id.Id)
return id, out.Overwrite, err
}
func (api *KeyAPI) List(ctx context.Context) ([]iface.Key, error) {
var out struct{ Keys []*keyOutput }
if err := api.core().Request("key/list").Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]iface.Key, len(out.Keys))
for i, k := range out.Keys {
var err error
k.pid, err = peer.Decode(k.Id)
if err != nil {
return nil, err
}
res[i] = k
}
return res, nil
}
func (api *KeyAPI) Self(ctx context.Context) (iface.Key, error) {
var id struct{ ID string }
if err := api.core().Request("id").Exec(ctx, &id); err != nil {
return nil, err
}
var err error
out := keyOutput{JName: "self", Id: id.ID}
out.pid, err = peer.Decode(out.Id)
return &out, err
}
func (api *KeyAPI) Remove(ctx context.Context, name string) (iface.Key, error) {
var out struct{ Keys []keyOutput }
if err := api.core().Request("key/rm", name).Exec(ctx, &out); err != nil {
return nil, err
}
if len(out.Keys) != 1 {
return nil, errors.New("got unexpected number of keys back")
}
var err error
out.Keys[0].pid, err = peer.Decode(out.Keys[0].Id)
return &out.Keys[0], err
}
func (api *KeyAPI) core() *HttpApi {
return (*HttpApi)(api)
}

140
client/rpc/name.go Normal file

@ -0,0 +1,140 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"io"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
nsopts "github.com/ipfs/boxo/coreiface/options/namesys"
"github.com/ipfs/boxo/coreiface/path"
)
type NameAPI HttpApi
type ipnsEntry struct {
JName string `json:"Name"`
JValue string `json:"Value"`
path path.Path
}
func (e *ipnsEntry) Name() string {
return e.JName
}
func (e *ipnsEntry) Value() path.Path {
return e.path
}
func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.NamePublishOption) (iface.IpnsEntry, error) {
options, err := caopts.NamePublishOptions(opts...)
if err != nil {
return nil, err
}
req := api.core().Request("name/publish", p.String()).
Option("key", options.Key).
Option("allow-offline", options.AllowOffline).
Option("lifetime", options.ValidTime).
Option("resolve", false)
if options.TTL != nil {
req.Option("ttl", options.TTL)
}
var out ipnsEntry
if err := req.Exec(ctx, &out); err != nil {
return nil, err
}
out.path = path.New(out.JValue)
return &out, out.path.IsValid()
}
func (api *NameAPI) Search(ctx context.Context, name string, opts ...caopts.NameResolveOption) (<-chan iface.IpnsResult, error) {
options, err := caopts.NameResolveOptions(opts...)
if err != nil {
return nil, err
}
ropts := nsopts.ProcessOpts(options.ResolveOpts)
if ropts.Depth != nsopts.DefaultDepthLimit && ropts.Depth != 1 {
return nil, fmt.Errorf("Name.Resolve: depth other than 1 or %d not supported", nsopts.DefaultDepthLimit)
}
req := api.core().Request("name/resolve", name).
Option("nocache", !options.Cache).
Option("recursive", ropts.Depth != 1).
Option("dht-record-count", ropts.DhtRecordCount).
Option("dht-timeout", ropts.DhtTimeout).
Option("stream", true)
resp, err := req.Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
res := make(chan iface.IpnsResult)
go func() {
defer close(res)
defer resp.Close()
dec := json.NewDecoder(resp.Output)
for {
var out struct{ Path string }
err := dec.Decode(&out)
if err == io.EOF {
return
}
var ires iface.IpnsResult
if err == nil {
ires.Path = path.New(out.Path)
}
select {
case res <- ires:
case <-ctx.Done():
}
if err != nil {
return
}
}
}()
return res, nil
}
func (api *NameAPI) Resolve(ctx context.Context, name string, opts ...caopts.NameResolveOption) (path.Path, error) {
options, err := caopts.NameResolveOptions(opts...)
if err != nil {
return nil, err
}
ropts := nsopts.ProcessOpts(options.ResolveOpts)
if ropts.Depth != nsopts.DefaultDepthLimit && ropts.Depth != 1 {
return nil, fmt.Errorf("Name.Resolve: depth other than 1 or %d not supported", nsopts.DefaultDepthLimit)
}
req := api.core().Request("name/resolve", name).
Option("nocache", !options.Cache).
Option("recursive", ropts.Depth != 1).
Option("dht-record-count", ropts.DhtRecordCount).
Option("dht-timeout", ropts.DhtTimeout)
var out struct{ Path string }
if err := req.Exec(ctx, &out); err != nil {
return nil, err
}
return path.New(out.Path), nil
}
func (api *NameAPI) core() *HttpApi {
return (*HttpApi)(api)
}
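
Like pins, IPNS resolution can be consumed as a stream. A hedged sketch of using `Search` (the IPNS name is only an example; a local online daemon is assumed):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	api, err := rpc.NewLocalApi()
	if err != nil {
		fmt.Println(err)
		return
	}

	// Example name; results are streamed as resolution progresses and the
	// channel is closed when resolution finishes.
	results, err := api.Name().Search(context.Background(), "/ipns/docs.ipfs.tech")
	if err != nil {
		fmt.Println(err)
		return
	}
	for r := range results {
		if r.Err != nil {
			fmt.Println(r.Err)
			return
		}
		fmt.Println("resolved to", r.Path)
	}
}
```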

260
client/rpc/object.go Normal file

@ -0,0 +1,260 @@
package rpc
import (
"bytes"
"context"
"fmt"
"io"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
"github.com/ipfs/boxo/ipld/merkledag"
ft "github.com/ipfs/boxo/ipld/unixfs"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
)
type ObjectAPI HttpApi
type objectOut struct {
Hash string
}
func (api *ObjectAPI) New(ctx context.Context, opts ...caopts.ObjectNewOption) (ipld.Node, error) {
options, err := caopts.ObjectNewOptions(opts...)
if err != nil {
return nil, err
}
var n ipld.Node
switch options.Type {
case "empty":
n = new(merkledag.ProtoNode)
case "unixfs-dir":
n = ft.EmptyDirNode()
default:
return nil, fmt.Errorf("unknown object type: %s", options.Type)
}
return n, nil
}
func (api *ObjectAPI) Put(ctx context.Context, r io.Reader, opts ...caopts.ObjectPutOption) (path.Resolved, error) {
options, err := caopts.ObjectPutOptions(opts...)
if err != nil {
return nil, err
}
var out objectOut
err = api.core().Request("object/put").
Option("inputenc", options.InputEnc).
Option("datafieldenc", options.DataType).
Option("pin", options.Pin).
FileBody(r).
Exec(ctx, &out)
if err != nil {
return nil, err
}
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
return path.IpfsPath(c), nil
}
func (api *ObjectAPI) Get(ctx context.Context, p path.Path) (ipld.Node, error) {
r, err := api.core().Block().Get(ctx, p)
if err != nil {
return nil, err
}
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
return merkledag.DecodeProtobuf(b)
}
func (api *ObjectAPI) Data(ctx context.Context, p path.Path) (io.Reader, error) {
resp, err := api.core().Request("object/data", p.String()).Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
//TODO: make Data return ReadCloser to avoid copying
defer resp.Close()
b := new(bytes.Buffer)
if _, err := io.Copy(b, resp.Output); err != nil {
return nil, err
}
return b, nil
}
func (api *ObjectAPI) Links(ctx context.Context, p path.Path) ([]*ipld.Link, error) {
var out struct {
Links []struct {
Name string
Hash string
Size uint64
}
}
if err := api.core().Request("object/links", p.String()).Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]*ipld.Link, len(out.Links))
for i, l := range out.Links {
c, err := cid.Parse(l.Hash)
if err != nil {
return nil, err
}
res[i] = &ipld.Link{
Cid: c,
Name: l.Name,
Size: l.Size,
}
}
return res, nil
}
func (api *ObjectAPI) Stat(ctx context.Context, p path.Path) (*iface.ObjectStat, error) {
var out struct {
Hash string
NumLinks int
BlockSize int
LinksSize int
DataSize int
CumulativeSize int
}
if err := api.core().Request("object/stat", p.String()).Exec(ctx, &out); err != nil {
return nil, err
}
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
return &iface.ObjectStat{
Cid: c,
NumLinks: out.NumLinks,
BlockSize: out.BlockSize,
LinksSize: out.LinksSize,
DataSize: out.DataSize,
CumulativeSize: out.CumulativeSize,
}, nil
}
func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, child path.Path, opts ...caopts.ObjectAddLinkOption) (path.Resolved, error) {
options, err := caopts.ObjectAddLinkOptions(opts...)
if err != nil {
return nil, err
}
var out objectOut
err = api.core().Request("object/patch/add-link", base.String(), name, child.String()).
Option("create", options.Create).
Exec(ctx, &out)
if err != nil {
return nil, err
}
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
return path.IpfsPath(c), nil
}
func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string) (path.Resolved, error) {
var out objectOut
err := api.core().Request("object/patch/rm-link", base.String(), link).
Exec(ctx, &out)
if err != nil {
return nil, err
}
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
return path.IpfsPath(c), nil
}
func (api *ObjectAPI) AppendData(ctx context.Context, p path.Path, r io.Reader) (path.Resolved, error) {
var out objectOut
err := api.core().Request("object/patch/append-data", p.String()).
FileBody(r).
Exec(ctx, &out)
if err != nil {
return nil, err
}
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
return path.IpfsPath(c), nil
}
func (api *ObjectAPI) SetData(ctx context.Context, p path.Path, r io.Reader) (path.Resolved, error) {
var out objectOut
err := api.core().Request("object/patch/set-data", p.String()).
FileBody(r).
Exec(ctx, &out)
if err != nil {
return nil, err
}
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
return path.IpfsPath(c), nil
}
type change struct {
Type iface.ChangeType
Path string
Before cid.Cid
After cid.Cid
}
func (api *ObjectAPI) Diff(ctx context.Context, a path.Path, b path.Path) ([]iface.ObjectChange, error) {
var out struct {
Changes []change
}
if err := api.core().Request("object/diff", a.String(), b.String()).Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]iface.ObjectChange, len(out.Changes))
for i, ch := range out.Changes {
res[i] = iface.ObjectChange{
Type: ch.Type,
Path: ch.Path,
}
if ch.Before != cid.Undef {
res[i].Before = path.IpfsPath(ch.Before)
}
if ch.After != cid.Undef {
res[i].After = path.IpfsPath(ch.After)
}
}
return res, nil
}
func (api *ObjectAPI) core() *HttpApi {
return (*HttpApi)(api)
}

52
client/rpc/path.go Normal file

@ -0,0 +1,52 @@
package rpc
import (
"context"
"github.com/ipfs/boxo/coreiface/path"
ipfspath "github.com/ipfs/boxo/path"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
)
func (api *HttpApi) ResolvePath(ctx context.Context, p path.Path) (path.Resolved, error) {
var out struct {
Cid cid.Cid
RemPath string
}
//TODO: this is hacky, fixing https://github.com/ipfs/go-ipfs/issues/5703 would help
var err error
if p.Namespace() == "ipns" {
if p, err = api.Name().Resolve(ctx, p.String()); err != nil {
return nil, err
}
}
if err := api.Request("dag/resolve", p.String()).Exec(ctx, &out); err != nil {
return nil, err
}
// TODO:
ipath, err := ipfspath.FromSegments("/"+p.Namespace()+"/", out.Cid.String(), out.RemPath)
if err != nil {
return nil, err
}
root, err := cid.Parse(ipfspath.Path(p.String()).Segments()[1])
if err != nil {
return nil, err
}
return path.NewResolvedPath(ipath, out.Cid, root, out.RemPath), nil
}
func (api *HttpApi) ResolveNode(ctx context.Context, p path.Path) (ipld.Node, error) {
rp, err := api.ResolvePath(ctx, p)
if err != nil {
return nil, err
}
return api.Dag().Get(ctx, rp.Cid())
}

234
client/rpc/pin.go Normal file

@ -0,0 +1,234 @@
package rpc
import (
"context"
"encoding/json"
"io"
"strings"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
"github.com/ipfs/go-cid"
"github.com/pkg/errors"
)
type PinAPI HttpApi
type pinRefKeyObject struct {
Type string
}
type pinRefKeyList struct {
Keys map[string]pinRefKeyObject
}
type pin struct {
path path.Resolved
typ string
err error
}
func (p *pin) Err() error {
return p.err
}
func (p *pin) Path() path.Resolved {
return p.path
}
func (p *pin) Type() string {
return p.typ
}
func (api *PinAPI) Add(ctx context.Context, p path.Path, opts ...caopts.PinAddOption) error {
options, err := caopts.PinAddOptions(opts...)
if err != nil {
return err
}
return api.core().Request("pin/add", p.String()).
Option("recursive", options.Recursive).Exec(ctx, nil)
}
func (api *PinAPI) Ls(ctx context.Context, opts ...caopts.PinLsOption) (<-chan iface.Pin, error) {
options, err := caopts.PinLsOptions(opts...)
if err != nil {
return nil, err
}
var out pinRefKeyList
err = api.core().Request("pin/ls").
Option("type", options.Type).Exec(ctx, &out)
if err != nil {
return nil, err
}
pins := make(chan iface.Pin)
go func(ch chan<- iface.Pin) {
defer close(ch)
for hash, p := range out.Keys {
c, e := cid.Parse(hash)
if e != nil {
ch <- &pin{typ: p.Type, err: e}
return
}
ch <- &pin{typ: p.Type, path: path.IpldPath(c), err: e}
}
}(pins)
return pins, nil
}
// IsPinned returns whether or not the given cid is pinned,
// along with an explanation of why it is pinned.
func (api *PinAPI) IsPinned(ctx context.Context, p path.Path, opts ...caopts.PinIsPinnedOption) (string, bool, error) {
options, err := caopts.PinIsPinnedOptions(opts...)
if err != nil {
return "", false, err
}
var out pinRefKeyList
err = api.core().Request("pin/ls").
Option("type", options.WithType).
Option("arg", p.String()).
Exec(ctx, &out)
if err != nil {
// TODO: This error-type discrimination based on sub-string matching is brittle.
// It is addressed by this open issue: https://github.com/ipfs/go-ipfs/issues/7563
if strings.Contains(err.Error(), "is not pinned") {
return "", false, nil
}
return "", false, err
}
for _, obj := range out.Keys {
return obj.Type, true, nil
}
return "", false, errors.New("http api returned no error and no results")
}
func (api *PinAPI) Rm(ctx context.Context, p path.Path, opts ...caopts.PinRmOption) error {
options, err := caopts.PinRmOptions(opts...)
if err != nil {
return err
}
return api.core().Request("pin/rm", p.String()).
Option("recursive", options.Recursive).
Exec(ctx, nil)
}
func (api *PinAPI) Update(ctx context.Context, from path.Path, to path.Path, opts ...caopts.PinUpdateOption) error {
options, err := caopts.PinUpdateOptions(opts...)
if err != nil {
return err
}
return api.core().Request("pin/update", from.String(), to.String()).
Option("unpin", options.Unpin).Exec(ctx, nil)
}
type pinVerifyRes struct {
ok bool
badNodes []iface.BadPinNode
err error
}
func (r pinVerifyRes) Ok() bool {
return r.ok
}
func (r pinVerifyRes) BadNodes() []iface.BadPinNode {
return r.badNodes
}
func (r pinVerifyRes) Err() error {
return r.err
}
type badNode struct {
err error
cid cid.Cid
}
func (n badNode) Path() path.Resolved {
return path.IpldPath(n.cid)
}
func (n badNode) Err() error {
return n.err
}
func (api *PinAPI) Verify(ctx context.Context) (<-chan iface.PinStatus, error) {
resp, err := api.core().Request("pin/verify").Option("verbose", true).Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
res := make(chan iface.PinStatus)
go func() {
defer resp.Close()
defer close(res)
dec := json.NewDecoder(resp.Output)
for {
var out struct {
Cid string
Err string
Ok bool
BadNodes []struct {
Cid string
Err string
}
}
if err := dec.Decode(&out); err != nil {
if err == io.EOF {
return
}
select {
case res <- pinVerifyRes{err: err}:
return
case <-ctx.Done():
return
}
}
if out.Err != "" {
select {
case res <- pinVerifyRes{err: errors.New(out.Err)}:
return
case <-ctx.Done():
return
}
}
badNodes := make([]iface.BadPinNode, len(out.BadNodes))
for i, n := range out.BadNodes {
c, err := cid.Decode(n.Cid)
if err != nil {
badNodes[i] = badNode{cid: c, err: err}
continue
}
if n.Err != "" {
err = errors.New(n.Err)
}
badNodes[i] = badNode{cid: c, err: err}
}
select {
case res <- pinVerifyRes{ok: out.Ok, badNodes: badNodes}:
case <-ctx.Done():
return
}
}
}()
return res, nil
}
func (api *PinAPI) core() *HttpApi {
return (*HttpApi)(api)
}
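
The streamed `Verify` results can be consumed the same way as `Ls`. A minimal sketch (local daemon assumed):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/kubo/client/rpc"
)

func main() {
	api, err := rpc.NewLocalApi()
	if err != nil {
		fmt.Println(err)
		return
	}

	// Statuses are streamed as the daemon walks the pinned DAGs.
	statuses, err := api.Pin().Verify(context.Background())
	if err != nil {
		fmt.Println(err)
		return
	}
	for st := range statuses {
		if st.Err() != nil {
			fmt.Println("verify error:", st.Err())
			return
		}
		if !st.Ok() {
			for _, bad := range st.BadNodes() {
				fmt.Println("bad pin node:", bad.Path(), bad.Err())
			}
		}
	}
}
```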

214
client/rpc/pubsub.go Normal file

@ -0,0 +1,214 @@
package rpc
import (
"bytes"
"context"
"encoding/json"
"io"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/libp2p/go-libp2p/core/peer"
mbase "github.com/multiformats/go-multibase"
)
type PubsubAPI HttpApi
func (api *PubsubAPI) Ls(ctx context.Context) ([]string, error) {
var out struct {
Strings []string
}
if err := api.core().Request("pubsub/ls").Exec(ctx, &out); err != nil {
return nil, err
}
topics := make([]string, len(out.Strings))
for n, mb := range out.Strings {
_, topic, err := mbase.Decode(mb)
if err != nil {
return nil, err
}
topics[n] = string(topic)
}
return topics, nil
}
func (api *PubsubAPI) Peers(ctx context.Context, opts ...caopts.PubSubPeersOption) ([]peer.ID, error) {
options, err := caopts.PubSubPeersOptions(opts...)
if err != nil {
return nil, err
}
var out struct {
Strings []string
}
var optionalTopic string
if len(options.Topic) > 0 {
optionalTopic = toMultibase([]byte(options.Topic))
}
if err := api.core().Request("pubsub/peers", optionalTopic).Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]peer.ID, len(out.Strings))
for i, sid := range out.Strings {
id, err := peer.Decode(sid)
if err != nil {
return nil, err
}
res[i] = id
}
return res, nil
}
func (api *PubsubAPI) Publish(ctx context.Context, topic string, message []byte) error {
return api.core().Request("pubsub/pub", toMultibase([]byte(topic))).
FileBody(bytes.NewReader(message)).
Exec(ctx, nil)
}
type pubsubSub struct {
messages chan pubsubMessage
done chan struct{}
rcloser func() error
}
type pubsubMessage struct {
JFrom string `json:"from,omitempty"`
JData string `json:"data,omitempty"`
JSeqno string `json:"seqno,omitempty"`
JTopicIDs []string `json:"topicIDs,omitempty"`
// real values after unpacking from text/multibase envelopes
from peer.ID
data []byte
seqno []byte
topics []string
err error
}
func (msg *pubsubMessage) From() peer.ID {
return msg.from
}
func (msg *pubsubMessage) Data() []byte {
return msg.data
}
func (msg *pubsubMessage) Seq() []byte {
return msg.seqno
}
// TODO: do we want to keep this interface as []string,
// or change to more correct [][]byte?
func (msg *pubsubMessage) Topics() []string {
return msg.topics
}
func (s *pubsubSub) Next(ctx context.Context) (iface.PubSubMessage, error) {
select {
case msg, ok := <-s.messages:
if !ok {
return nil, io.EOF
}
if msg.err != nil {
return nil, msg.err
}
// unpack values from text/multibase envelopes
var err error
msg.from, err = peer.Decode(msg.JFrom)
if err != nil {
return nil, err
}
_, msg.data, err = mbase.Decode(msg.JData)
if err != nil {
return nil, err
}
_, msg.seqno, err = mbase.Decode(msg.JSeqno)
if err != nil {
return nil, err
}
for _, mbt := range msg.JTopicIDs {
_, topic, err := mbase.Decode(mbt)
if err != nil {
return nil, err
}
msg.topics = append(msg.topics, string(topic))
}
return &msg, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (api *PubsubAPI) Subscribe(ctx context.Context, topic string, opts ...caopts.PubSubSubscribeOption) (iface.PubSubSubscription, error) {
/* right now we have no options (discover got deprecated)
options, err := caopts.PubSubSubscribeOptions(opts...)
if err != nil {
return nil, err
}
*/
resp, err := api.core().Request("pubsub/sub", toMultibase([]byte(topic))).Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
sub := &pubsubSub{
messages: make(chan pubsubMessage),
done: make(chan struct{}),
rcloser: func() error {
return resp.Cancel()
},
}
dec := json.NewDecoder(resp.Output)
go func() {
defer close(sub.messages)
for {
var msg pubsubMessage
if err := dec.Decode(&msg); err != nil {
if err == io.EOF {
return
}
msg.err = err
}
select {
case sub.messages <- msg:
case <-sub.done:
return
case <-ctx.Done():
return
}
}
}()
return sub, nil
}
func (s *pubsubSub) Close() error {
if s.done != nil {
close(s.done)
s.done = nil
}
return s.rcloser()
}
func (api *PubsubAPI) core() *HttpApi {
return (*HttpApi)(api)
}
// Encodes bytes into URL-safe multibase that can be sent over HTTP RPC (URL or body)
func toMultibase(data []byte) string {
mb, _ := mbase.Encode(mbase.Base64url, data)
return mb
}
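Editor's note (illustrative sketch, not part of this commit): typical use of the Subscribe/Next/Close flow above. api is assumed to be a coreiface.CoreAPI backed by this client; topic names are multibase-encoded by toMultibase before being sent, and the daemon decodes them transparently.
func printMessages(ctx context.Context, api iface.CoreAPI, topic string) error {
    sub, err := api.PubSub().Subscribe(ctx, topic)
    if err != nil {
        return err
    }
    defer sub.Close() // cancels the underlying streaming HTTP response
    for {
        msg, err := sub.Next(ctx) // returns io.EOF once the stream ends
        if err != nil {
            return err
        }
        fmt.Printf("%s: %s\n", msg.From(), msg.Data())
    }
}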

36
client/rpc/request.go Normal file
View File

@ -0,0 +1,36 @@
package rpc
import (
"context"
"io"
"strings"
)
type Request struct {
Ctx context.Context
ApiBase string
Command string
Args []string
Opts map[string]string
Body io.Reader
Headers map[string]string
}
func NewRequest(ctx context.Context, url, command string, args ...string) *Request {
if !strings.HasPrefix(url, "http") {
url = "http://" + url
}
opts := map[string]string{
"encoding": "json",
"stream-channels": "true",
}
return &Request{
Ctx: ctx,
ApiBase: url + "/api/v0",
Command: command,
Args: args,
Opts: opts,
Headers: make(map[string]string),
}
}
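Editor's note (illustrative, not part of this commit): NewRequest performs no I/O; it only normalizes the endpoint and seeds the default options. The address and command below are examples.
func newVersionRequest(ctx context.Context) *Request {
    // Resulting ApiBase: "http://127.0.0.1:5001/api/v0"
    // Resulting Opts:    {"encoding": "json", "stream-channels": "true"}
    return NewRequest(ctx, "127.0.0.1:5001", "version")
}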

127
client/rpc/requestbuilder.go Normal file
View File

@ -0,0 +1,127 @@
package rpc
import (
"bytes"
"context"
"fmt"
"io"
"strconv"
"strings"
"github.com/ipfs/boxo/files"
)
type RequestBuilder interface {
Arguments(args ...string) RequestBuilder
BodyString(body string) RequestBuilder
BodyBytes(body []byte) RequestBuilder
Body(body io.Reader) RequestBuilder
FileBody(body io.Reader) RequestBuilder
Option(key string, value interface{}) RequestBuilder
Header(name, value string) RequestBuilder
Send(ctx context.Context) (*Response, error)
Exec(ctx context.Context, res interface{}) error
}
// requestBuilder is an IPFS commands request builder.
type requestBuilder struct {
command string
args []string
opts map[string]string
headers map[string]string
body io.Reader
shell *HttpApi
}
// Arguments adds the arguments to the args.
func (r *requestBuilder) Arguments(args ...string) RequestBuilder {
r.args = append(r.args, args...)
return r
}
// BodyString sets the request body to the given string.
func (r *requestBuilder) BodyString(body string) RequestBuilder {
return r.Body(strings.NewReader(body))
}
// BodyBytes sets the request body to the given buffer.
func (r *requestBuilder) BodyBytes(body []byte) RequestBuilder {
return r.Body(bytes.NewReader(body))
}
// Body sets the request body to the given reader.
func (r *requestBuilder) Body(body io.Reader) RequestBuilder {
r.body = body
return r
}
// FileBody sets the request body to the given reader wrapped in a multipart reader.
func (r *requestBuilder) FileBody(body io.Reader) RequestBuilder {
pr, _ := files.NewReaderPathFile("/dev/stdin", io.NopCloser(body), nil)
d := files.NewMapDirectory(map[string]files.Node{"": pr})
r.body = files.NewMultiFileReader(d, false)
return r
}
// Option sets the given option.
func (r *requestBuilder) Option(key string, value interface{}) RequestBuilder {
var s string
switch v := value.(type) {
case bool:
s = strconv.FormatBool(v)
case string:
s = v
case []byte:
s = string(v)
default:
// slow case.
s = fmt.Sprint(value)
}
if r.opts == nil {
r.opts = make(map[string]string, 1)
}
r.opts[key] = s
return r
}
// Header sets the given header.
func (r *requestBuilder) Header(name, value string) RequestBuilder {
if r.headers == nil {
r.headers = make(map[string]string, 1)
}
r.headers[name] = value
return r
}
// Send sends the request and returns the response.
func (r *requestBuilder) Send(ctx context.Context) (*Response, error) {
r.shell.applyGlobal(r)
req := NewRequest(ctx, r.shell.url, r.command, r.args...)
req.Opts = r.opts
req.Headers = r.headers
req.Body = r.body
return req.Send(&r.shell.httpcli)
}
// Exec sends the request and decodes the response.
func (r *requestBuilder) Exec(ctx context.Context, res interface{}) error {
httpRes, err := r.Send(ctx)
if err != nil {
return err
}
if res == nil {
lateErr := httpRes.Close()
if httpRes.Error != nil {
return httpRes.Error
}
return lateErr
}
return httpRes.decode(res)
}
var _ RequestBuilder = &requestBuilder{}
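Editor's note (illustrative sketch, not part of this commit): how the fluent builder above is typically driven, roughly mirroring what PinAPI.IsPinned assembles. The helper and output struct are hypothetical; it assumes the HttpApi.Request method defined elsewhere in this diff.
func pinType(ctx context.Context, api *HttpApi, p path.Path) (string, error) {
    var out struct {
        Keys map[string]struct{ Type string }
    }
    // Exec decodes a single JSON object; Send is used instead by the
    // streaming endpoints (pin/ls, pin/verify, pubsub/sub, ls).
    err := api.Request("pin/ls").
        Option("type", "all").
        Option("arg", p.String()).
        Exec(ctx, &out)
    if err != nil {
        return "", err
    }
    for _, v := range out.Keys {
        return v.Type, nil
    }
    return "", errors.New("not pinned")
}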

170
client/rpc/response.go Normal file
View File

@ -0,0 +1,170 @@
package rpc
import (
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"os"
"github.com/ipfs/boxo/files"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
)
type Error = cmds.Error
type trailerReader struct {
resp *http.Response
}
func (r *trailerReader) Read(b []byte) (int, error) {
n, err := r.resp.Body.Read(b)
if err != nil {
if e := r.resp.Trailer.Get(cmdhttp.StreamErrHeader); e != "" {
err = errors.New(e)
}
}
return n, err
}
func (r *trailerReader) Close() error {
return r.resp.Body.Close()
}
type Response struct {
Output io.ReadCloser
Error *Error
}
func (r *Response) Close() error {
if r.Output != nil {
// drain output (response body)
_, err1 := io.Copy(io.Discard, r.Output)
err2 := r.Output.Close()
if err1 != nil {
return err1
}
return err2
}
return nil
}
// Cancel aborts a running request (without draining the response body)
func (r *Response) Cancel() error {
if r.Output != nil {
return r.Output.Close()
}
return nil
}
// decode reads the response body and decodes it as JSON
func (r *Response) decode(dec interface{}) error {
if r.Error != nil {
return r.Error
}
err := json.NewDecoder(r.Output).Decode(dec)
err2 := r.Close()
if err != nil {
return err
}
return err2
}
func (r *Request) Send(c *http.Client) (*Response, error) {
url := r.getURL()
req, err := http.NewRequest("POST", url, r.Body)
if err != nil {
return nil, err
}
req = req.WithContext(r.Ctx)
// Add any headers that were supplied via the requestBuilder.
for k, v := range r.Headers {
req.Header.Add(k, v)
}
if fr, ok := r.Body.(*files.MultiFileReader); ok {
req.Header.Set("Content-Type", "multipart/form-data; boundary="+fr.Boundary())
req.Header.Set("Content-Disposition", "form-data; name=\"files\"")
}
resp, err := c.Do(req)
if err != nil {
return nil, err
}
contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
if err != nil {
return nil, err
}
nresp := new(Response)
nresp.Output = &trailerReader{resp}
if resp.StatusCode >= http.StatusBadRequest {
e := new(Error)
switch {
case resp.StatusCode == http.StatusNotFound:
e.Message = "command not found"
case contentType == "text/plain":
out, err := io.ReadAll(resp.Body)
if err != nil {
fmt.Fprintf(os.Stderr, "ipfs-shell: warning! response (%d) read error: %s\n", resp.StatusCode, err)
}
e.Message = string(out)
// set special status codes.
switch resp.StatusCode {
case http.StatusNotFound, http.StatusBadRequest:
e.Code = cmds.ErrClient
case http.StatusTooManyRequests:
e.Code = cmds.ErrRateLimited
case http.StatusForbidden:
e.Code = cmds.ErrForbidden
}
case contentType == "application/json":
if err = json.NewDecoder(resp.Body).Decode(e); err != nil {
fmt.Fprintf(os.Stderr, "ipfs-shell: warning! response (%d) unmarshal error: %s\n", resp.StatusCode, err)
}
default:
// This is a server-side bug (probably).
e.Code = cmds.ErrImplementation
fmt.Fprintf(os.Stderr, "ipfs-shell: warning! unhandled response (%d) encoding: %s\n", resp.StatusCode, contentType)
out, err := io.ReadAll(resp.Body)
if err != nil {
fmt.Fprintf(os.Stderr, "ipfs-shell: response (%d) read error: %s\n", resp.StatusCode, err)
}
e.Message = fmt.Sprintf("unknown ipfs-shell error encoding: %q - %q", contentType, out)
}
nresp.Error = e
nresp.Output = nil
// drain body and close
_, _ = io.Copy(io.Discard, resp.Body)
_ = resp.Body.Close()
}
return nresp, nil
}
func (r *Request) getURL() string {
values := make(url.Values)
for _, arg := range r.Args {
values.Add("arg", arg)
}
for k, v := range r.Opts {
values.Add(k, v)
}
return fmt.Sprintf("%s/%s?%s", r.ApiBase, r.Command, values.Encode())
}
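Editor's note (illustrative sketch, not part of this commit): a complete round trip using only NewRequest, Request.Send and Response from this package, assuming a local daemon on 127.0.0.1:5001. With the defaults from NewRequest, getURL produces a URL of the shape http://127.0.0.1:5001/api/v0/version?encoding=json&stream-channels=true (query keys are sorted by url.Values.Encode).
func daemonVersion(ctx context.Context) error {
    req := NewRequest(ctx, "127.0.0.1:5001", "version")
    resp, err := req.Send(http.DefaultClient)
    if err != nil {
        return err
    }
    defer resp.Close() // drains and closes the response body
    if resp.Error != nil {
        return resp.Error
    }
    // The body is a single JSON object; stream it to stdout as-is.
    _, err = io.Copy(os.Stdout, resp.Output)
    return err
}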

64
client/rpc/routing.go Normal file
View File

@ -0,0 +1,64 @@
package rpc
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"github.com/ipfs/boxo/coreiface/options"
"github.com/libp2p/go-libp2p/core/routing"
)
type RoutingAPI HttpApi
func (api *RoutingAPI) Get(ctx context.Context, key string) ([]byte, error) {
resp, err := api.core().Request("routing/get", key).Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
defer resp.Close()
var out routing.QueryEvent
dec := json.NewDecoder(resp.Output)
if err := dec.Decode(&out); err != nil {
return nil, err
}
res, err := base64.StdEncoding.DecodeString(out.Extra)
if err != nil {
return nil, err
}
return res, nil
}
func (api *RoutingAPI) Put(ctx context.Context, key string, value []byte, opts ...options.RoutingPutOption) error {
var cfg options.RoutingPutSettings
for _, o := range opts {
if err := o(&cfg); err != nil {
return err
}
}
resp, err := api.core().Request("routing/put", key).
Option("allow-offline", cfg.AllowOffline).
FileBody(bytes.NewReader(value)).
Send(ctx)
if err != nil {
return err
}
if resp.Error != nil {
return resp.Error
}
return nil
}
func (api *RoutingAPI) core() *HttpApi {
return (*HttpApi)(api)
}
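Editor's note (illustrative sketch, not part of this commit): a thin wrapper over the Get method above; api is assumed to be a coreiface.CoreAPI backed by this client, and the key syntax is defined by the daemon's routing/get RPC, not by this file.
func fetchRoutingValue(ctx context.Context, api iface.CoreAPI, key string) ([]byte, error) {
    // The key (for example an "/ipns/..." routing key) is passed verbatim to
    // routing/get; Get above extracts the value from the first query event.
    return api.Routing().Get(ctx, key)
}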

187
client/rpc/swarm.go Normal file
View File

@ -0,0 +1,187 @@
package rpc
import (
"context"
"time"
iface "github.com/ipfs/boxo/coreiface"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/multiformats/go-multiaddr"
)
type SwarmAPI HttpApi
func (api *SwarmAPI) Connect(ctx context.Context, pi peer.AddrInfo) error {
pidma, err := multiaddr.NewComponent("p2p", pi.ID.Pretty())
if err != nil {
return err
}
saddrs := make([]string, len(pi.Addrs))
for i, addr := range pi.Addrs {
saddrs[i] = addr.Encapsulate(pidma).String()
}
return api.core().Request("swarm/connect", saddrs...).Exec(ctx, nil)
}
func (api *SwarmAPI) Disconnect(ctx context.Context, addr multiaddr.Multiaddr) error {
return api.core().Request("swarm/disconnect", addr.String()).Exec(ctx, nil)
}
type connInfo struct {
addr multiaddr.Multiaddr
peer peer.ID
latency time.Duration
muxer string
direction network.Direction
streams []protocol.ID
}
func (c *connInfo) ID() peer.ID {
return c.peer
}
func (c *connInfo) Address() multiaddr.Multiaddr {
return c.addr
}
func (c *connInfo) Direction() network.Direction {
return c.direction
}
func (c *connInfo) Latency() (time.Duration, error) {
return c.latency, nil
}
func (c *connInfo) Streams() ([]protocol.ID, error) {
return c.streams, nil
}
func (api *SwarmAPI) Peers(ctx context.Context) ([]iface.ConnectionInfo, error) {
var resp struct {
Peers []struct {
Addr string
Peer string
Latency string
Muxer string
Direction network.Direction
Streams []struct {
Protocol string
}
}
}
err := api.core().Request("swarm/peers").
Option("streams", true).
Option("latency", true).
Exec(ctx, &resp)
if err != nil {
return nil, err
}
res := make([]iface.ConnectionInfo, len(resp.Peers))
for i, conn := range resp.Peers {
latency, _ := time.ParseDuration(conn.Latency)
out := &connInfo{
latency: latency,
muxer: conn.Muxer,
direction: conn.Direction,
}
out.peer, err = peer.Decode(conn.Peer)
if err != nil {
return nil, err
}
out.addr, err = multiaddr.NewMultiaddr(conn.Addr)
if err != nil {
return nil, err
}
out.streams = make([]protocol.ID, len(conn.Streams))
for i, p := range conn.Streams {
out.streams[i] = protocol.ID(p.Protocol)
}
res[i] = out
}
return res, nil
}
func (api *SwarmAPI) KnownAddrs(ctx context.Context) (map[peer.ID][]multiaddr.Multiaddr, error) {
var out struct {
Addrs map[string][]string
}
if err := api.core().Request("swarm/addrs").Exec(ctx, &out); err != nil {
return nil, err
}
res := map[peer.ID][]multiaddr.Multiaddr{}
for spid, saddrs := range out.Addrs {
addrs := make([]multiaddr.Multiaddr, len(saddrs))
for i, addr := range saddrs {
a, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
addrs[i] = a
}
pid, err := peer.Decode(spid)
if err != nil {
return nil, err
}
res[pid] = addrs
}
return res, nil
}
func (api *SwarmAPI) LocalAddrs(ctx context.Context) ([]multiaddr.Multiaddr, error) {
var out struct {
Strings []string
}
if err := api.core().Request("swarm/addrs/local").Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]multiaddr.Multiaddr, len(out.Strings))
for i, addr := range out.Strings {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
res[i] = ma
}
return res, nil
}
func (api *SwarmAPI) ListenAddrs(ctx context.Context) ([]multiaddr.Multiaddr, error) {
var out struct {
Strings []string
}
if err := api.core().Request("swarm/addrs/listen").Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]multiaddr.Multiaddr, len(out.Strings))
for i, addr := range out.Strings {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
res[i] = ma
}
return res, nil
}
func (api *SwarmAPI) core() *HttpApi {
return (*HttpApi)(api)
}
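Editor's note (illustrative sketch, not part of this commit): listing current connections through the Peers method above; api is assumed to be a coreiface.CoreAPI backed by this client.
func dumpPeers(ctx context.Context, api iface.CoreAPI) error {
    infos, err := api.Swarm().Peers(ctx)
    if err != nil {
        return err
    }
    for _, ci := range infos {
        latency, _ := ci.Latency() // always a nil error in this implementation
        streams, _ := ci.Streams()
        fmt.Printf("%s %s %s (%d streams)\n", ci.ID(), ci.Address(), latency, len(streams))
    }
    return nil
}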

230
client/rpc/unixfs.go Normal file
View File

@ -0,0 +1,230 @@
package rpc
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
iface "github.com/ipfs/boxo/coreiface"
caopts "github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
"github.com/ipfs/boxo/files"
unixfs "github.com/ipfs/boxo/ipld/unixfs"
unixfs_pb "github.com/ipfs/boxo/ipld/unixfs/pb"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
)
type addEvent struct {
Name string
Hash string `json:",omitempty"`
Bytes int64 `json:",omitempty"`
Size string `json:",omitempty"`
}
type UnixfsAPI HttpApi
func (api *UnixfsAPI) Add(ctx context.Context, f files.Node, opts ...caopts.UnixfsAddOption) (path.Resolved, error) {
options, _, err := caopts.UnixfsAddOptions(opts...)
if err != nil {
return nil, err
}
mht, ok := mh.Codes[options.MhType]
if !ok {
return nil, fmt.Errorf("unknowm mhType %d", options.MhType)
}
req := api.core().Request("add").
Option("hash", mht).
Option("chunker", options.Chunker).
Option("cid-version", options.CidVersion).
Option("fscache", options.FsCache).
Option("inline", options.Inline).
Option("inline-limit", options.InlineLimit).
Option("nocopy", options.NoCopy).
Option("only-hash", options.OnlyHash).
Option("pin", options.Pin).
Option("silent", options.Silent).
Option("progress", options.Progress)
if options.RawLeavesSet {
req.Option("raw-leaves", options.RawLeaves)
}
switch options.Layout {
case caopts.BalancedLayout:
// noop, default
case caopts.TrickleLayout:
req.Option("trickle", true)
}
d := files.NewMapDirectory(map[string]files.Node{"": f}) // unwrapped on the other side
req.Body(files.NewMultiFileReader(d, false))
var out addEvent
resp, err := req.Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
defer resp.Output.Close()
dec := json.NewDecoder(resp.Output)
loop:
for {
var evt addEvent
switch err := dec.Decode(&evt); err {
case nil:
case io.EOF:
break loop
default:
return nil, err
}
out = evt
if options.Events != nil {
ifevt := &iface.AddEvent{
Name: out.Name,
Size: out.Size,
Bytes: out.Bytes,
}
if out.Hash != "" {
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
ifevt.Path = path.IpfsPath(c)
}
select {
case options.Events <- ifevt:
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
c, err := cid.Parse(out.Hash)
if err != nil {
return nil, err
}
return path.IpfsPath(c), nil
}
type lsLink struct {
Name, Hash string
Size uint64
Type unixfs_pb.Data_DataType
Target string
}
type lsObject struct {
Hash string
Links []lsLink
}
type lsOutput struct {
Objects []lsObject
}
func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, opts ...caopts.UnixfsLsOption) (<-chan iface.DirEntry, error) {
options, err := caopts.UnixfsLsOptions(opts...)
if err != nil {
return nil, err
}
resp, err := api.core().Request("ls", p.String()).
Option("resolve-type", options.ResolveChildren).
Option("size", options.ResolveChildren).
Option("stream", true).
Send(ctx)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
dec := json.NewDecoder(resp.Output)
out := make(chan iface.DirEntry)
go func() {
defer resp.Close()
defer close(out)
for {
var link lsOutput
if err := dec.Decode(&link); err != nil {
if err == io.EOF {
return
}
select {
case out <- iface.DirEntry{Err: err}:
case <-ctx.Done():
}
return
}
if len(link.Objects) != 1 {
select {
case out <- iface.DirEntry{Err: errors.New("unexpected Objects len")}:
case <-ctx.Done():
}
return
}
if len(link.Objects[0].Links) != 1 {
select {
case out <- iface.DirEntry{Err: errors.New("unexpected Links len")}:
case <-ctx.Done():
}
return
}
l0 := link.Objects[0].Links[0]
c, err := cid.Decode(l0.Hash)
if err != nil {
select {
case out <- iface.DirEntry{Err: err}:
case <-ctx.Done():
}
return
}
var ftype iface.FileType
switch l0.Type {
case unixfs.TRaw, unixfs.TFile:
ftype = iface.TFile
case unixfs.THAMTShard, unixfs.TDirectory, unixfs.TMetadata:
ftype = iface.TDirectory
case unixfs.TSymlink:
ftype = iface.TSymlink
}
select {
case out <- iface.DirEntry{
Name: l0.Name,
Cid: c,
Size: l0.Size,
Type: ftype,
Target: l0.Target,
}:
case <-ctx.Done():
}
}
}()
return out, nil
}
func (api *UnixfsAPI) core() *HttpApi {
return (*HttpApi)(api)
}
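Editor's note (illustrative sketch, not part of this commit): adding a small in-memory file and streaming a directory listing through the methods above; api is assumed to be a coreiface.CoreAPI backed by this client and dir a path to a UnixFS directory.
func addAndList(ctx context.Context, api iface.CoreAPI, dir path.Path) error {
    p, err := api.Unixfs().Add(ctx, files.NewBytesFile([]byte("hello world")))
    if err != nil {
        return err
    }
    fmt.Println("added", p)
    entries, err := api.Unixfs().Ls(ctx, dir) // streamed one DirEntry at a time
    if err != nil {
        return err
    }
    for e := range entries {
        if e.Err != nil {
            return e.Err
        }
        fmt.Println(e.Name, e.Cid, e.Size)
    }
    return nil
}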

View File

@ -1,6 +1,9 @@
package config
const DefaultInlineDNSLink = false
const (
DefaultInlineDNSLink = false
DefaultDeserializedResponses = true
)
type GatewaySpec struct {
// Paths is explicit list of path prefixes that should be handled by
@ -25,6 +28,11 @@ type GatewaySpec struct {
// (FQDN) into a single DNS label in order to interop with wildcard TLS certs
// and Origin per CID isolation provided by rules like https://publicsuffix.org
InlineDNSLink Flag
// DeserializedResponses configures whether this gateway is able to serve
// deserialized responses. Disabling this option enables a Trustless Gateway, as per:
// https://specs.ipfs.tech/http-gateways/trustless-gateway/.
DeserializedResponses Flag
}
// Gateway contains options for the HTTP gateway server.
@ -56,6 +64,12 @@ type Gateway struct {
// This flag can be overridden per FQDN in PublicGateways.
NoDNSLink bool
// DeserializedResponses configures whether this gateway responds with deserialized
// responses. Disabling this option enables a trustless-only gateway, as per:
// https://specs.ipfs.tech/http-gateways/trustless-gateway/. This can
// be overridden per FQDN in PublicGateways.
DeserializedResponses Flag
// PublicGateways configures behavior of known public gateways.
// Each key is a fully qualified domain name (FQDN).
PublicGateways map[string]*GatewaySpec

View File

@ -2,9 +2,10 @@ package config
type Internal struct {
// All marked as omitempty since we are expecting to make changes to all subcomponents of Internal
Bitswap *InternalBitswap `json:",omitempty"`
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"`
Libp2pForceReachability *OptionalString `json:",omitempty"`
Bitswap *InternalBitswap `json:",omitempty"`
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"`
Libp2pForceReachability *OptionalString `json:",omitempty"`
BackupBootstrapInterval *OptionalDuration `json:",omitempty"`
}
type InternalBitswap struct {

View File

@ -3,16 +3,16 @@ package bootstrap
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"sync"
"sync/atomic"
"time"
logging "github.com/ipfs/go-log"
"github.com/jbenet/goprocess"
"github.com/jbenet/goprocess/context"
"github.com/jbenet/goprocess/periodic"
goprocessctx "github.com/jbenet/goprocess/context"
periodicproc "github.com/jbenet/goprocess/periodic"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
@ -50,13 +50,26 @@ type BootstrapConfig struct {
// for the bootstrap process to use. This makes it possible for clients
// to control the peers the process uses at any moment.
BootstrapPeers func() []peer.AddrInfo
// BackupBootstrapInterval governs the periodic interval at which the node will
// attempt to save connected nodes to use as temporary bootstrap peers.
BackupBootstrapInterval time.Duration
// MaxBackupBootstrapSize controls the maximum number of peers we're saving
// as backup bootstrap peers.
MaxBackupBootstrapSize int
SaveBackupBootstrapPeers func(context.Context, []peer.AddrInfo)
LoadBackupBootstrapPeers func(context.Context) []peer.AddrInfo
}
// DefaultBootstrapConfig specifies default sane parameters for bootstrapping.
var DefaultBootstrapConfig = BootstrapConfig{
MinPeerThreshold: 4,
Period: 30 * time.Second,
ConnectionTimeout: (30 * time.Second) / 3, // Perod / 3
MinPeerThreshold: 4,
Period: 30 * time.Second,
ConnectionTimeout: (30 * time.Second) / 3, // Period / 3
BackupBootstrapInterval: 1 * time.Hour,
MaxBackupBootstrapSize: 20,
}
func BootstrapConfigWithPeers(pis []peer.AddrInfo) BootstrapConfig {
@ -90,6 +103,9 @@ func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConf
log.Debugf("%s bootstrap error: %s", id, err)
}
// Exit the first call (triggered independently by `proc.Go`, not `Tick`)
// only after being done with the *single* Routing.Bootstrap call. Following
// periodic calls (`Tick`) will not block on this.
<-doneWithRound
}
@ -108,9 +124,100 @@ func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConf
doneWithRound <- struct{}{}
close(doneWithRound) // it no longer blocks periodic
startSavePeersAsTemporaryBootstrapProc(cfg, host, proc)
return proc, nil
}
// Alongside the main bootstrap process we also run a secondary one that saves
// connected peers as a backup measure in case we can't connect to the official
// bootstrap ones. These peers will serve as *temporary* bootstrap nodes.
func startSavePeersAsTemporaryBootstrapProc(cfg BootstrapConfig, host host.Host, bootstrapProc goprocess.Process) {
savePeersFn := func(worker goprocess.Process) {
ctx := goprocessctx.OnClosingContext(worker)
if err := saveConnectedPeersAsTemporaryBootstrap(ctx, host, cfg); err != nil {
log.Debugf("saveConnectedPeersAsTemporaryBootstrap error: %s", err)
}
}
savePeersProc := periodicproc.Tick(cfg.BackupBootstrapInterval, savePeersFn)
// When the main bootstrap process ends, also terminate the 'save connected
// peers' one. Coupling the two seems the easiest way to handle this backup
// process without additional complexity.
go func() {
<-bootstrapProc.Closing()
savePeersProc.Close()
}()
// Run the first round now (after the first bootstrap process has finished),
// as BackupBootstrapInterval can be much longer than the bootstrap period.
savePeersProc.Go(savePeersFn)
}
func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
// Randomize the list of connected peers, we don't prioritize anyone.
connectedPeers := randomizeList(host.Network().Peers())
bootstrapPeers := cfg.BootstrapPeers()
backupPeers := make([]peer.AddrInfo, 0, cfg.MaxBackupBootstrapSize)
// Choose peers to save and filter out the ones that are already bootstrap nodes.
for _, p := range connectedPeers {
found := false
for _, bootstrapPeer := range bootstrapPeers {
if p == bootstrapPeer.ID {
found = true
break
}
}
if !found {
backupPeers = append(backupPeers, peer.AddrInfo{
ID: p,
Addrs: host.Network().Peerstore().Addrs(p),
})
}
if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
break
}
}
// If we didn't reach the target number use previously stored connected peers.
if len(backupPeers) < cfg.MaxBackupBootstrapSize {
oldSavedPeers := cfg.LoadBackupBootstrapPeers(ctx)
log.Debugf("missing %d peers to reach backup bootstrap target of %d, trying from previous list of %d saved peers",
cfg.MaxBackupBootstrapSize-len(backupPeers), cfg.MaxBackupBootstrapSize, len(oldSavedPeers))
// Add some of the old saved peers. Ensure we don't duplicate them.
for _, p := range oldSavedPeers {
found := false
for _, sp := range backupPeers {
if p.ID == sp.ID {
found = true
break
}
}
if !found {
backupPeers = append(backupPeers, p)
}
if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
break
}
}
}
cfg.SaveBackupBootstrapPeers(ctx, backupPeers)
log.Debugf("saved %d peers (of %d target) as bootstrap backup in the config", len(backupPeers), cfg.MaxBackupBootstrapSize)
return nil
}
// Connect to as many peers as needed to reach BootstrapConfig.MinPeerThreshold.
// Peers can be original bootstrap or temporary ones (drawn from a list of
// persisted previously connected peers).
func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
@ -127,35 +234,58 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er
id, len(connected), cfg.MinPeerThreshold)
return nil
}
numToDial := cfg.MinPeerThreshold - len(connected)
numToDial := cfg.MinPeerThreshold - len(connected) // numToDial > 0
// filter out bootstrap nodes we are already connected to
var notConnected []peer.AddrInfo
for _, p := range peers {
if host.Network().Connectedness(p.ID) != network.Connected {
notConnected = append(notConnected, p)
if len(peers) > 0 {
numToDial -= int(peersConnect(ctx, host, peers, numToDial, true))
if numToDial <= 0 {
return nil
}
}
// if connected to all bootstrap peer candidates, exit
if len(notConnected) < 1 {
log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
return ErrNotEnoughBootstrapPeers
log.Debugf("not enough bootstrap peers to fill the remaining target of %d connections, trying backup list", numToDial)
tempBootstrapPeers := cfg.LoadBackupBootstrapPeers(ctx)
if len(tempBootstrapPeers) > 0 {
numToDial -= int(peersConnect(ctx, host, tempBootstrapPeers, numToDial, false))
if numToDial <= 0 {
return nil
}
}
// connect to a random susbset of bootstrap candidates
randSubset := randomSubsetOfPeers(notConnected, numToDial)
log.Debugf("tried both original bootstrap peers and temporary ones but still missing target of %d connections", numToDial)
log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
return bootstrapConnect(ctx, host, randSubset)
return ErrNotEnoughBootstrapPeers
}
func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.AddrInfo) error {
if len(peers) < 1 {
return ErrNotEnoughBootstrapPeers
}
// Attempt to make `needed` connections from the `availablePeers` list. Mark
// peers as either `permanent` or temporary when adding them to the Peerstore.
// Return the number of connections completed. We eagerly over-connect in parallel,
// so we might connect to more than needed.
// (We spawn as many goroutines, and attempt as many connections, as there are
// availablePeers, but this list comes from the restricted sets of original or
// temporary bootstrap nodes, which keeps it at a sane size.)
func peersConnect(ctx context.Context, ph host.Host, availablePeers []peer.AddrInfo, needed int, permanent bool) uint64 {
peers := randomizeList(availablePeers)
// Monitor the number of connections and stop if we reach the target.
var connected uint64
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
for {
select {
case <-ctx.Done():
return
case <-time.After(1 * time.Second):
if int(atomic.LoadUint64(&connected)) >= needed {
cancel()
return
}
}
}
}()
errs := make(chan error, len(peers))
var wg sync.WaitGroup
for _, p := range peers {
@ -164,45 +294,46 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.AddrInfo)
// fail/abort due to an expiring context.
// Also, performed asynchronously for dial speed.
if int(atomic.LoadUint64(&connected)) >= needed {
cancel()
break
}
wg.Add(1)
go func(p peer.AddrInfo) {
defer wg.Done()
log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)
ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL)
if err := ph.Connect(ctx, p); err != nil {
log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
errs <- err
// Skip addresses belonging to a peer we're already connected to.
// (Not a guarantee but a best-effort policy.)
if ph.Network().Connectedness(p.ID) == network.Connected {
return
}
log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)
if err := ph.Connect(ctx, p); err != nil {
if ctx.Err() != context.Canceled {
log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
}
return
}
if permanent {
// We're connecting to an original bootstrap peer, mark it as
// a permanent address (Connect will register it as TempAddrTTL).
ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL)
}
log.Infof("bootstrapped with %v", p.ID)
atomic.AddUint64(&connected, 1)
}(p)
}
wg.Wait()
// our failure condition is when no connection attempt succeeded.
// So drain the errs channel, counting the results.
close(errs)
count := 0
var err error
for err = range errs {
if err != nil {
count++
}
}
if count == len(peers) {
return fmt.Errorf("failed to bootstrap. %s", err)
}
return nil
return connected
}
func randomSubsetOfPeers(in []peer.AddrInfo, max int) []peer.AddrInfo {
if max > len(in) {
max = len(in)
}
out := make([]peer.AddrInfo, max)
for i, val := range rand.Perm(len(in))[:max] {
func randomizeList[T any](in []T) []T {
out := make([]T, len(in))
for i, val := range rand.Perm(len(in)) {
out[i] = in[val]
}
return out

View File

@ -7,9 +7,9 @@ import (
"github.com/libp2p/go-libp2p/core/test"
)
func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
func TestRandomizeAddressList(t *testing.T) {
var ps []peer.AddrInfo
sizeofSlice := 100
sizeofSlice := 10
for i := 0; i < sizeofSlice; i++ {
pid, err := test.RandPeerID()
if err != nil {
@ -18,7 +18,7 @@ func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
ps = append(ps, peer.AddrInfo{ID: pid})
}
out := randomSubsetOfPeers(ps, 2*sizeofSlice)
out := randomizeList(ps)
if len(out) != len(ps) {
t.Fail()
}

View File

@ -83,7 +83,7 @@ var keyGenCmd = &cmds.Command{
Tagline: "Create a new keypair",
},
Options: []cmds.Option{
cmds.StringOption(keyStoreTypeOptionName, "t", "type of the key to create: rsa, ed25519").WithDefault(keyStoreAlgorithmDefault),
cmds.StringOption(keyStoreTypeOptionName, "t", "type of the key to create: rsa, ed25519, secp256k1").WithDefault(keyStoreAlgorithmDefault),
cmds.IntOption(keyStoreSizeOptionName, "s", "size of the key to generate"),
ke.OptionIPNSBase,
},
@ -398,7 +398,7 @@ The PEM format allows for key generation outside of the IPFS node:
allowAnyKeyType, _ := req.Options[keyAllowAnyTypeOptionName].(bool)
if !allowAnyKeyType {
switch t := sk.(type) {
case *crypto.RsaPrivateKey, *crypto.Ed25519PrivateKey:
case *crypto.RsaPrivateKey, *crypto.Ed25519PrivateKey, *crypto.Secp256k1PrivateKey:
default:
return fmt.Errorf("key type %T is not allowed to be imported, only RSA or Ed25519;"+
" use flag --%s if you are sure of what you're doing",
@ -604,7 +604,7 @@ environment variable:
Arguments: []cmds.Argument{},
Options: []cmds.Option{
cmds.StringOption(oldKeyOptionName, "o", "Keystore name to use for backing up your existing identity"),
cmds.StringOption(keyStoreTypeOptionName, "t", "type of the key to create: rsa, ed25519").WithDefault(keyStoreAlgorithmDefault),
cmds.StringOption(keyStoreTypeOptionName, "t", "type of the key to create: rsa, ed25519, secp256k1").WithDefault(keyStoreAlgorithmDefault),
cmds.IntOption(keyStoreSizeOptionName, "s", "size of the key to generate"),
},
NoRemote: true,

View File

@ -17,7 +17,6 @@ import (
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/codec/dagcbor"
"github.com/ipld/go-ipld-prime/codec/dagjson"
ic "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
mbase "github.com/multiformats/go-multibase"
)
@ -216,33 +215,7 @@ Passing --verify will verify signature against provided public key.
PublicKey: id,
}
pub, err := id.ExtractPublicKey()
if err != nil {
// Make sure it works with all those RSA that cannot be embedded into the
// Peer ID.
if len(entry.PubKey) > 0 {
pub, err = ic.UnmarshalPublicKey(entry.PubKey)
if err != nil {
return err
}
// Verify the public key matches the name we are verifying.
entryID, err := peer.IDFromPublicKey(pub)
if err != nil {
return err
}
if id != entryID {
return fmt.Errorf("record public key does not match the verified name")
}
}
}
if err != nil {
return err
}
err = ipns.Validate(pub, &entry)
err = ipns.ValidateWithPeerID(id, &entry)
if err == nil {
result.Validation.Valid = true
} else {

View File

@ -648,13 +648,14 @@ var verifyPinCmd = &cmds.Command{
// PinVerifyRes is the result returned for each pin checked in "pin verify"
type PinVerifyRes struct {
Cid string
Cid string `json:",omitempty"`
Err string `json:",omitempty"`
PinStatus
}
// PinStatus is part of PinVerifyRes, do not use directly
type PinStatus struct {
Ok bool
Ok bool `json:",omitempty"`
BadNodes []BadNode `json:",omitempty"`
}
@ -669,7 +670,8 @@ type pinVerifyOpts struct {
includeOk bool
}
func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts, enc cidenc.Encoder) (<-chan interface{}, error) {
// FIXME: this implementation is duplicated with core/coreapi.PinAPI.Verify, remove this one and exclusively rely on CoreAPI.
func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts, enc cidenc.Encoder) (<-chan any, error) {
visited := make(map[cid.Cid]PinStatus)
bs := n.Blocks.Blockstore()
@ -715,18 +717,18 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts, enc ci
return status
}
out := make(chan interface{})
out := make(chan any)
go func() {
defer close(out)
for p := range n.Pinning.RecursiveKeys(ctx) {
if p.Err != nil {
out <- p.Err
out <- PinVerifyRes{Err: p.Err.Error()}
return
}
pinStatus := checkPin(p.C)
if !pinStatus.Ok || opts.includeOk {
select {
case out <- &PinVerifyRes{enc.Encode(p.C), pinStatus}:
case out <- PinVerifyRes{Cid: enc.Encode(p.C), PinStatus: pinStatus}:
case <-ctx.Done():
return
}
@ -739,12 +741,18 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts, enc ci
// Format formats PinVerifyRes
func (r PinVerifyRes) Format(out io.Writer) {
if r.Err != "" {
fmt.Fprintf(out, "error: %s\n", r.Err)
return
}
if r.Ok {
fmt.Fprintf(out, "%s ok\n", r.Cid)
} else {
fmt.Fprintf(out, "%s broken\n", r.Cid)
for _, e := range r.BadNodes {
fmt.Fprintf(out, " %s: %s\n", e.Cid, e.Err)
}
return
}
fmt.Fprintf(out, "%s broken\n", r.Cid)
for _, e := range r.BadNodes {
fmt.Fprintf(out, " %s: %s\n", e.Cid, e.Err)
}
}

View File

@ -17,6 +17,7 @@ import (
var log = logging.Logger("core/commands")
var ErrNotOnline = errors.New("this command must be run in online mode. Try running 'ipfs daemon' first")
var ErrSelfUnsupported = errors.New("finding your own node in the DHT is currently not supported")
const (
RepoDirOption = "repo-dir"

View File

@ -301,6 +301,10 @@ var findPeerRoutingCmd = &cmds.Command{
return err
}
if pid == nd.Identity {
return ErrSelfUnsupported
}
ctx, cancel := context.WithCancel(req.Context)
ctx, events := routing.RegisterForQueryEvents(ctx)

View File

@ -11,10 +11,13 @@ package core
import (
"context"
"encoding/json"
"io"
"time"
"github.com/ipfs/boxo/filestore"
pin "github.com/ipfs/boxo/pinning/pinner"
"github.com/ipfs/go-datastore"
bserv "github.com/ipfs/boxo/blockservice"
bstore "github.com/ipfs/boxo/blockstore"
@ -46,6 +49,7 @@ import (
"github.com/ipfs/boxo/namesys"
ipnsrp "github.com/ipfs/boxo/namesys/republisher"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/bootstrap"
"github.com/ipfs/kubo/core/node"
"github.com/ipfs/kubo/core/node/libp2p"
@ -165,12 +169,40 @@ func (n *IpfsNode) Bootstrap(cfg bootstrap.BootstrapConfig) error {
return ps
}
}
if cfg.SaveBackupBootstrapPeers == nil {
cfg.SaveBackupBootstrapPeers = func(ctx context.Context, peerList []peer.AddrInfo) {
err := n.saveTempBootstrapPeers(ctx, peerList)
if err != nil {
log.Warnf("saveTempBootstrapPeers failed: %s", err)
return
}
}
}
if cfg.LoadBackupBootstrapPeers == nil {
cfg.LoadBackupBootstrapPeers = func(ctx context.Context) []peer.AddrInfo {
peerList, err := n.loadTempBootstrapPeers(ctx)
if err != nil {
log.Warnf("loadTempBootstrapPeers failed: %s", err)
return nil
}
return peerList
}
}
repoConf, err := n.Repo.Config()
if err != nil {
return err
}
if repoConf.Internal.BackupBootstrapInterval != nil {
cfg.BackupBootstrapInterval = repoConf.Internal.BackupBootstrapInterval.WithDefault(time.Hour)
}
var err error
n.Bootstrapper, err = bootstrap.Bootstrap(n.Identity, n.PeerHost, n.Routing, cfg)
return err
}
var TempBootstrapPeersKey = datastore.NewKey("/local/temp_bootstrap_peers")
func (n *IpfsNode) loadBootstrapPeers() ([]peer.AddrInfo, error) {
cfg, err := n.Repo.Config()
if err != nil {
@ -180,6 +212,33 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.AddrInfo, error) {
return cfg.BootstrapPeers()
}
func (n *IpfsNode) saveTempBootstrapPeers(ctx context.Context, peerList []peer.AddrInfo) error {
ds := n.Repo.Datastore()
bytes, err := json.Marshal(config.BootstrapPeerStrings(peerList))
if err != nil {
return err
}
if err := ds.Put(ctx, TempBootstrapPeersKey, bytes); err != nil {
return err
}
return ds.Sync(ctx, TempBootstrapPeersKey)
}
func (n *IpfsNode) loadTempBootstrapPeers(ctx context.Context) ([]peer.AddrInfo, error) {
ds := n.Repo.Datastore()
bytes, err := ds.Get(ctx, TempBootstrapPeersKey)
if err != nil {
return nil, err
}
var addrs []string
if err := json.Unmarshal(bytes, &addrs); err != nil {
return nil, err
}
return config.ParseBootstrapPeers(addrs)
}
type ConstructPeerHostOpts struct {
AddrsFactory p2pbhost.AddrsFactory
DisableNatPortMap bool

View File

@ -82,6 +82,14 @@ func (api *KeyAPI) Generate(ctx context.Context, name string, opts ...caopts.Key
return nil, err
}
sk = priv
pk = pub
case "secp256k1":
priv, pub, err := crypto.GenerateSecp256k1Key(rand.Reader)
if err != nil {
return nil, err
}
sk = priv
pk = pub
default:

View File

@ -31,7 +31,7 @@ const testPeerID = "QmTFauExutTsy4XP6JbMFcw2Wa9645HJt2bTqL6qYDCKfe"
type NodeProvider struct{}
func (NodeProvider) MakeAPISwarm(ctx context.Context, fullIdentity bool, online bool, n int) ([]coreiface.CoreAPI, error) {
func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity bool, online bool, n int) ([]coreiface.CoreAPI, error) {
mn := mocknet.New()
nodes := make([]*core.IpfsNode, n)
@ -120,5 +120,5 @@ func (NodeProvider) MakeAPISwarm(ctx context.Context, fullIdentity bool, online
}
func TestIface(t *testing.T) {
tests.TestApi(&NodeProvider{})(t)
tests.TestApi(NodeProvider{})(t)
}

View File

@ -19,7 +19,7 @@ func TestPathUnixFSHAMTPartial(t *testing.T) {
defer cancel()
// Create a node
apis, err := NodeProvider{}.MakeAPISwarm(ctx, true, true, 1)
apis, err := NodeProvider{}.MakeAPISwarm(t, ctx, true, true, 1)
if err != nil {
t.Fatal(err)
}

View File

@ -28,22 +28,11 @@ import (
func GatewayOption(paths ...string) ServeOption {
return func(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
cfg, err := n.Repo.Config()
gwConfig, err := getGatewayConfig(n)
if err != nil {
return nil, err
}
headers := make(map[string][]string, len(cfg.Gateway.HTTPHeaders))
for h, v := range cfg.Gateway.HTTPHeaders {
headers[http.CanonicalHeaderKey(h)] = v
}
gateway.AddAccessControlHeaders(headers)
gwConfig := gateway.Config{
Headers: headers,
}
gwAPI, err := newGatewayBackend(n)
if err != nil {
return nil, err
@ -65,7 +54,7 @@ func GatewayOption(paths ...string) ServeOption {
func HostnameOption() ServeOption {
return func(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
cfg, err := n.Repo.Config()
gwConfig, err := getGatewayConfig(n)
if err != nil {
return nil, err
}
@ -75,9 +64,8 @@ func HostnameOption() ServeOption {
return nil, err
}
publicGateways := convertPublicGateways(cfg.Gateway.PublicGateways)
childMux := http.NewServeMux()
mux.HandleFunc("/", gateway.WithHostname(childMux, gwAPI, publicGateways, cfg.Gateway.NoDNSLink).ServeHTTP)
mux.HandleFunc("/", gateway.WithHostname(gwConfig, gwAPI, childMux).ServeHTTP)
return childMux, nil
}
}
@ -212,30 +200,49 @@ var defaultKnownGateways = map[string]*gateway.Specification{
"localhost": subdomainGatewaySpec,
}
func convertPublicGateways(publicGateways map[string]*config.GatewaySpec) map[string]*gateway.Specification {
gws := map[string]*gateway.Specification{}
// First, implicit defaults such as subdomain gateway on localhost
for hostname, gw := range defaultKnownGateways {
gws[hostname] = gw
func getGatewayConfig(n *core.IpfsNode) (gateway.Config, error) {
cfg, err := n.Repo.Config()
if err != nil {
return gateway.Config{}, err
}
// Then apply values from Gateway.PublicGateways, if present in the config
for hostname, gw := range publicGateways {
// Parse configuration headers and add the default Access Control Headers.
headers := make(map[string][]string, len(cfg.Gateway.HTTPHeaders))
for h, v := range cfg.Gateway.HTTPHeaders {
headers[http.CanonicalHeaderKey(h)] = v
}
gateway.AddAccessControlHeaders(headers)
// Initialize gateway configuration, with empty PublicGateways, handled after.
gwCfg := gateway.Config{
Headers: headers,
DeserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses),
NoDNSLink: cfg.Gateway.NoDNSLink,
PublicGateways: map[string]*gateway.Specification{},
}
// Add default implicit known gateways, such as subdomain gateway on localhost.
for hostname, gw := range defaultKnownGateways {
gwCfg.PublicGateways[hostname] = gw
}
// Apply values from cfg.Gateway.PublicGateways if they exist.
for hostname, gw := range cfg.Gateway.PublicGateways {
if gw == nil {
// Remove any implicit defaults, if present. This is useful when one
// wants to disable subdomain gateway on localhost etc.
delete(gws, hostname)
// wants to disable subdomain gateway on localhost, etc.
delete(gwCfg.PublicGateways, hostname)
continue
}
gws[hostname] = &gateway.Specification{
Paths: gw.Paths,
NoDNSLink: gw.NoDNSLink,
UseSubdomains: gw.UseSubdomains,
InlineDNSLink: gw.InlineDNSLink.WithDefault(config.DefaultInlineDNSLink),
gwCfg.PublicGateways[hostname] = &gateway.Specification{
Paths: gw.Paths,
NoDNSLink: gw.NoDNSLink,
UseSubdomains: gw.UseSubdomains,
InlineDNSLink: gw.InlineDNSLink.WithDefault(config.DefaultInlineDNSLink),
DeserializedResponses: gw.DeserializedResponses.WithDefault(gwCfg.DeserializedResponses),
}
}
return gws
return gwCfg, nil
}

View File

@ -14,6 +14,7 @@ import (
core "github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/coreapi"
repo "github.com/ipfs/kubo/repo"
"github.com/stretchr/testify/assert"
iface "github.com/ipfs/boxo/coreiface"
nsopts "github.com/ipfs/boxo/coreiface/options/namesys"
@ -173,3 +174,42 @@ func TestVersion(t *testing.T) {
t.Fatalf("response doesn't contain protocol version:\n%s", s)
}
}
func TestDeserializedResponsesInheritance(t *testing.T) {
for _, testCase := range []struct {
globalSetting config.Flag
gatewaySetting config.Flag
expectedGatewaySetting bool
}{
{config.True, config.Default, true},
{config.False, config.Default, false},
{config.False, config.True, true},
{config.True, config.False, false},
} {
c := config.Config{
Identity: config.Identity{
PeerID: "QmTFauExutTsy4XP6JbMFcw2Wa9645HJt2bTqL6qYDCKfe", // required by offline node
},
Gateway: config.Gateway{
DeserializedResponses: testCase.globalSetting,
PublicGateways: map[string]*config.GatewaySpec{
"example.com": {
DeserializedResponses: testCase.gatewaySetting,
},
},
},
}
r := &repo.Mock{
C: c,
D: syncds.MutexWrap(datastore.NewMapDatastore()),
}
n, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
assert.NoError(t, err)
gwCfg, err := getGatewayConfig(n)
assert.NoError(t, err)
assert.Contains(t, gwCfg.PublicGateways, "example.com")
assert.Equal(t, testCase.expectedGatewaySetting, gwCfg.PublicGateways["example.com"].DeserializedResponses)
}
}

View File

@ -34,7 +34,16 @@ func NewMockNode() (*core.IpfsNode, error) {
}
func MockHostOption(mn mocknet.Mocknet) libp2p2.HostOption {
return func(id peer.ID, ps pstore.Peerstore, _ ...libp2p.Option) (host.Host, error) {
return func(id peer.ID, ps pstore.Peerstore, opts ...libp2p.Option) (host.Host, error) {
var cfg libp2p.Config
if err := cfg.Apply(opts...); err != nil {
return nil, err
}
// The mocknet does not use the provided libp2p.Option values. These options
// include the listening addresses we want our peer to listen on. Therefore, we
// have to manually parse the configuration and add them here.
ps.AddAddrs(id, cfg.ListenAddrs, pstore.PermanentAddrTTL)
return mn.AddPeerWithPeerstore(id, ps)
}
}

View File

@ -155,7 +155,7 @@ func LibP2P(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part
fx.Provide(libp2p.RelayTransport(enableRelayTransport)),
fx.Provide(libp2p.RelayService(enableRelayService, cfg.Swarm.RelayService)),
fx.Provide(libp2p.Transports(cfg.Swarm.Transports)),
fx.Invoke(libp2p.StartListening(cfg.Addresses.Swarm)),
fx.Provide(libp2p.ListenOn(cfg.Addresses.Swarm)),
fx.Invoke(libp2p.SetupDiscovery(cfg.Discovery.MDNS.Enabled)),
fx.Provide(libp2p.ForceReachability(cfg.Internal.Libp2pForceReachability)),
fx.Provide(libp2p.HolePunching(cfg.Swarm.EnableHolePunching, enableRelayClient)),

View File

@ -4,7 +4,6 @@ import (
"fmt"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic"
ma "github.com/multiformats/go-multiaddr"
mamask "github.com/whyrusleeping/multiaddr-filter"
@ -99,37 +98,12 @@ func AddrsFactory(announce []string, appendAnnouce []string, noAnnounce []string
}
}
func listenAddresses(addresses []string) ([]ma.Multiaddr, error) {
listen := make([]ma.Multiaddr, len(addresses))
for i, addr := range addresses {
maddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, fmt.Errorf("failure to parse config.Addresses.Swarm: %s", addresses)
func ListenOn(addresses []string) interface{} {
return func() (opts Libp2pOpts) {
return Libp2pOpts{
Opts: []libp2p.Option{
libp2p.ListenAddrStrings(addresses...),
},
}
listen[i] = maddr
}
return listen, nil
}
func StartListening(addresses []string) func(host host.Host) error {
return func(host host.Host) error {
listenAddrs, err := listenAddresses(addresses)
if err != nil {
return err
}
// Actually start listening:
if err := host.Network().Listen(listenAddrs...); err != nil {
return err
}
// list out our addresses
addrs, err := host.Network().InterfaceListenAddresses()
if err != nil {
return err
}
log.Infof("Swarm listening at: %s", addrs)
return nil
}
}

View File

@ -63,6 +63,7 @@ This section covers tasks to be done ahead of the release.
- open an access request in the [pldw](https://github.com/protocol/pldw/issues/new/choose)
- [example](https://github.com/protocol/pldw/issues/158)
- [ ] [kuboreleaser](https://github.com/ipfs/kuboreleaser) checked out on your system (_only if you're using [kuboreleaser](https://github.com/ipfs/kuboreleaser)_)
- [ ] [Thunderdome](https://github.com/ipfs-shipyard/thunderdome) checked out on your system and configured (see the [Thunderdome release docs](./releases_thunderdome.md) for setup)
- [ ] [docker](https://docs.docker.com/get-docker/) installed on your system (_only if you're using [kuboreleaser](https://github.com/ipfs/kuboreleaser)_)
- [ ] [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed on your system (_only if you're **NOT** using [kuboreleaser](https://github.com/ipfs/kuboreleaser)_)
- [ ] [zsh](https://github.com/ohmyzsh/ohmyzsh/wiki/Installing-ZSH#install-and-set-up-zsh-as-default) installed on your system
@ -102,6 +103,8 @@ This section covers tasks to be done during each release.
- do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
- do **NOT** delete the `release-vX.Y` branch
</details>
- [ ] Run Thunderdome testing, see the [Thunderdome release docs](./releases_thunderdome.md) for details
- [ ] create a PR and merge the experiment config into Thunderdome
- [ ] Create the release tag <details><summary>using `kuboreleaser release --version vX.Y.Z(-rcN) tag` or ...</summary>
- This is a dangerous operation! Go and Docker publishing are difficult to reverse! Have the release reviewer verify all the commands marked with ⚠️!
- [ ] ⚠️ ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) tag the HEAD commit using `git tag -s vX.Y.Z(-RCN) -m 'Prerelease X.Y.Z(-RCN)'`

View File

@ -116,6 +116,478 @@ You can read more about the rationale behind this decision on the [tracking issu
### 📝 Changelog
<details><summary>Full Changelog</summary>
- github.com/ipfs/kubo:
- fix: deadlock on retrieving WebTransport addresses (#9857) ([ipfs/kubo#9857](https://github.com/ipfs/kubo/pull/9857))
- docs(config): remove mentions of relay v1 (#9860) ([ipfs/kubo#9860](https://github.com/ipfs/kubo/pull/9860))
- Merge branch 'master' into merge-release-v0.19.2
- docs: add changelog for v0.19.2
- feat: webui@3.0.0 (#9835) ([ipfs/kubo#9835](https://github.com/ipfs/kubo/pull/9835))
- fix: use default HTTP routers when FullRT DHT client is used (#9841) ([ipfs/kubo#9841](https://github.com/ipfs/kubo/pull/9841))
- chore: update version
- docs: add `ipfs pubsub` deprecation reminder to changelog (#9827) ([ipfs/kubo#9827](https://github.com/ipfs/kubo/pull/9827))
- docs: preparing 0.20 changelog for release (#9799) ([ipfs/kubo#9799](https://github.com/ipfs/kubo/pull/9799))
- feat: boxo tracing and traceparent support (#9811) ([ipfs/kubo#9811](https://github.com/ipfs/kubo/pull/9811))
- chore: update version
- chore: update version
- update go-libp2p to v0.27.0
- docs: add optimistic provide feature description
- feat: add experimental optimistic provide
- fix(ci): speed up docker build (#9800) ([ipfs/kubo#9800](https://github.com/ipfs/kubo/pull/9800))
- feat(tracing): use OTEL_PROPAGATORS as per OTel spec (#9801) ([ipfs/kubo#9801](https://github.com/ipfs/kubo/pull/9801))
- docs: fix jaeger command (#9797) ([ipfs/kubo#9797](https://github.com/ipfs/kubo/pull/9797))
- Merge Release: v0.19.1 (#9794) ([ipfs/kubo#9794](https://github.com/ipfs/kubo/pull/9794))
- chore: upgrade OpenTelemetry dependencies (#9736) ([ipfs/kubo#9736](https://github.com/ipfs/kubo/pull/9736))
- test: fix flaky content routing over HTTP test (#9772) ([ipfs/kubo#9772](https://github.com/ipfs/kubo/pull/9772))
- feat: allow injecting custom path resolvers (#9750) ([ipfs/kubo#9750](https://github.com/ipfs/kubo/pull/9750))
- feat: add changelog entry for router timeouts for v0.19.1 (#9784) ([ipfs/kubo#9784](https://github.com/ipfs/kubo/pull/9784))
- feat(gw): new metrics and HTTP range support (#9786) ([ipfs/kubo#9786](https://github.com/ipfs/kubo/pull/9786))
- feat!: make --empty-repo default (#9758) ([ipfs/kubo#9758](https://github.com/ipfs/kubo/pull/9758))
- fix: remove timeout on default DHT operations (#9783) ([ipfs/kubo#9783](https://github.com/ipfs/kubo/pull/9783))
- refactor: switch gateway code to new API from go-libipfs (#9681) ([ipfs/kubo#9681](https://github.com/ipfs/kubo/pull/9681))
- test: port remote pinning tests to Go (#9720) ([ipfs/kubo#9720](https://github.com/ipfs/kubo/pull/9720))
- feat: add identify option to swarm peers command
- test: port routing DHT tests to Go (#9709) ([ipfs/kubo#9709](https://github.com/ipfs/kubo/pull/9709))
- test: fix autoclient flakiness (#9769) ([ipfs/kubo#9769](https://github.com/ipfs/kubo/pull/9769))
- test: skip flaky pubsub test (#9770) ([ipfs/kubo#9770](https://github.com/ipfs/kubo/pull/9770))
- chore: migrate go-libipfs to boxo
- feat: add tracing to the commands client
- feat: add client-side metrics for routing-v1 client
- test: increase max wait time for peering assertion
- feat: remove writable gateway (#9743) ([ipfs/kubo#9743](https://github.com/ipfs/kubo/pull/9743))
- Process Improvement: v0.18.0 ([ipfs/kubo#9484](https://github.com/ipfs/kubo/pull/9484))
- fix: deadlock while racing `ipfs dag import` and `ipfs repo gc`
- feat: improve dag/import (#9721) ([ipfs/kubo#9721](https://github.com/ipfs/kubo/pull/9721))
- ci: remove circleci config ([ipfs/kubo#9687](https://github.com/ipfs/kubo/pull/9687))
- docs: use fx.Decorate instead of fx.Replace in examples (#9725) ([ipfs/kubo#9725](https://github.com/ipfs/kubo/pull/9725))
- Create Changelog: v0.20 ([ipfs/kubo#9742](https://github.com/ipfs/kubo/pull/9742))
- Merge Release: v0.19.0 ([ipfs/kubo#9741](https://github.com/ipfs/kubo/pull/9741))
- feat(gateway): invalid CID returns 400 Bad Request (#9726) ([ipfs/kubo#9726](https://github.com/ipfs/kubo/pull/9726))
- fix: remove outdated changelog part ([ipfs/kubo#9739](https://github.com/ipfs/kubo/pull/9739))
- docs: 0.19 changelog ([ipfs/kubo#9707](https://github.com/ipfs/kubo/pull/9707))
- fix: canonicalize user defined headers
- fix: apply API.HTTPHeaders to /webui redirect
- feat: add heap allocs to 'ipfs diag profile'
- fix: future proof with > rcmgr.DefaultLimit for new enum rcmgr values
- test: add test for presarvation of unlimited configs for inbound systems
- fix: preserve Unlimited StreamsInbound in connmgr reconciliation
- test: fix flaky rcmgr test
- chore: deprecate the pubsub api
- Revert "chore: add hamt directory sharding test"
- chore: add hamt directory sharding test
- test: port peering test from sharness to Go
- test: use `T.TempDir` to create temporary test directory
- fix: --verify forgets the verified key
- test: name --verify forgets the verified key
- chore: fix toc in changelog for 0.18
- feat: add "autoclient" routing type
- test: parallelize more of rcmgr Go tests
- test: port legacy DHT tests to Go
- fix: t0116-gateway-cache.sh ([ipfs/kubo#9696](https://github.com/ipfs/kubo/pull/9696))
- docs: add bifrost to early testers ([ipfs/kubo#9699](https://github.com/ipfs/kubo/pull/9699))
- fix: typo in documentation for install path
- docs: fix typos
- Update Version: v0.19 ([ipfs/kubo#9698](https://github.com/ipfs/kubo/pull/9698))
- github.com/ipfs/go-block-format (v0.1.1 -> v0.1.2):
- chore: release v0.1.2
- Revert deprecation and go-libipfs/blocks stub types
- docs: deprecation notice [ci skip]
- github.com/ipfs/go-cid (v0.3.2 -> v0.4.1):
- v0.4.1
- Add unit test for unexpected eof
- Update cid.go
- CidFromReader should not wrap valid EOF return.
- chore: version 0.4.0
- feat: wrap parsing errors into ErrInvalidCid
- fix: use crypto/rand.Read
- Fix README.md example error (#146) ([ipfs/go-cid#146](https://github.com/ipfs/go-cid/pull/146))
- github.com/ipfs/go-delegated-routing (v0.7.0 -> v0.8.0):
- chore: release v0.8.0
- chore: migrate from go-ipns to boxo
- docs: add deprecation notice [ci skip]
- github.com/ipfs/go-graphsync (v0.14.1 -> v0.14.4):
- Update version to cover latest fixes (#419) ([ipfs/go-graphsync#419](https://github.com/ipfs/go-graphsync/pull/419))
- Bring changes from #412
- Bring changes from #391
- fix: calling message queue Shutdown twice causes panic (because close is called twice on done channel) (#414) ([ipfs/go-graphsync#414](https://github.com/ipfs/go-graphsync/pull/414))
- docs(CHANGELOG): update for v0.14.3
- fix: wire up proper linksystem to traverser (#411) ([ipfs/go-graphsync#411](https://github.com/ipfs/go-graphsync/pull/411))
- sync: update CI config files (#378) ([ipfs/go-graphsync#378](https://github.com/ipfs/go-graphsync/pull/378))
- chore: remove social links (#398) ([ipfs/go-graphsync#398](https://github.com/ipfs/go-graphsync/pull/398))
- Removes `main` branch callout.
- release v0.14.2
- github.com/ipfs/go-ipfs-blockstore (v1.2.0 -> v1.3.0):
- chore: release v1.3.0
- feat: stub and deprecate NewBlockstoreNoPrefix
- Accept options for blockstore: start with WriteThrough and NoPrefix
- Allow using a NewWriteThrough() blockstore.
- sync: update CI config files (#105) ([ipfs/go-ipfs-blockstore#105](https://github.com/ipfs/go-ipfs-blockstore/pull/105))
- feat: fast-path for PutMany, falling back to Put for single block call (#97) ([ipfs/go-ipfs-blockstore#97](https://github.com/ipfs/go-ipfs-blockstore/pull/97))
- github.com/ipfs/go-ipfs-cmds (v0.8.2 -> v0.9.0):
- chore: release v0.9.0
- chore: change go-libipfs to boxo
- github.com/ipfs/go-libipfs (v0.6.2 -> v0.7.0):
- chore: bump to 0.7.0 (#213) ([ipfs/go-libipfs#213](https://github.com/ipfs/go-libipfs/pull/213))
- feat: return 400 on /ipfs/invalid-cid (#205) ([ipfs/go-libipfs#205](https://github.com/ipfs/go-libipfs/pull/205))
- docs: add note in README that go-libipfs is not comprehensive (#163) ([ipfs/go-libipfs#163](https://github.com/ipfs/go-libipfs/pull/163))
- github.com/ipfs/go-merkledag (v0.9.0 -> v0.10.0):
- chore: bump version to 0.10.0
- fix: switch to crypto/rand.Read
- stop using the deprecated io/ioutil package
- github.com/ipfs/go-unixfs (v0.4.4 -> v0.4.5):
- chore: release v0.4.5
- chore: remove go-libipfs dependency
- github.com/ipfs/go-unixfsnode (v1.5.2 -> v1.6.0):
- chore: bump v1.6.0
- feat: add UnixFSPathSelectorBuilder ([ipfs/go-unixfsnode#45](https://github.com/ipfs/go-unixfsnode/pull/45))
- fix: update state to allow iter continuance on NotFound errors
- chore!: make PBLinkItr private - not intended for public use
- fix: propagate iteration errors
- github.com/ipld/go-car/v2 (v2.5.1 -> v2.9.1-0.20230325062757-fff0e4397a3d):
- chore: unmigrate from go-libipfs
- Create CODEOWNERS
- blockstore: give a direct access to the index for read operations
- blockstore: only close the file on error in OpenReadWrite, not OpenReadWriteFile
- fix: handle (and test) WholeCID vs not; fast Has() path for storage
- ReadWrite: faster Has() by using the in-memory index instead of reading on disk
- fix: let `extract` skip missing unixfs shard links
- fix: error when no files extracted
- fix: make -f optional, read from stdin if omitted
- fix: update cmd/car/README with latest description
- chore: add test cases for extract modes
- feat: extract accepts '-' as an output path for stdout
- feat: extract specific path, accept stdin as streaming input
- fix: if we don't read the full block data, don't error on !EOF
- blockstore: try to close during Finalize(), even in case of previous error
- ReadWrite: add an alternative FinalizeReadOnly+Close flow
- feat: add WithTrustedCar() reader option (#381) ([ipld/go-car#381](https://github.com/ipld/go-car/pull/381))
- blockstore: fast path for AllKeysChan using the index
- fix: switch to crypto/rand.Read
- stop using the deprecated io/ioutil package
- fix(doc): fix storage package doc formatting
- fix: return errors for unsupported operations
- chore: move insertionindex into store pkg
- chore: add experimental note
- fix: minor lint & windows fd test problems
- feat: docs for StorageCar interfaces
- feat: ReadableWritable; dedupe shared code
- feat: add Writable functionality to StorageCar
- feat: StorageCar as a Readable storage, separate from blockstore
- feat(blockstore): implement a streaming read only storage
- feat(cmd): add index create subcommand to create an external carv2 index ([ipld/go-car#350](https://github.com/ipld/go-car/pull/350))
- chore: bump version to 0.6.0
- fix: use goreleaser instead
- Allow using WalkOption in WriteCar function ([ipld/go-car#357](https://github.com/ipld/go-car/pull/357))
- fix: update go-block-format to the version that includes the stubs
- feat: upgrade from go-block-format to go-libipfs/blocks
- cleanup readme a bit to make the cli more discoverable (#353) ([ipld/go-car#353](https://github.com/ipld/go-car/pull/353))
- Update install instructions in README.md
- Add a debugging form for car files. (#341) ([ipld/go-car#341](https://github.com/ipld/go-car/pull/341))
- ([ipld/go-car#340](https://github.com/ipld/go-car/pull/340))
- github.com/ipld/go-codec-dagpb (v1.5.0 -> v1.6.0):
- Update version.json
- github.com/ipld/go-ipld-prime (v0.19.0 -> v0.20.0):
- Prepare v0.20.0
- fix(datamodel): add tests to Copy, make it complain on nil
- feat(dagcbor): mode to allow parsing undelimited streamed objects
- Fix mispatched package declaration.
- Add several pieces of docs to schema/dmt.
- Additional access to schema/dmt package; schema concatenation feature ([ipld/go-ipld-prime#483](https://github.com/ipld/go-ipld-prime/pull/483))
- Fix hash mismatch error on matching link pointer
- feat: support errors.Is for schema errors
- github.com/ipld/go-ipld-prime/storage/bsadapter (v0.0.0-20211210234204-ce2a1c70cd73 -> v0.0.0-20230102063945-1a409dc236dd):
- build(deps): bump github.com/ipfs/go-blockservice
- Fix mispatched package declaration.
- Add several pieces of docs to schema/dmt.
- Additional access to schema/dmt package; schema concatenation feature ([ipld/go-ipld-prime/storage/bsadapter#483](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/483))
- fix: go mod tidy
- build(deps): bump github.com/frankban/quicktest from 1.14.3 to 1.14.4
- Fix hash mismatch error on matching link pointer
- build(deps): bump github.com/warpfork/go-testmark from 0.10.0 to 0.11.0
- feat: support errors.Is for schema errors
- build(deps): bump github.com/multiformats/go-multicodec
- Prepare v0.19.0
- fix: correct json codec links & bytes handling
- build(deps): bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#468) ([ipld/go-ipld-prime/storage/bsadapter#468](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/468))
- build(deps): bump github.com/ipfs/go-cid from 0.3.0 to 0.3.2 (#466) ([ipld/go-ipld-prime/storage/bsadapter#466](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/466))
- build(deps): bump github.com/ipfs/go-cid in /storage/bsrvadapter (#464) ([ipld/go-ipld-prime/storage/bsadapter#464](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/464))
- test(basicnode): increase test coverage for int and map types (#454) ([ipld/go-ipld-prime/storage/bsadapter#454](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/454))
- build(deps): bump github.com/ipfs/go-cid in /storage/bsrvadapter
- build(deps): bump github.com/ipfs/go-cid from 0.2.0 to 0.3.0
- build(deps): bump github.com/multiformats/go-multicodec
- fix: remove reliance on ioutil
- fix: update sub-package modules
- build(deps): bump github.com/multiformats/go-multihash
- build(deps): bump github.com/ipfs/go-datastore in /storage/dsadapter
- update .github/workflows/go-check.yml
- update .github/workflows/go-test.yml
- run gofmt -s
- bump go.mod to Go 1.18 and run go fix
- bump go.mod to Go 1.18 and run go fix
- bump go.mod to Go 1.18 and run go fix
- bump go.mod to Go 1.18 and run go fix
- feat: add kinded union to gendemo
- fix: go mod 1.17 compat problems
- build(deps): bump github.com/ipfs/go-blockservice
- Prepare v0.18.0
- fix(deps): update benchmarks go.sum
- build(deps): bump github.com/multiformats/go-multihash
- feat(bindnode): add a BindnodeRegistry utility (#437) ([ipld/go-ipld-prime/storage/bsadapter#437](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/437))
- feat(bindnode): support full uint64 range
- chore(bindnode): remove typed functions for options
- chore(bindnode): docs and minor tweaks
- feat(bindnode): make Any converters work for List and Map values
- fix(bindnode): shorten converter option names, minor perf improvements
- fix(bindnode): only custom convert AssignNull for Any converter
- feat(bindnode): pass Null on to nullable custom converters
- chore(bindnode): config helper refactor w/ short-circuit
- feat(bindnode): add AddCustomTypeAnyConverter() to handle `Any` fields
- feat(bindnode): add AddCustomTypeXConverter() options for most scalar kinds
- chore(bindnode): back out of reflection for converters
- feat(bindnode): switch to converter functions instead of type
- feat(bindnode): allow custom type conversions with options
- feat: add release checklist (#442) ([ipld/go-ipld-prime/storage/bsadapter#442](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/442))
- Prepare v0.17.0
- feat: introduce UIntNode interface, used within DAG-CBOR codec
- add option to not parse beyond end of structure (#435) ([ipld/go-ipld-prime/storage/bsadapter#435](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/435))
- sync benchmarks go.sum
- build(deps): bump github.com/multiformats/go-multicodec
- patch: first draft. ([ipld/go-ipld-prime/storage/bsadapter#350](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/350))
- feat(bindnode): infer links and Any from Go types (#432) ([ipld/go-ipld-prime/storage/bsadapter#432](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/432))
- fix(codecs): error on cid.Undef links in dag{json,cbor} encoding (#433) ([ipld/go-ipld-prime/storage/bsadapter#433](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/433))
- chore(bindnode): add test for sub-node unwrapping
- fix(bindnode): more helpful error message for enum value footgun
- fix(bindnode): panic early if API has been passed ptr-to-ptr
- fix(deps): mod tidy for dependencies
- build(deps): bump github.com/warpfork/go-testmark from 0.3.0 to 0.10.0
- build(deps): bump github.com/multiformats/go-multicodec
- build(deps): bump github.com/ipfs/go-cid from 0.0.4 to 0.2.0
- build(deps): bump github.com/google/go-cmp from 0.5.7 to 0.5.8
- build(deps): bump github.com/frankban/quicktest from 1.14.2 to 1.14.3
- build(deps): bump github.com/ipfs/go-cid in /storage/bsrvadapter
- chore(deps): expand dependabot to sub-modules
- chore(deps): add dependabot config
- printer: fix printing of floats
- add version.json file (#411) ([ipld/go-ipld-prime/storage/bsadapter#411](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/411))
- ci: use GOFLAGS to control test tags
- ci: disable coverpkg using custom workflow insertion
- ci: add initial web3 unified-ci files
- fix: make 32-bit safe and stable & add to CI
- ci: add go-check.yml workflow from unified-ci
- ci: go mod tidy
- fix: staticcheck and govet fixes
- test: make tests work on Windows, add Windows to CI (#405) ([ipld/go-ipld-prime/storage/bsadapter#405](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/405))
- schema: enable inline types through dsl parser & compiler (#404) ([ipld/go-ipld-prime/storage/bsadapter#404](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/404))
- node/bindnode: allow nilable types for IPLD optional/nullable
- test(ci): enable macos in GitHub Actions
- test(gen-go): disable parallelism when testing on macos
- storage: update deps
- dsl support for stringjoin struct repr and stringprefix union repr ([ipld/go-ipld-prime/storage/bsadapter#397](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/397))
- codec/dagcbor: add DecodeOptions.ExperimentalDeterminism
- node/bindnode: add some more docs
- start testing on Go 1.18.x, drop Go 1.16.x
- readme: getting started pointers.
- readme: bindnode definitely needs a mention!
- Readme updates!
- datamodel: document that repr prototypes produce type nodes
- node/bindnode: minor fuzz improvements
- gengo: update readme.
- fix(dagcbor): don't accept trailing bytes
- schema/dmt: reject duplicate or missing union repr members
- node/bindnode: actually check schemadmt.Compile errors when fuzzing
- node/bindnode: avoid OOM when inferring from cyclic IPLD schemas
- schema/dmt: require enum reprs to refer valid members
- skip NaN/Inf errors for dag-json
- node/bindnode: refuse to decode empty union values
- schema/dmt: error in Compile if union reprs refer to unknown members
- node/bindnode: start fuzzing with schema/dmt and codec/dagcbor
- mark v0.16.0
- node/bindnode: enforce pointer requirement for nullable maps
- Implement WalkTransforming traversal (#376) ([ipld/go-ipld-prime/storage/bsadapter#376](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/376))
- docs(datamodel): add comment to LargeBytesNode
- Add partial-match traversal of large bytes (#375) ([ipld/go-ipld-prime/storage/bsadapter#375](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/375))
- Implement option to start traversals at a path ([ipld/go-ipld-prime/storage/bsadapter#358](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/358))
- add top-level "go value with schema" example
- Support optional `LargeBytesNode` interface (#372) ([ipld/go-ipld-prime/storage/bsadapter#372](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/372))
- node/bindnode: support pointers to datamodel.Node to bind with Any
- fix(bindnode): tuple struct iterator should handle absent fields properly
- node/bindnode: make AssignNode work at the repr level
- node/bindnode: add support for unsigned integers
- node/bindnode: cover even more edge case panics
- node/bindnode: polish some more AsT panics
- schema/dmt: stop using a fake test to generate code ([ipld/go-ipld-prime/storage/bsadapter#356](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/356))
- schema: remove one review note; add another.
- fix: minor EncodedLength fixes, add tests to fully exercise
- feat: add dagcbor.EncodedLength(Node) to calculate length without encoding
- chore: rename Garbage() to Generate()
- fix: minor garbage nits
- fix: Garbage() takes rand parameter, tweak algorithms, improve docs
- feat: add Garbage() Node generator
- node/bindnode: introduce an assembler that always errors
- node/bindnode: polish panics on invalid AssignT calls
- datamodel: don't panic when stringifying an empty KindSet
- node/bindnode: start using ipld.LoadSchema APIs
- selectors: fix for edge case around recursion clauses with an immediate edge. ([ipld/go-ipld-prime/storage/bsadapter#334](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/334))
- node/bindnode: improve support for pointer types
- node/bindnode: subtract all absents in Length at the repr level
- fix(codecs): error when encoding maps whose lengths don't match entry count
- schema: avoid alloc and copy in Struct and Enum methods
- node/bindnode: allow mapping int-repr enums with Go integers
- schema,node/bindnode: add support for Any
- signaling ADLs in selectors (#301) ([ipld/go-ipld-prime/storage/bsadapter#301](https://github.com/ipld/go-ipld-prime/storage/bsadapter/pull/301))
- node/bindnode: add support for enums
- schema/...: add support for enum int representations
- node/bindnode: allow binding cidlink.Link to links
- github.com/libp2p/go-libp2p (v0.26.4 -> v0.27.3):
- release v0.27.3
- quic virtual listener: don't panic when quic-go's accept call errors (#2276) ([libp2p/go-libp2p#2276](https://github.com/libp2p/go-libp2p/pull/2276))
- Release v0.27.2 (#2270) ([libp2p/go-libp2p#2270](https://github.com/libp2p/go-libp2p/pull/2270))
- release v0.27.1 (#2252) ([libp2p/go-libp2p#2252](https://github.com/libp2p/go-libp2p/pull/2252))
- Infer public webtransport addrs from quic-v1 addrs. (#2251) ([libp2p/go-libp2p#2251](https://github.com/libp2p/go-libp2p/pull/2251))
- basichost: don't allocate when deduplicating multiaddrs (#2206) ([libp2p/go-libp2p#2206](https://github.com/libp2p/go-libp2p/pull/2206))
- identify: fix normalization of interface listen addresses (#2250) ([libp2p/go-libp2p#2250](https://github.com/libp2p/go-libp2p/pull/2250))
- autonat: fix flaky TestAutoNATDialRefused (#2245) ([libp2p/go-libp2p#2245](https://github.com/libp2p/go-libp2p/pull/2245))
- basichost: remove stray print statement in test (#2249) ([libp2p/go-libp2p#2249](https://github.com/libp2p/go-libp2p/pull/2249))
- swarm: fix multiaddr comparison in ListenClose (#2247) ([libp2p/go-libp2p#2247](https://github.com/libp2p/go-libp2p/pull/2247))
- release v0.27.0 (#2242) ([libp2p/go-libp2p#2242](https://github.com/libp2p/go-libp2p/pull/2242))
- add a security policy (#2238) ([libp2p/go-libp2p#2238](https://github.com/libp2p/go-libp2p/pull/2238))
- chore: 0.27.0 changelog entries (#2241) ([libp2p/go-libp2p#2241](https://github.com/libp2p/go-libp2p/pull/2241))
- correctly handle WebTransport addresses without certhashes (#2239) ([libp2p/go-libp2p#2239](https://github.com/libp2p/go-libp2p/pull/2239))
- autorelay: add metrics (#2185) ([libp2p/go-libp2p#2185](https://github.com/libp2p/go-libp2p/pull/2185))
- autonat: don't change status on dial request refused (#2225) ([libp2p/go-libp2p#2225](https://github.com/libp2p/go-libp2p/pull/2225))
- autonat: fix closing of listeners in dialPolicy tests (#2226) ([libp2p/go-libp2p#2226](https://github.com/libp2p/go-libp2p/pull/2226))
- discovery (backoff): fix typo in comment (#2214) ([libp2p/go-libp2p#2214](https://github.com/libp2p/go-libp2p/pull/2214))
- relaysvc: flaky TestReachabilityChangeEvent (#2215) ([libp2p/go-libp2p#2215](https://github.com/libp2p/go-libp2p/pull/2215))
- Add wss transport to interop tester impl (#2178) ([libp2p/go-libp2p#2178](https://github.com/libp2p/go-libp2p/pull/2178))
- tests: add a stream read deadline transport test (#2210) ([libp2p/go-libp2p#2210](https://github.com/libp2p/go-libp2p/pull/2210))
- autorelay: fix busy loop bug and flaky tests in relay finder (#2208) ([libp2p/go-libp2p#2208](https://github.com/libp2p/go-libp2p/pull/2208))
- tests: test mplex and Yamux, Noise and TLS in transport tests (#2209) ([libp2p/go-libp2p#2209](https://github.com/libp2p/go-libp2p/pull/2209))
- tests: add some basic transport integration tests (#2207) ([libp2p/go-libp2p#2207](https://github.com/libp2p/go-libp2p/pull/2207))
- autorelay: remove unused semaphore (#2184) ([libp2p/go-libp2p#2184](https://github.com/libp2p/go-libp2p/pull/2184))
- basichost: prevent duplicate dials (#2196) ([libp2p/go-libp2p#2196](https://github.com/libp2p/go-libp2p/pull/2196))
- websocket: don't set a WSS multiaddr for accepted unencrypted conns (#2199) ([libp2p/go-libp2p#2199](https://github.com/libp2p/go-libp2p/pull/2199))
- websocket: Don't limit message sizes in the websocket reader (#2193) ([libp2p/go-libp2p#2193](https://github.com/libp2p/go-libp2p/pull/2193))
- identify: fix stale comment (#2179) ([libp2p/go-libp2p#2179](https://github.com/libp2p/go-libp2p/pull/2179))
- relay service: add metrics (#2154) ([libp2p/go-libp2p#2154](https://github.com/libp2p/go-libp2p/pull/2154))
- identify: Fix IdentifyWait when Connected events happen out of order (#2173) ([libp2p/go-libp2p#2173](https://github.com/libp2p/go-libp2p/pull/2173))
- chore: fix ressource manager's README (#2168) ([libp2p/go-libp2p#2168](https://github.com/libp2p/go-libp2p/pull/2168))
- relay: fix deadlock when closing (#2171) ([libp2p/go-libp2p#2171](https://github.com/libp2p/go-libp2p/pull/2171))
- core: remove LocalPrivateKey method from network.Conn interface (#2144) ([libp2p/go-libp2p#2144](https://github.com/libp2p/go-libp2p/pull/2144))
- routed host: return connection error instead of routing error (#2169) ([libp2p/go-libp2p#2169](https://github.com/libp2p/go-libp2p/pull/2169))
- connmgr: reduce log level for closing connections (#2165) ([libp2p/go-libp2p#2165](https://github.com/libp2p/go-libp2p/pull/2165))
- circuitv2: cleanup relay service properly (#2164) ([libp2p/go-libp2p#2164](https://github.com/libp2p/go-libp2p/pull/2164))
- chore: add patch release to changelog (#2151) ([libp2p/go-libp2p#2151](https://github.com/libp2p/go-libp2p/pull/2151))
- chore: remove superfluous testing section from README (#2150) ([libp2p/go-libp2p#2150](https://github.com/libp2p/go-libp2p/pull/2150))
- autonat: don't use autonat for address discovery (#2148) ([libp2p/go-libp2p#2148](https://github.com/libp2p/go-libp2p/pull/2148))
- swarm metrics: fix connection direction (#2147) ([libp2p/go-libp2p#2147](https://github.com/libp2p/go-libp2p/pull/2147))
- connmgr: Use eventually equal helper in connmgr tests (#2128) ([libp2p/go-libp2p#2128](https://github.com/libp2p/go-libp2p/pull/2128))
- swarm: emit PeerConnectedness event from swarm instead of from hosts (#1574) ([libp2p/go-libp2p#1574](https://github.com/libp2p/go-libp2p/pull/1574))
- relay: initialize the ASN util when starting the service (#2143) ([libp2p/go-libp2p#2143](https://github.com/libp2p/go-libp2p/pull/2143))
- Fix flaky TestMetricsNoAllocNoCover test (#2142) ([libp2p/go-libp2p#2142](https://github.com/libp2p/go-libp2p/pull/2142))
- identify: Bump timeouts/sleep in tests (#2135) ([libp2p/go-libp2p#2135](https://github.com/libp2p/go-libp2p/pull/2135))
- Add sleep to fix flaky test (#2129) ([libp2p/go-libp2p#2129](https://github.com/libp2p/go-libp2p/pull/2129))
- basic_host: Fix flaky tests (#2136) ([libp2p/go-libp2p#2136](https://github.com/libp2p/go-libp2p/pull/2136))
- swarm: Check context once more before dialing (#2139) ([libp2p/go-libp2p#2139](https://github.com/libp2p/go-libp2p/pull/2139))
- github.com/libp2p/go-libp2p-asn-util (v0.2.0 -> v0.3.0):
- release v0.3.0 (#26) ([libp2p/go-libp2p-asn-util#26](https://github.com/libp2p/go-libp2p-asn-util/pull/26))
- initialize the store lazily (#25) ([libp2p/go-libp2p-asn-util#25](https://github.com/libp2p/go-libp2p-asn-util/pull/25))
- github.com/libp2p/go-libp2p-gostream (v0.5.0 -> v0.6.0):
- Update libp2p ([libp2p/go-libp2p-gostream#80](https://github.com/libp2p/go-libp2p-gostream/pull/80))
- fix typo in README (#75) ([libp2p/go-libp2p-gostream#75](https://github.com/libp2p/go-libp2p-gostream/pull/75))
- github.com/libp2p/go-libp2p-http (v0.4.0 -> v0.5.0):
- sync: update CI config files ([libp2p/go-libp2p-http#82](https://github.com/libp2p/go-libp2p-http/pull/82))
- github.com/libp2p/go-libp2p-kad-dht (v0.21.1 -> v0.23.0):
- Release v0.23.0
- Specified CODEOWNERS ([libp2p/go-libp2p-kad-dht#828](https://github.com/libp2p/go-libp2p-kad-dht/pull/828))
- fix: optimistic provide ci checks in tests ([libp2p/go-libp2p-kad-dht#833](https://github.com/libp2p/go-libp2p-kad-dht/pull/833))
- feat: add experimental optimistic provide (#783) ([libp2p/go-libp2p-kad-dht#783](https://github.com/libp2p/go-libp2p-kad-dht/pull/783))
- feat: rework tracing a bit
- feat: add basic tracing
- chore: release v0.22.0
- chore: migrate go-libipfs to boxo
- Fix multiple ProviderAddrTTL definitions #795 ([libp2p/go-libp2p-kad-dht#831](https://github.com/libp2p/go-libp2p-kad-dht/pull/831))
- Increase provider Multiaddress TTL ([libp2p/go-libp2p-kad-dht#795](https://github.com/libp2p/go-libp2p-kad-dht/pull/795))
- Make provider manager options configurable in `fullrt` ([libp2p/go-libp2p-kad-dht#829](https://github.com/libp2p/go-libp2p-kad-dht/pull/829))
- Adjust PeerSet logic in the DHT lookup process ([libp2p/go-libp2p-kad-dht#802](https://github.com/libp2p/go-libp2p-kad-dht/pull/802))
- added maintainers in the README ([libp2p/go-libp2p-kad-dht#826](https://github.com/libp2p/go-libp2p-kad-dht/pull/826))
- Allow DHT crawler to be swappable
- Introduce options to parameterize config of the accelerated DHT client ([libp2p/go-libp2p-kad-dht#822](https://github.com/libp2p/go-libp2p-kad-dht/pull/822))
- github.com/libp2p/go-libp2p-pubsub (v0.9.0 -> v0.9.3):
- Fix Memory Leak In New Timecache Implementations (#528) ([libp2p/go-libp2p-pubsub#528](https://github.com/libp2p/go-libp2p-pubsub/pull/528))
- Default validator support (#525) ([libp2p/go-libp2p-pubsub#525](https://github.com/libp2p/go-libp2p-pubsub/pull/525))
- Refactor timecache implementations (#523) ([libp2p/go-libp2p-pubsub#523](https://github.com/libp2p/go-libp2p-pubsub/pull/523))
- fix(timecache): remove panic in first seen cache on Add (#522) ([libp2p/go-libp2p-pubsub#522](https://github.com/libp2p/go-libp2p-pubsub/pull/522))
- chore: update go version and dependencies (#516) ([libp2p/go-libp2p-pubsub#516](https://github.com/libp2p/go-libp2p-pubsub/pull/516))
- github.com/multiformats/go-multiaddr (v0.8.0 -> v0.9.0):
- Release v0.9.0 ([multiformats/go-multiaddr#196](https://github.com/multiformats/go-multiaddr/pull/196))
- Update webrtc protocols after rename ([multiformats/go-multiaddr#195](https://github.com/multiformats/go-multiaddr/pull/195))
- github.com/multiformats/go-multibase (v0.1.1 -> v0.2.0):
- chore: bump v0.2.0
- fix: math/rand -> crypto/rand
- fuzz: add Decoder fuzzing
- github.com/multiformats/go-multicodec (v0.7.0 -> v0.8.1):
- Bump version to release `ipns-record` code
- chore: update submodules and go generate
- deps: upgrade stringer to compatible version
- v0.8.0
- chore: update submodules and go generate
- github.com/warpfork/go-testmark (v0.10.0 -> v0.11.0):
- Quick changelog to note we have an API update.
- Index fix ([warpfork/go-testmark#13](https://github.com/warpfork/go-testmark/pull/13))
- Link to python implementation in the readme!
</details>
### 👨‍👩‍👧‍👦 Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Rod Vagg | 40 | +4214/-1400 | 102 |
| Sukun | 12 | +3541/-267 | 34 |
| Gus Eggert | 22 | +2387/-1160 | 81 |
| galargh | 23 | +1331/-1734 | 34 |
| Henrique Dias | 23 | +681/-1167 | 79 |
| Marco Munizaga | 19 | +1500/-187 | 55 |
| Jorropo | 25 | +897/-597 | 180 |
| Dennis Trautwein | 4 | +990/-60 | 14 |
| Marten Seemann | 18 | +443/-450 | 53 |
| vyzo | 2 | +595/-152 | 11 |
| Michael Muré | 8 | +427/-182 | 18 |
| Will | 2 | +536/-15 | 5 |
| Adin Schmahmann | 3 | +327/-125 | 11 |
| hannahhoward | 2 | +344/-1 | 4 |
| Arthur Gavazza | 1 | +210/-50 | 4 |
| Hector Sanjuan | 6 | +181/-77 | 13 |
| Masih H. Derkani | 5 | +214/-42 | 12 |
| Calvin Behling | 4 | +158/-58 | 11 |
| Eric Myhre | 7 | +113/-27 | 15 |
| Marcin Rataj | 5 | +72/-30 | 5 |
| Steve Loeppky | 2 | +99/-0 | 2 |
| Piotr Galar | 9 | +60/-18 | 9 |
| gammazero | 4 | +69/-0 | 8 |
| Prithvi Shahi | 2 | +55/-14 | 2 |
| Eng Zer Jun | 1 | +15/-54 | 5 |
| Laurent Senta | 3 | +44/-2 | 3 |
| Ian Davis | 1 | +35/-0 | 1 |
| web3-bot | 4 | +19/-13 | 7 |
| guillaumemichel | 2 | +18/-14 | 3 |
| Guillaume Michel - guissou | 4 | +24/-8 | 4 |
| omahs | 1 | +9/-9 | 3 |
| cortze | 3 | +9/-9 | 3 |
| Nishant Das | 1 | +9/-5 | 3 |
| Hlib Kanunnikov | 2 | +11/-3 | 3 |
| Andrew Gillis | 3 | +6/-8 | 3 |
| Johnny | 1 | +0/-10 | 1 |
| Rafał Leszko | 1 | +4/-4 | 1 |
| Dirk McCormick | 1 | +4/-1 | 1 |
| Antonio Navarro Perez | 1 | +4/-1 | 1 |
| RichΛrd | 2 | +2/-2 | 2 |
| Russell Dempsey | 1 | +2/-1 | 1 |
| Winterhuman | 1 | +1/-1 | 1 |
| Will Hawkins | 1 | +1/-1 | 1 |
| Nikhilesh Susarla | 1 | +1/-1 | 1 |
| Kubo Mage | 1 | +1/-1 | 1 |
| Bryan White | 1 | +1/-1 | 1 |

107
docs/changelogs/v0.21.md Normal file

@ -0,0 +1,107 @@
# Kubo changelog v0.21
- [v0.21.0](#v0210)
## v0.21.0
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
  - [Saving previously seen nodes for later bootstrapping](#saving-previously-seen-nodes-for-later-bootstrapping)
  - [Gateway: `DeserializedResponses` config flag](#gateway-deserializedresponses-config-flag)
  - [`client/rpc` migration of `go-ipfs-http-client`](#clientrpc-migration-of-go-ipfs-http-client)
  - [Gateway: DAG-CBOR/-JSON previews and improved error pages](#gateway-dag-cbor-json-previews-and-improved-error-pages)
  - [Gateway: subdomain redirects are now `text/html`](#gateway-subdomain-redirects-are-now-texthtml)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
### Overview
### 🔦 Highlights
#### Saving previously seen nodes for later bootstrapping
Kubo now stores a subset of connected peers as backup bootstrap nodes ([kubo#8856](https://github.com/ipfs/kubo/pull/8856)).
These nodes are used in addition to the explicitly defined bootstrappers in the
[`Bootstrap`](https://github.com/ipfs/kubo/blob/master/docs/config.md#bootstrap) configuration.
This enhancement improves the resiliency of the system, as it eliminates the
necessity of relying solely on the default bootstrappers operated by Protocol
Labs for joining the public IPFS swarm. Previously, this level of robustness
was only available in LAN contexts with [mDNS peer discovery](https://github.com/ipfs/kubo/blob/master/docs/config.md#discoverymdns)
enabled.
With this update, the same level of robustness now extends to nodes that lack
mDNS peers and rely solely on the public DHT.
#### Gateway: `DeserializedResponses` config flag
This release introduces the
[`Gateway.DeserializedResponses`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaydeserializedresponses)
configuration flag.
With this flag, one can explicitly configure whether the gateway responds to
deserialized requests. By default, this flag is enabled.
Disabling deserialized responses allows the
gateway to operate
as a [Trustless Gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway/)
limited to three [verifiable](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval)
response types:
[application/vnd.ipld.raw](https://www.iana.org/assignments/media-types/application/vnd.ipld.raw),
[application/vnd.ipld.car](https://www.iana.org/assignments/media-types/application/vnd.ipld.car),
and [application/vnd.ipfs.ipns-record](https://www.iana.org/assignments/media-types/application/vnd.ipfs.ipns-record).
With deserialized responses disabled, the Kubo gateway can serve as a block
backend for other software (like
[bifrost-gateway](https://github.com/ipfs/bifrost-gateway#readme),
[IPFS in Chromium](https://github.com/little-bear-labs/ipfs-chromium/blob/main/README.md)
etc.) without the usual risks associated with hosting deserialized data behind
third-party CIDs.
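For illustration, a minimal sketch of flipping this flag from Go, assuming the tri-state `Flag` type and the corresponding `DeserializedResponses` field exposed by the `github.com/ipfs/kubo/config` package; from the CLI, `ipfs config --json Gateway.DeserializedResponses false` achieves the same:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ipfs/kubo/config" // field/type names assumed, see docs/config.md
)

func main() {
	var cfg config.Config

	// Flag is tri-state (Default, True, False); False limits this gateway
	// to the trustless, verifiable response types listed above.
	cfg.Gateway.DeserializedResponses = config.False

	// Print just the gateway section to show the resulting JSON.
	out, _ := json.MarshalIndent(cfg.Gateway, "", "  ")
	fmt.Println(string(out))
}
```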
#### `client/rpc` migration of `go-ipfs-http-client`
The [`go-ipfs-http-client`](https://github.com/ipfs/go-ipfs-http-client) RPC has
been migrated into [`kubo/client/rpc`](../../client/rpc).
With this change the two will be kept in sync; in some previous releases we
updated the CoreAPI with new Kubo features but forgot to port those to the
http-client, which made it impossible to use them together with the same
CoreAPI version.
For a smooth transition, `v0.7.0` of `go-ipfs-http-client` provides updated stubs
for Kubo `v0.21`.
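For orientation, a minimal sketch of connecting through the new package, assuming a locally running daemon and the `NewLocalApi` constructor carried over from `go-ipfs-http-client`:

```go
package main

import (
	"context"
	"fmt"

	rpc "github.com/ipfs/kubo/client/rpc"
)

func main() {
	// Connect to the local Kubo daemon; the RPC address is discovered from
	// the local repo (falling back to the default API port).
	api, err := rpc.NewLocalApi()
	if err != nil {
		panic(err)
	}

	// The client implements coreiface.CoreAPI, so existing call sites from
	// go-ipfs-http-client should mostly only need the import path swapped.
	self, err := api.Key().Self(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("connected to node:", self.ID())
}
```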
#### Gateway: DAG-CBOR/-JSON previews and improved error pages
In this release, we improved the HTML templates of our HTTP gateway:
1. You can now preview the contents of a DAG-CBOR and DAG-JSON document from your browser, as well as follow any IPLD Links ([CBOR Tag 42](https://github.com/ipld/cid-cbor/)) contained within them.
2. The HTML directory listings now contain [updated, higher-definition icons](https://user-images.githubusercontent.com/5447088/241224419-5385793a-d3bb-40aa-8cb0-0382b5bc56a0.png).
3. On gateway error, instead of a plain text error message, web browsers will now get a friendly HTML response with more details regarding the problem.
HTML responses are returned when the request's `Accept` header includes `text/html`.
| DAG-CBOR Preview | Error Page |
| ---- | ---- |
| ![DAG-CBOR Preview](https://github.com/ipfs/boxo/assets/5447088/973f05d1-5731-4469-9da5-d1d776891899) | ![Error Page](https://github.com/ipfs/boxo/assets/5447088/14c453df-adbc-4634-b038-133121914550) |
#### Gateway: subdomain redirects are now `text/html`
HTTP 301 redirects [from path to subdomain](https://specs.ipfs.tech/http-gateways/subdomain-gateway/#migrating-from-path-to-subdomain-gateway)
no longer include the target data in the body.
The data is returned only once, with the final HTTP 200 returned from the
target subdomain.
The HTTP 301 body now includes a human-readable `text/html` message
for clients that do not follow redirects by default:
```console
$ curl "https://subdomain-gw.example.net/ipfs/${cid}/"
<a href="http://${cid}.ipfs.subdomain-gw.example.net/">Moved Permanently</a>.
```
Rationale can be found in [kubo#9913](https://github.com/ipfs/kubo/pull/9913).
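For Go clients that want to observe the new 301 body without following redirects, a small standard-library sketch (hostname and CID are placeholders):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	const cid = "<your-cid>" // placeholder

	// Stop at the first response so the 301 status and its HTML body are visible.
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	resp, err := client.Get("https://subdomain-gw.example.net/ipfs/" + cid + "/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, resp.Header.Get("Content-Type")) // 301, text/html
	fmt.Println(string(body))
}
```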
### 📝 Changelog
### 👨‍👩‍👧‍👦 Contributors


@ -50,6 +50,7 @@ config file at runtime.
- [`Gateway`](#gateway)
- [`Gateway.NoFetch`](#gatewaynofetch)
- [`Gateway.NoDNSLink`](#gatewaynodnslink)
- [`Gateway.DeserializedResponses`](#gatewaydeserializedresponses)
- [`Gateway.HTTPHeaders`](#gatewayhttpheaders)
- [`Gateway.RootRedirect`](#gatewayrootredirect)
- [`Gateway.FastDirIndexThreshold`](#gatewayfastdirindexthreshold)
@ -60,6 +61,7 @@ config file at runtime.
- [`Gateway.PublicGateways: UseSubdomains`](#gatewaypublicgateways-usesubdomains)
- [`Gateway.PublicGateways: NoDNSLink`](#gatewaypublicgateways-nodnslink)
- [`Gateway.PublicGateways: InlineDNSLink`](#gatewaypublicgateways-inlinednslink)
- [`Gateway.PublicGateways: DeserializedResponses`](#gatewaypublicgateways-deserializedresponses)
- [Implicit defaults of `Gateway.PublicGateways`](#implicit-defaults-of-gatewaypublicgateways)
- [`Gateway` recipes](#gateway-recipes)
- [`Identity`](#identity)
@ -236,7 +238,7 @@ documented in `ipfs config profile --help`.
smaller than several gigabytes. If you run IPFS with `--enable-gc`, you plan on storing very little data in
your IPFS node, and disk usage is more critical than performance, consider using
`flatfs`.
- This datastore uses up to several gigabytes of memory.
- Good for medium-size datastores, but may run into performance issues if your dataset is bigger than a terabyte.
- The current implementation is based on old badger 1.x which is no longer supported by the upstream team.
@ -646,6 +648,16 @@ Default: `false`
Type: `bool`
#### `Gateway.DeserializedResponses`
An optional flag to explicitly configure whether this gateway responds to deserialized
requests. It is enabled by default. When disabled, the gateway operates only as a
Trustless Gateway: https://specs.ipfs.tech/http-gateways/trustless-gateway/.
Default: `true`
Type: `flag`
### `Gateway.HTTPHeaders`
Headers to set on gateway responses.
@ -790,6 +802,16 @@ Default: `false`
Type: `flag`
#### `Gateway.PublicGateways: DeserializedResponses`
An optional flag to explicitly configure whether this gateway responds to deserialized
requests. It is enabled by default. When disabled, the gateway operates only as a
Trustless Gateway: https://specs.ipfs.tech/http-gateways/trustless-gateway/.
Default: same as global `Gateway.DeserializedResponses`
Type: `flag`
#### Implicit defaults of `Gateway.PublicGateways`
Default entries for `localhost` hostname and loopback IPs are always present.
@ -895,7 +917,7 @@ Type: `string` (base64 encoded)
## `Internal`
This section includes internal knobs for various subsystems to allow advanced users with big or private infrastructures to fine-tune some behaviors without the need to recompile Kubo.
**Be aware that making informed change here requires in-depth knowledge and most users should leave these untouched. All knobs listed here are subject to breaking changes between versions.**
@ -971,7 +993,7 @@ Type: `optionalInteger` (byte count, `null` means default which is 1MB)
### `Internal.Bitswap.ProviderSearchDelay`
This parameter determines how long to wait before looking for providers outside of bitswap.
Other routing systems like the DHT are able to provide results in less than a second, so lowering
this number will allow faster peer lookups in some cases.
Type: `optionalDuration` (`null` means default which is 1s)
@ -1326,15 +1348,19 @@ Type: `array[peering]`
### `Reprovider.Interval`
Sets the time between rounds of reproviding local content to the routing
system.
- If unset, it uses the implicit safe default.
- If set to the value `"0"` it will disable content reproviding.
Note: disabling content reproviding will result in other nodes on the network
not being able to discover the objects you have. If you want to have this
disabled and keep the network aware of what you have, you must manually
announce your content periodically.
Default: `22h` (`DefaultReproviderInterval`)
Type: `optionalDuration` (unset for the default)
### `Reprovider.Strategy`
@ -1346,7 +1372,7 @@ Tells reprovider what should be announced. Valid strategies are:
Default: `"all"`
Type: `optionalString` (unset for the default)
## `Routing`
@ -1548,7 +1574,7 @@ another node, even if this other node is on a different network. This may
trigger netscan alerts on some hosting providers or cause strain in some setups.
The `server` configuration profile fills up this list with sensible defaults,
preventing dials to all non-routable IP addresses (e.g., `/ip4/192.168.0.0/ipcidr/16`,
which is the multiaddress representation of `192.168.0.0/16`) but you should always
check settings against your own network and/or hosting provider.
@ -1622,8 +1648,8 @@ Type: `flag`
#### `Swarm.RelayClient.StaticRelays`
Your node will use these statically configured relay servers
instead of discovering public relays ([Circuit Relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md)) from the network.
Default: `[]`

60
docs/customizing.md Normal file

@ -0,0 +1,60 @@
# Customizing Kubo
You may want to customize Kubo when you need to reuse most of its machinery. This document discusses some approaches you may consider for customizing Kubo, and their tradeoffs.
Some common use cases for customizing Kubo include:
- Using a custom datastore for storing blocks, pins, or other Kubo metadata
- Adding a custom data transfer protocol into Kubo
- Customizing Kubo internals, such as adding allowlist/blocklist functionality to Bitswap
- Adding new commands, interfaces, functionality, etc. to Kubo while reusing the libp2p swarm
- Building on top of Kubo's configuration and config migration functionality
## Summary
This table summarizes the tradeoffs between the approaches below:
| | [Boxo](#boxo-build-your-own-binary) | [Kubo Plugin](#kubo-plugins) | [Bespoke Extension Point](#bespoke-extension-points) | [Go Plugin](#go-plugins) | [Fork](#fork-kubo) |
|:-------------------:|:-----------------------------------:|:----------------------------:|:----------------------------------------------------:|:------------------------:|:------------------:|
| Supported? | ✅ | ✅ | ✅ | ❌ | ❌ |
| Future-proof? | ✅ | ❌ | ✅ | ❌ | ❌ |
| Fully customizable? | ✅ | ✅ | ❌ | ✅ | ✅ |
| Fast to implement? | ❌ | ✅ | ✅ | ✅ | ✅ |
| Dynamic at runtime? | ❌ | ❌ | ✅ | ✅ | ❌ |
| Add new commands? | ❌ | ✅ | ❌ | ✅ | ✅ |
## Boxo: build your own binary
The best way to reuse Kubo functionality is to pick the functionality you need directly from [Boxo](https://github.com/ipfs/boxo) and compile your own binary.
Boxo's raison d'etre is to be an IPFS component toolbox to support building custom-made implementations and applications. If your use case is not easy to implement with Boxo, you may want to consider adding whatever functionality is needed to Boxo instead of customizing Kubo, so that the community can benefit. If you are interested in this option, please reach out to Boxo maintainers, who will be happy to help you scope & plan the work. See [Boxo's FAQ](https://github.com/ipfs/boxo#help) for more info.
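As a toy sketch of that approach, assuming the `github.com/ipfs/boxo/blocks` package is the component you need, a custom binary can pull in just that piece without the rest of Kubo:

```go
package main

import (
	"fmt"

	"github.com/ipfs/boxo/blocks"
)

func main() {
	// Reuse only Boxo's block/CID handling, with no daemon or libp2p swarm.
	blk := blocks.NewBlock([]byte("hello from a custom binary"))
	fmt.Println("block CID:", blk.Cid())
}
```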
## Kubo Plugins
Kubo plugins are a set of interfaces that may be implemented and injected into Kubo. Generally you should recompile the Kubo binary with your plugins added. A popular example of a Kubo plugin is [go-ds-s3](https://github.com/ipfs/go-ds-s3), which can be used to store blocks in Amazon S3.
Some plugins, such as the `fx` plugin, allow deep customization of Kubo internals. As a result, Kubo maintainers can't guarantee backwards compatibility with these, so you may need to adapt to breaking changes when upgrading to new Kubo versions.
For more information about the different types of Kubo plugins, see [plugins.md](./plugins.md).
Kubo plugins can also be injected at runtime using Go plugins (see below), but these are hard to use and not well supported by Go, so we don't recommend them.
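For orientation, a minimal sketch of the shape of a compiled-in plugin, assuming the `Plugin` interface and `Environment` type from `github.com/ipfs/kubo/plugin` (the `greeter` name is illustrative; see [plugins.md](./plugins.md) for the authoritative interfaces):

```go
package greeter

import "github.com/ipfs/kubo/plugin"

// Plugins is the exported list the Kubo plugin loader picks up when this
// package is compiled into the binary.
var Plugins = []plugin.Plugin{&greeterPlugin{}}

type greeterPlugin struct{}

func (*greeterPlugin) Name() string    { return "greeter" }
func (*greeterPlugin) Version() string { return "0.0.1" }

// Init runs once at daemon startup; env.Config carries this plugin's
// section of the Kubo config file, if any.
func (*greeterPlugin) Init(env *plugin.Environment) error {
	return nil
}
```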
## Bespoke Extension Points
Certain Kubo functionality may have their own extension points. For example:
* Kubo supports the [Routing v1](https://github.com/ipfs/specs/blob/main/routing/ROUTING_V1_HTTP.md) API for delegating content routing to external processes
* Kubo supports the [Pinning Service API](https://github.com/ipfs/pinning-services-api-spec) for delegating pinning to external processes
* Kubo supports [DNSLink](https://dnslink.dev/) for delegating name->CID mappings to DNS
(This list is not exhaustive.)
These can generally be developed and deployed as sidecars (or full external services) without modifying the Kubo binary.
## Go Plugins
Go provides [dynamic plugins](https://pkg.go.dev/plugin) which can be loaded at runtime into a Go binary.
Kubo currently works with Go plugins. But using Go plugins requires that you compile the plugin using the exact same version of the Go toolchain with the same configuration (build flags, environment variables, etc.). As a result, you likely need to build Kubo and the plugins together at the same time, and at that point you may as well just compile the functionality directly into Kubo and avoid Go plugins.
As a result, we don't recommend using Go plugins, and are likely to remove them in a future release of Kubo.
## Fork Kubo
The "nuclear option" is to fork Kubo into your own repo, make your changes, and periodically sync your repo with the Kubo repo. This can be a good option if your changes are significant and you can commit to keeping your repo in sync with Kubo.
Kubo maintainers can't make any backwards compatibility guarantees about Kubo internals, so by choosing this option you're accepting the risk that you may need to spend more time adapting to breaking changes.


@ -1,6 +1,6 @@
# Use Kubo (go-ipfs) as a library to spawn a node and add a file
> Note: if you are trying to customize or extend Kubo, you should read the [Customizing Kubo](../../customizing.md) doc.
By the end of this tutorial, you will learn how to:


@ -7,9 +7,9 @@ go 1.18
replace github.com/ipfs/kubo => ./../../..
require (
github.com/ipfs/boxo v0.8.2-0.20230503105907-8059f183d866
github.com/ipfs/boxo v0.8.2-0.20230602144903-e2fc7f2fd023
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.27.1
github.com/libp2p/go-libp2p v0.27.3
github.com/multiformats/go-multiaddr v0.9.0
)
@ -174,18 +174,18 @@ require (
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/dig v1.16.1 // indirect
go.uber.org/dig v1.17.0 // indirect
go.uber.org/fx v1.19.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
gonum.org/v1/gonum v0.11.0 // indirect


@ -321,8 +321,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.8.2-0.20230503105907-8059f183d866 h1:ThRTXD/EyoLb/jz+YW+ZlOLbjX9FyaxP0dEpgUp3cCE=
github.com/ipfs/boxo v0.8.2-0.20230503105907-8059f183d866/go.mod h1:bORAHrH6hUtDZjbzTEaLrSpTdyhHKDIpjDRT+A14B7w=
github.com/ipfs/boxo v0.8.2-0.20230602144903-e2fc7f2fd023 h1:+9QiIziAuKW8AuGi26dFHw8SXTNB+MKooTp/sMlCmDY=
github.com/ipfs/boxo v0.8.2-0.20230602144903-e2fc7f2fd023/go.mod h1:Ej2r08Z4VIaFKqY08UXMNhwcLf6VekHhK8c+KqA1B9Y=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
@ -489,8 +489,8 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.27.1 h1:k1u6RHsX3hqKnslDjsSgLNURxJ3O1atIZCY4gpMbbus=
github.com/libp2p/go-libp2p v0.27.1/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE=
github.com/libp2p/go-libp2p v0.27.3 h1:tkV/zm3KCZ4R5er9Xcs2pt0YNB4JH0iBfGAtHJdLHRs=
github.com/libp2p/go-libp2p v0.27.3/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE=
github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
@ -840,8 +840,8 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8=
go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk=
go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY=
go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
@ -879,8 +879,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -961,8 +961,8 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1046,8 +1046,8 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1061,8 +1061,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=


@ -2,13 +2,13 @@
Kubo provides official HTTP RPC (`/api/v0`) clients for selected languages:
- [`js-kubo-rpc-client`](https://github.com/ipfs/js-kubo-rpc-client) - Official JS client for talking to Kubo RPC over HTTP
- [`go-ipfs-api`](https://github.com/ipfs/go-ipfs-api) - The go interface to ipfs's HTTP RPC - Follow https://github.com/ipfs/kubo/issues/9124 for coming changes.
- [`httpapi`](./client/rpc) (previously `go-ipfs-http-client`) - [`coreiface.CoreAPI`](https://pkg.go.dev/github.com/ipfs/boxo/coreiface#CoreAPI) implementation using HTTP RPC
## Recommended clients
| Language | Package Name | Github Repository |
|:--------:|:-------------------:|--------------------------------------------|
| JS | kubo-rpc-client | https://github.com/ipfs/js-kubo-rpc-client |
| Go | `rpc` | [`./client/rpc`](./client/rpc) |


@ -0,0 +1,60 @@
# Testing Kubo releases with Thunderdome
This document is for running Thunderdome tests by release engineers as part of releasing Kubo.
We use Thunderdome to replay ipfs.io gateway traffic in a controlled environment against two different versions of Kubo, and we record metrics and compare them to look for logic or performance regressions before releasing a new Kubo version.
For background information about how Thunderdome works, see: https://github.com/ipfs-shipyard/thunderdome
## Prerequisites
* Ensure you have access to the "IPFS Stewards" vault in 1Password, which contains the requisite AWS Console and API credentials
* Ensure you have Docker and the Docker CLI installed
* Check out the Thunderdome repo locally (or `git pull` to ensure it's up-to-date)
* Install AWS CLI v2: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
* Configure the AWS CLI
* Configure the credentials as described in the [Thunderdome documentation](https://github.com/ipfs-shipyard/thunderdome/blob/main/cmd/thunderdome/README.md#credentials), using the credentials from 1Password
* Make sure the `thunderdome` binary is up-to-date: `go build ./cmd/thunderdome`
## Add & run an experiment
Create a new release configuration JSON in the `experiments/` directory, based on the most recent `kubo-release` configuration, and tweak as necessary. Generally we set up the targets to run a commit against the tag of the last release, such as:
```json
"targets": [
{
"name": "kubo190-4283b9",
"description": "kubo 0.19.0-rc1",
"build_from_git": {
"repo": "https://github.com/ipfs/kubo.git",
"commit":"4283b9d98f8438fc8751ccc840d8fc24eeae6f13"
}
},
{
"name": "kubo181",
"description": "kubo 0.18.",
"build_from_git": {
"repo": "https://github.com/ipfs/kubo.git",
"tag":"v0.18.1"
}
}
]
```
Run the experiment (where `$EXPERIMENT_CONFIG_JSON` is a path to the config JSON created above):
```shell
AWS_PROFILE=thunderdome ./thunderdome deploy --verbose --duration 120 $EXPERIMENT_CONFIG_JSON
```
This will build the Docker images, upload them to ECR, and then launch the experiment in Thunderdome. Once the experiment starts, the CLI will exit and the experiment will continue to run for the duration.
## Analyze Results
Add a log entry in https://www.notion.so/pl-strflt/ce2d1bd56f3541028d960d3711465659 and link to it from the release issue, so that experiment results are publicly visible.
The `deploy` command will output a link to the Grafana dashboard for the experiment. We don't currently have rigorous acceptance criteria, so you should look for anomalies or changes in the metrics and make sure they are tolerable and explainable. Unexplainable anomalies should be noted in the log with a screenshot, and then root caused.
## Open a PR to merge the experiment config into Thunderdome
This is important both for posterity and so that someone else can sanity-check the test parameters.

21
go.mod

@ -1,8 +1,5 @@
module github.com/ipfs/kubo
// https://github.com/ipfs/boxo/pull/290
replace github.com/ipfs/boxo => github.com/MichaelMure/boxo v0.0.0-20230505145003-9207501a615f
require (
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc
contrib.go.opencensus.io/exporter/prometheus v0.4.2
@ -19,7 +16,7 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/google/uuid v1.3.0
github.com/hashicorp/go-multierror v1.1.1
github.com/ipfs/boxo v0.8.2-0.20230503105907-8059f183d866
github.com/ipfs/boxo v0.8.2-0.20230602144903-e2fc7f2fd023
github.com/ipfs/go-block-format v0.1.2
github.com/ipfs/go-cid v0.4.1
github.com/ipfs/go-cidutil v0.1.0
@ -48,7 +45,7 @@ require (
github.com/jbenet/goprocess v0.1.4
github.com/julienschmidt/httprouter v1.3.0
github.com/libp2p/go-doh-resolver v0.4.0
github.com/libp2p/go-libp2p v0.27.1
github.com/libp2p/go-libp2p v0.27.3
github.com/libp2p/go-libp2p-http v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.23.0
github.com/libp2p/go-libp2p-kbucket v0.5.0
@ -80,13 +77,14 @@ require (
go.opentelemetry.io/otel v1.14.0
go.opentelemetry.io/otel/sdk v1.14.0
go.opentelemetry.io/otel/trace v1.14.0
go.uber.org/dig v1.16.1
go.uber.org/dig v1.17.0
go.uber.org/fx v1.19.2
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.24.0
golang.org/x/crypto v0.7.0
golang.org/x/crypto v0.9.0
golang.org/x/mod v0.10.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.7.0
golang.org/x/sys v0.8.0
)
require (
@ -216,13 +214,12 @@ require (
go.opentelemetry.io/otel/metric v0.37.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.5.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
gonum.org/v1/gonum v0.11.0 // indirect

32
go.sum
View File

@ -49,8 +49,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Kubuxu/go-os-helper v0.0.1 h1:EJiD2VUQyh5A9hWJLmc6iWg6yIcJ7jpBcwC8GMGXfDk=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/MichaelMure/boxo v0.0.0-20230505145003-9207501a615f h1:2UbpOJ6cIC43V/hIDxgvP0VLbJIk+cBofPAWmXBlSrg=
github.com/MichaelMure/boxo v0.0.0-20230505145003-9207501a615f/go.mod h1:bORAHrH6hUtDZjbzTEaLrSpTdyhHKDIpjDRT+A14B7w=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
@ -358,6 +356,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.8.2-0.20230602144903-e2fc7f2fd023 h1:+9QiIziAuKW8AuGi26dFHw8SXTNB+MKooTp/sMlCmDY=
github.com/ipfs/boxo v0.8.2-0.20230602144903-e2fc7f2fd023/go.mod h1:Ej2r08Z4VIaFKqY08UXMNhwcLf6VekHhK8c+KqA1B9Y=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
@ -540,8 +540,8 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.27.1 h1:k1u6RHsX3hqKnslDjsSgLNURxJ3O1atIZCY4gpMbbus=
github.com/libp2p/go-libp2p v0.27.1/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE=
github.com/libp2p/go-libp2p v0.27.3 h1:tkV/zm3KCZ4R5er9Xcs2pt0YNB4JH0iBfGAtHJdLHRs=
github.com/libp2p/go-libp2p v0.27.3/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE=
github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
@ -970,8 +970,8 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8=
go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk=
go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY=
go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
@ -1009,8 +1009,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1097,8 +1097,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1199,14 +1199,14 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1216,8 +1216,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@ -87,7 +87,7 @@ func (ls loaderState) String() string {
// 5. Call Close to close all plugins.
type PluginLoader struct {
state loaderState
plugins map[string]plugin.Plugin
plugins []plugin.Plugin
started []plugin.Plugin
config config.Plugins
repo string
@ -95,7 +95,7 @@ type PluginLoader struct {
// NewPluginLoader creates new plugin loader
func NewPluginLoader(repo string) (*PluginLoader, error) {
loader := &PluginLoader{plugins: make(map[string]plugin.Plugin, len(preloadPlugins)), repo: repo}
loader := &PluginLoader{plugins: make([]plugin.Plugin, 0, len(preloadPlugins)), repo: repo}
if repo != "" {
cfg, err := cserialize.Load(filepath.Join(repo, config.DefaultConfigFile))
switch err {
@ -106,6 +106,7 @@ func NewPluginLoader(repo string) (*PluginLoader, error) {
return nil, err
}
}
for _, v := range preloadPlugins {
if err := loader.Load(v); err != nil {
return nil, err
@ -140,18 +141,22 @@ func (loader *PluginLoader) Load(pl plugin.Plugin) error {
}
name := pl.Name()
if ppl, ok := loader.plugins[name]; ok {
// plugin is already loaded
return fmt.Errorf(
"plugin: %s, is duplicated in version: %s, "+
"while trying to load dynamically: %s",
name, ppl.Version(), pl.Version())
for _, p := range loader.plugins {
if p.Name() == name {
// plugin is already loaded
return fmt.Errorf(
"plugin: %s, is duplicated in version: %s, "+
"while trying to load dynamically: %s",
name, p.Version(), pl.Version())
}
}
if loader.config.Plugins[name].Disabled {
log.Infof("not loading disabled plugin %s", name)
return nil
}
loader.plugins[name] = pl
loader.plugins = append(loader.plugins, pl)
return nil
}
@ -219,10 +224,10 @@ func (loader *PluginLoader) Initialize() error {
if err := loader.transition(loaderLoading, loaderInitializing); err != nil {
return err
}
for name, p := range loader.plugins {
for _, p := range loader.plugins {
err := p.Init(&plugin.Environment{
Repo: loader.repo,
Config: loader.config.Plugins[name].Config,
Config: loader.config.Plugins[p.Name()].Config,
})
if err != nil {
loader.state = loaderFailed

View File

@ -0,0 +1,60 @@
package cli
import (
"fmt"
"testing"
"time"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
)
func TestBackupBootstrapPeers(t *testing.T) {
nodes := harness.NewT(t).NewNodes(3).Init()
nodes.ForEachPar(func(n *harness.Node) {
n.UpdateConfig(func(cfg *config.Config) {
cfg.Bootstrap = []string{}
cfg.Addresses.Swarm = []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", harness.NewRandPort())}
cfg.Discovery.MDNS.Enabled = false
cfg.Internal.BackupBootstrapInterval = config.NewOptionalDuration(250 * time.Millisecond)
})
})
// Start all nodes and ensure they all have no peers.
nodes.StartDaemons()
nodes.ForEachPar(func(n *harness.Node) {
assert.Len(t, n.Peers(), 0)
})
// Connect nodes 0 and 1, ensure they know each other.
nodes[0].Connect(nodes[1])
assert.Len(t, nodes[0].Peers(), 1)
assert.Len(t, nodes[1].Peers(), 1)
assert.Len(t, nodes[2].Peers(), 0)
// Wait a bit to ensure that 0 and 1 saved their temporary bootstrap backups.
time.Sleep(time.Millisecond * 500)
nodes.StopDaemons()
// Start 1 and 2. 2 does not know anyone yet.
nodes[1].StartDaemon()
nodes[2].StartDaemon()
assert.Len(t, nodes[1].Peers(), 0)
assert.Len(t, nodes[2].Peers(), 0)
// Connect 1 and 2, ensure they know each other.
nodes[1].Connect(nodes[2])
assert.Len(t, nodes[1].Peers(), 1)
assert.Len(t, nodes[2].Peers(), 1)
// Start 0, wait a bit. Should connect to 1, and then discover 2 via the
// backup bootstrap peers.
nodes[0].StartDaemon()
time.Sleep(time.Millisecond * 500)
// Check if they're all connected.
assert.Len(t, nodes[0].Peers(), 2)
assert.Len(t, nodes[1].Peers(), 2)
assert.Len(t, nodes[2].Peers(), 2)
}

View File

@ -11,6 +11,7 @@ import (
"github.com/ipfs/boxo/routing/http/server"
"github.com/ipfs/boxo/routing/http/types"
"github.com/ipfs/boxo/routing/http/types/iter"
"github.com/ipfs/go-cid"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
@ -23,11 +24,11 @@ type fakeHTTPContentRouter struct {
provideCalls int
}
func (r *fakeHTTPContentRouter) FindProviders(ctx context.Context, key cid.Cid) ([]types.ProviderResponse, error) {
func (r *fakeHTTPContentRouter) FindProviders(ctx context.Context, key cid.Cid, limit int) (iter.ResultIter[types.ProviderResponse], error) {
r.m.Lock()
defer r.m.Unlock()
r.findProvidersCalls++
return []types.ProviderResponse{}, nil
return iter.FromSlice([]iter.Result[types.ProviderResponse]{}), nil
}
func (r *fakeHTTPContentRouter) ProvideBitswap(ctx context.Context, req *server.BitswapWriteProvideRequest) (time.Duration, error) {

View File

@ -15,17 +15,20 @@ func TestHTTPDelegatedRouting(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
fakeServer := func(resp string) *httptest.Server {
fakeServer := func(contentType string, resp ...string) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte(resp))
if err != nil {
panic(err)
w.Header().Set("Content-Type", contentType)
for _, r := range resp {
_, err := w.Write([]byte(r))
if err != nil {
panic(err)
}
}
}))
}
findProvsCID := "baeabep4vu3ceru7nerjjbk37sxb7wmftteve4hcosmyolsbsiubw2vr6pqzj6mw7kv6tbn6nqkkldnklbjgm5tzbi4hkpkled4xlcr7xz4bq"
prov := "12D3KooWARYacCc6eoCqvsS9RW9MA2vo51CV75deoiqssx3YgyYJ"
provs := []string{"12D3KooWAobjw92XDcnQ1rRmRJDA3zAQpdPYUpZKrJxH6yccSpje", "12D3KooWARYacCc6eoCqvsS9RW9MA2vo51CV75deoiqssx3YgyYJ"}
t.Run("default routing config has no routers defined", func(t *testing.T) {
assert.Nil(t, node.ReadConfig().Routing.Routers)
@ -84,11 +87,11 @@ func TestHTTPDelegatedRouting(t *testing.T) {
})
t.Run("adding HTTP delegated routing endpoint to Routing.Routers config works", func(t *testing.T) {
server := fakeServer(ToJSONStr(JSONObj{
server := fakeServer("application/json", ToJSONStr(JSONObj{
"Providers": []JSONObj{{
"Protocol": "transport-bitswap",
"Schema": "bitswap",
"ID": prov,
"ID": provs[0],
"Addrs": []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/tcp/4002"},
}},
}))
@ -113,9 +116,39 @@ func TestHTTPDelegatedRouting(t *testing.T) {
assert.Equal(t, res.Stdout.Trimmed(), server.URL)
node.StartDaemon()
res = node.IPFS("routing", "findprovs", findProvsCID)
assert.Equal(t, prov, res.Stdout.Trimmed())
assert.Equal(t, provs[0], res.Stdout.Trimmed())
})
node.StopDaemon()
t.Run("adding HTTP delegated routing endpoint to Routing.Routers config works (streaming)", func(t *testing.T) {
server := fakeServer("application/x-ndjson", ToJSONStr(JSONObj{
"Protocol": "transport-bitswap",
"Schema": "bitswap",
"ID": provs[1],
"Addrs": []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/tcp/4002"},
}), ToJSONStr(JSONObj{
"Protocol": "transport-bitswap",
"Schema": "bitswap",
"ID": provs[0],
"Addrs": []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/tcp/4002"},
}))
t.Cleanup(server.Close)
node.IPFS("config", "Routing.Routers.TestDelegatedRouter", "--json", ToJSONStr(JSONObj{
"Type": "http",
"Parameters": JSONObj{
"Endpoint": server.URL,
},
}))
res := node.IPFS("config", "Routing.Routers.TestDelegatedRouter.Parameters.Endpoint")
assert.Equal(t, res.Stdout.Trimmed(), server.URL)
node.StartDaemon()
res = node.IPFS("routing", "findprovs", findProvsCID)
assert.Equal(t, provs[1]+"\n"+provs[0], res.Stdout.Trimmed())
})
t.Run("HTTP client should emit OpenCensus metrics", func(t *testing.T) {

View File

@ -31,10 +31,10 @@ func TestGatewayHAMTDirectory(t *testing.T) {
// Import fixtures
r, err := os.Open("./fixtures/TestGatewayHAMTDirectory.car")
assert.Nil(t, err)
assert.NoError(t, err)
defer r.Close()
err = node.IPFSDagImport(r, fixtureCid)
assert.Nil(t, err)
assert.NoError(t, err)
// Fetch HAMT directory succeeds with minimal refs
resp := client.Get(fmt.Sprintf("/ipfs/%s/", hamtCid))
@ -60,10 +60,10 @@ func TestGatewayMultiRange(t *testing.T) {
// Import fixtures
r, err := os.Open("./fixtures/TestGatewayMultiRange.car")
assert.Nil(t, err)
assert.NoError(t, err)
defer r.Close()
err = node.IPFSDagImport(r, fixtureCid)
assert.Nil(t, err)
assert.NoError(t, err)
// Succeeds fetching a range of blocks we have
resp := client.Get(fmt.Sprintf("/ipfs/%s", fileCid), func(r *http.Request) {

View File

@ -13,8 +13,10 @@ import (
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
. "github.com/ipfs/kubo/test/cli/testutils"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/multiformats/go-multibase"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -25,10 +27,13 @@ func TestGateway(t *testing.T) {
node := h.NewNode().Init().StartDaemon("--offline")
cid := node.IPFSAddStr("Hello Worlds!")
peerID, err := peer.ToCid(node.PeerID()).StringOfBase(multibase.Base36)
assert.NoError(t, err)
client := node.GatewayClient()
client.TemplateData = map[string]string{
"CID": cid,
"PeerID": node.PeerID().String(),
"PeerID": peerID,
}
t.Run("GET IPFS path succeeds", func(t *testing.T) {
@ -182,7 +187,7 @@ func TestGateway(t *testing.T) {
t.Run("GET /ipfs/ipns/{peerid} returns redirect to the valid path", func(t *testing.T) {
t.Parallel()
resp := client.Get("/ipfs/ipns/{{.PeerID}}?query=to-remember")
peerID := node.PeerID().String()
assert.Contains(t,
resp.Body,
fmt.Sprintf(`<meta http-equiv="refresh" content="10;url=/ipns/%s?query=to-remember" />`, peerID),
@ -474,6 +479,9 @@ func TestGateway(t *testing.T) {
cfg.Gateway.NoFetch = true
})
node2PeerID, err := peer.ToCid(node2.PeerID()).StringOfBase(multibase.Base36)
assert.NoError(t, err)
nodes.StartDaemons().Connect()
t.Run("not present", func(t *testing.T) {
@ -486,7 +494,7 @@ func TestGateway(t *testing.T) {
t.Run("not present IPNS key from node 1", func(t *testing.T) {
t.Parallel()
assert.Equal(t, 500, node1.GatewayClient().Get("/ipns/"+node2.PeerID().String()).StatusCode)
assert.Equal(t, 500, node1.GatewayClient().Get("/ipns/"+node2PeerID).StatusCode)
})
})
@ -501,9 +509,91 @@ func TestGateway(t *testing.T) {
t.Run("present IPNS key from node 1", func(t *testing.T) {
t.Parallel()
node2.IPFS("name", "publish", "/ipfs/"+cidBar)
assert.Equal(t, 200, node1.GatewayClient().Get("/ipns/"+node2.PeerID().String()).StatusCode)
assert.Equal(t, 200, node1.GatewayClient().Get("/ipns/"+node2PeerID).StatusCode)
})
})
})
t.Run("DeserializedResponses", func(t *testing.T) {
type testCase struct {
globalValue config.Flag
gatewayValue config.Flag
deserializedGlobalStatusCode int
deserializedGatewayStatusCode int
message string
}
setHost := func(r *http.Request) {
r.Host = "example.com"
}
withAccept := func(accept string) func(r *http.Request) {
return func(r *http.Request) {
r.Header.Set("Accept", accept)
}
}
withHostAndAccept := func(accept string) func(r *http.Request) {
return func(r *http.Request) {
setHost(r)
withAccept(accept)(r)
}
}
makeTest := func(test *testCase) func(t *testing.T) {
return func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Gateway.DeserializedResponses = test.globalValue
cfg.Gateway.PublicGateways = map[string]*config.GatewaySpec{
"example.com": {
Paths: []string{"/ipfs", "/ipns"},
DeserializedResponses: test.gatewayValue,
},
}
})
node.StartDaemon()
cidFoo := node.IPFSAddStr("foo")
client := node.GatewayClient()
deserializedPath := "/ipfs/" + cidFoo
blockPath := deserializedPath + "?format=raw"
carPath := deserializedPath + "?format=car"
// Global Check (Gateway.DeserializedResponses)
assert.Equal(t, http.StatusOK, client.Get(blockPath).StatusCode)
assert.Equal(t, http.StatusOK, client.Get(deserializedPath, withAccept("application/vnd.ipld.raw")).StatusCode)
assert.Equal(t, http.StatusOK, client.Get(carPath).StatusCode)
assert.Equal(t, http.StatusOK, client.Get(deserializedPath, withAccept("application/vnd.ipld.car")).StatusCode)
assert.Equal(t, test.deserializedGlobalStatusCode, client.Get(deserializedPath).StatusCode)
assert.Equal(t, test.deserializedGlobalStatusCode, client.Get(deserializedPath, withAccept("application/json")).StatusCode)
// Public Gateway (example.com) Check (Gateway.PublicGateways[example.com].DeserializedResponses)
assert.Equal(t, http.StatusOK, client.Get(blockPath, setHost).StatusCode)
assert.Equal(t, http.StatusOK, client.Get(deserializedPath, withHostAndAccept("application/vnd.ipld.raw")).StatusCode)
assert.Equal(t, http.StatusOK, client.Get(carPath, setHost).StatusCode)
assert.Equal(t, http.StatusOK, client.Get(deserializedPath, withHostAndAccept("application/vnd.ipld.car")).StatusCode)
assert.Equal(t, test.deserializedGatewayStatusCode, client.Get(deserializedPath, setHost).StatusCode)
assert.Equal(t, test.deserializedGatewayStatusCode, client.Get(deserializedPath, withHostAndAccept("application/json")).StatusCode)
}
}
for _, test := range []*testCase{
{config.True, config.Default, http.StatusOK, http.StatusOK, "when Gateway.DeserializedResponses is globally enabled, leaving implicit default for Gateway.PublicGateways[example.com] should inherit the global setting (enabled)"},
{config.False, config.Default, http.StatusNotAcceptable, http.StatusNotAcceptable, "when Gateway.DeserializedResponses is globally disabled, leaving implicit default on Gateway.PublicGateways[example.com] should inherit the global setting (disabled)"},
{config.False, config.True, http.StatusNotAcceptable, http.StatusOK, "when Gateway.DeserializedResponses is globally disabled, explicitly enabling on Gateway.PublicGateways[example.com] should override global (enabled)"},
{config.True, config.False, http.StatusOK, http.StatusNotAcceptable, "when Gateway.DeserializedResponses is globally enabled, explicitly disabling on Gateway.PublicGateways[example.com] should override global (disabled)"},
} {
t.Run(test.message, makeTest(test))
}
})
}

View File

@ -13,7 +13,7 @@ type Peering struct {
To int
}
func newRandPort() int {
func NewRandPort() int {
n := rand.Int()
return 3000 + (n % 1000)
}
@ -24,7 +24,7 @@ func CreatePeerNodes(t *testing.T, n int, peerings []Peering) (*Harness, Nodes)
nodes.ForEachPar(func(node *Node) {
node.UpdateConfig(func(cfg *config.Config) {
cfg.Routing.Type = config.NewOptionalString("none")
cfg.Addresses.Swarm = []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", newRandPort())}
cfg.Addresses.Swarm = []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", NewRandPort())}
})
})

View File

@ -117,7 +117,23 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) {
})
}
func testSelfFindDHT(t *testing.T) {
t.Run("ipfs routing findpeer fails for self", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(1).Init()
nodes.ForEachPar(func(node *harness.Node) {
node.IPFS("config", "Routing.Type", "dht")
})
nodes.StartDaemons()
res := nodes[0].RunIPFS("dht", "findpeer", nodes[0].PeerID().String())
assert.Equal(t, 1, res.ExitCode())
})
}
func TestRoutingDHT(t *testing.T) {
testRoutingDHT(t, false)
testRoutingDHT(t, true)
testSelfFindDHT(t)
}

View File

@ -35,7 +35,7 @@ func TestSwarm(t *testing.T) {
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
var output expectedOutputType
err := json.Unmarshal(res.Stdout.Bytes(), &output)
assert.Nil(t, err)
assert.NoError(t, err)
assert.Equal(t, 0, len(output.Peers))
})
@ -48,7 +48,7 @@ func TestSwarm(t *testing.T) {
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
var output expectedOutputType
err := json.Unmarshal(res.Stdout.Bytes(), &output)
assert.Nil(t, err)
assert.NoError(t, err)
actualID := output.Peers[0].Identify.ID
actualPublicKey := output.Peers[0].Identify.PublicKey
actualAgentVersion := output.Peers[0].Identify.AgentVersion
@ -78,12 +78,12 @@ func TestSwarm(t *testing.T) {
otherNodeIDResponse := otherNode.RunIPFS("id", "--enc=json")
var otherNodeIDOutput identifyType
err := json.Unmarshal(otherNodeIDResponse.Stdout.Bytes(), &otherNodeIDOutput)
assert.Nil(t, err)
assert.NoError(t, err)
res := node.RunIPFS("swarm", "peers", "--enc=json", "--identify")
var output expectedOutputType
err = json.Unmarshal(res.Stdout.Bytes(), &output)
assert.Nil(t, err)
assert.NoError(t, err)
outputIdentify := output.Peers[0].Identify
assert.Equal(t, outputIdentify.ID, otherNodeIDOutput.ID)

View File

@ -1,6 +1,7 @@
package cli
import (
"fmt"
"os"
"path/filepath"
"testing"
@ -124,4 +125,26 @@ func TestTransports(t *testing.T) {
runTests(nodes)
})
t.Run("QUIC connects with non-dialable transports", func(t *testing.T) {
// This test targets specific Kubo internals which may change later. It checks
// that we can announce an address we do not listen on and still connect
// via a different address that is available.
t.Parallel()
nodes := harness.NewT(t).NewNodes(5).Init()
nodes.ForEachPar(func(n *harness.Node) {
n.UpdateConfig(func(cfg *config.Config) {
// We need a specific port to announce, so we first generate a random port.
// We can't use 0 here to automatically assign an available port because
// that only works for the Swarm listen addresses, not for the announced addresses.
port := harness.NewRandPort()
quicAddr := fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", port)
cfg.Addresses.Swarm = []string{quicAddr}
cfg.Addresses.Announce = []string{quicAddr, quicAddr + "/webtransport"}
})
})
disableRouting(nodes)
nodes.StartDaemons().Connect()
runTests(nodes)
})
}

View File

@ -486,6 +486,14 @@ test_check_ed25519_b58mh_peerid() {
}
}
test_check_secp256k1_b58mh_peerid() {
peeridlen=$(echo "$1" | tr -dC "[:alnum:]" | wc -c | tr -d " ") &&
test "$peeridlen" = "53" || {
echo "Bad SECP256K1 B58MH peerid '$1' with len '$peeridlen'"
return 1
}
}
test_check_rsa2048_base36_peerid() {
peeridlen=$(echo "$1" | tr -dC "[:alnum:]" | wc -c | tr -d " ") &&
test "$peeridlen" = "56" || {
@ -502,6 +510,14 @@ test_check_ed25519_base36_peerid() {
}
}
test_check_secp256k1_base36_peerid() {
peeridlen=$(echo "$1" | tr -dC "[:alnum:]" | wc -c | tr -d " ") &&
test "$peeridlen" = "63" || {
echo "Bad SECP256K1 B36CID peerid '$1' with len '$peeridlen'"
return 1
}
}
convert_tcp_maddr() {
echo $1 | awk -F'/' '{ printf "%s:%s", $3, $5 }'
}

View File

@ -87,12 +87,19 @@ test_rotate() {
}
test_rotate 'rsa' ''
test_rotate 'ed25519' ''
test_rotate 'secp256k1' ''
test_rotate '' ''
test_rotate 'rsa' 'rsa'
test_rotate 'ed25519' 'rsa'
test_rotate 'secp256k1' 'rsa'
test_rotate '' 'rsa'
test_rotate 'rsa' 'ed25519'
test_rotate 'ed25519' 'ed25519'
test_rotate 'secp256k1' 'ed25519'
test_rotate '' 'ed25519'
test_rotate 'rsa' 'secp256k1'
test_rotate 'ed25519' 'secp256k1'
test_rotate 'secp256k1' 'secp256k1'
test_rotate '' 'secp256k1'
test_done

View File

@ -162,14 +162,12 @@ test_localhost_gateway_response_should_contain \
"http://localhost:$GWAY_PORT/ipfs/$DIR_CID/" \
"Location: http://$DIR_CID.ipfs.localhost:$GWAY_PORT/"
# We return body with HTTP 301 so existing cli scripts that use path-based
# gateway do not break (curl doesn't auto-redirect without passing -L; wget
# does not span across hostnames by default)
# Context: https://github.com/ipfs/go-ipfs/issues/6975
# We return a human-readable body with the HTTP 301 so existing CLI scripts that use the
# path-based gateway are informed that they need to follow HTTP redirects
test_localhost_gateway_response_should_contain \
"request for localhost/ipfs/{CIDv1} includes valid payload in body for CLI tools like curl" \
"request for localhost/ipfs/{CIDv1} includes human-readable link and redirect info in HTTP 301 body" \
"http://localhost:$GWAY_PORT/ipfs/$CIDv1" \
"$CID_VAL"
">Moved Permanently</a>"
test_localhost_gateway_response_should_contain \
"request for localhost/ipfs/{CIDv0} redirects to CIDv1 representation in subdomain" \

View File

@ -349,7 +349,7 @@ test_native_dag () {
# As this is generated, we don't return immutable Cache-Control, even on /ipfs (same as for dir-index-html)
test_expect_success "GET $name on /ipfs with Accept: text/html returns HTML (dag-index-html)" '
curl -sD - -H "Accept: text/html" "http://127.0.0.1:$GWAY_PORT/ipfs/$CID" > curl_output 2>&1 &&
curl -sD - -H "Accept: text/html" "http://127.0.0.1:$GWAY_PORT/ipfs/$CID/" > curl_output 2>&1 &&
test_should_not_contain "Content-Disposition" curl_output &&
test_should_not_contain "Cache-Control" curl_output &&
test_should_contain "Etag: \"DagIndex-" curl_output &&
@ -358,7 +358,7 @@ test_native_dag () {
'
test_expect_success "GET $name on /ipns with Accept: text/html returns HTML (dag-index-html)" '
curl -sD - -H "Accept: text/html" "http://127.0.0.1:$GWAY_PORT/ipns/$IPNS_ID" > curl_output 2>&1 &&
curl -sD - -H "Accept: text/html" "http://127.0.0.1:$GWAY_PORT/ipns/$IPNS_ID/" > curl_output 2>&1 &&
test_should_not_contain "Content-Disposition" curl_output &&
test_should_not_contain "Cache-Control" curl_output &&
test_should_contain "Etag: \"DagIndex-" curl_output &&

View File

@ -55,6 +55,29 @@ PEERID=$(ipfs key list --ipns-base=base36 -l | grep key_ed25519 | head -n 1 | cu
test_check_ed25519_base36_peerid $PEERID &&
ipfs key rm key_ed25519
'
test_expect_success "create an SECP256k1 key and test B58MH/B36CID output formats" '
PEERID=$(ipfs key gen --ipns-base=b58mh --type=secp256k1 key_secp256k1) &&
test_check_secp256k1_b58mh_peerid $PEERID &&
ipfs key rm key_secp256k1 &&
PEERID=$(ipfs key gen --ipns-base=base36 --type=secp256k1 key_secp256k1) &&
test_check_secp256k1_base36_peerid $PEERID
'
test_expect_success "test SECP256k1 key sk export format" '
ipfs key export key_secp256k1 &&
test_check_ed25519_sk key_secp256k1.key &&
rm key_secp256k1.key
'
test_expect_success "test SECP256k1 key B58MH/B36CID multihash format" '
PEERID=$(ipfs key list --ipns-base=b58mh -l | grep key_secp256k1 | head -n 1 | cut -d " " -f1) &&
test_check_secp256k1_b58mh_peerid $PEERID &&
PEERID=$(ipfs key list --ipns-base=base36 -l | grep key_secp256k1 | head -n 1 | cut -d " " -f1) &&
test_check_secp256k1_base36_peerid $PEERID &&
ipfs key rm key_secp256k1
'
# end of format test
@ -72,6 +95,11 @@ ipfs key rm key_ed25519
test_key_import_export_all_formats ed25519_key
test_expect_success "create a new secp256k1 key" '
k1hash=$(ipfs key gen generated_secp256k1_key --type=secp256k1)
echo $k1hash > secp256k1_key_id
'
test_openssl_compatibility_all_types
INVALID_KEY=../t0165-keystore-data/openssl_secp384r1.pem
@ -116,6 +144,7 @@ ipfs key rm key_ed25519
test_expect_success "all keys show up in list output" '
echo generated_ed25519_key > list_exp &&
echo generated_rsa_key >> list_exp &&
echo generated_secp256k1_key >> list_exp &&
echo quxel >> list_exp &&
echo self >> list_exp
ipfs key list > list_out &&
@ -135,6 +164,7 @@ ipfs key rm key_ed25519
test_expect_success "key rm remove a key" '
ipfs key rm generated_rsa_key
echo generated_ed25519_key > list_exp &&
echo generated_secp256k1_key >> list_exp &&
echo quxel >> list_exp &&
echo self >> list_exp
ipfs key list > list_out &&
@ -149,6 +179,7 @@ ipfs key rm key_ed25519
test_expect_success "key rename rename a key" '
ipfs key rename generated_ed25519_key fooed
echo fooed > list_exp &&
echo generated_secp256k1_key >> list_exp &&
echo quxel >> list_exp &&
echo self >> list_exp
ipfs key list > list_out &&