mirror of
https://github.com/ipfs/kubo.git
synced 2026-03-02 06:47:51 +08:00
commit
64b532fbb1
@ -1,410 +1,37 @@
|
||||
version: 2.1
|
||||
|
||||
aliases:
|
||||
make_out_dirs: &make_out_dirs
|
||||
run: mkdir -p /tmp/circleci-artifacts /tmp/circleci-workspace /tmp/circleci-test-results/{unit,sharness}
|
||||
restore_gomod: &restore_gomod
|
||||
restore_cache:
|
||||
keys:
|
||||
- v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/go-ipfs/go.sum" }}-{{ .Environment.CIRCLE_JOB }}
|
||||
- v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/go-ipfs/go.sum" }}-
|
||||
- v5-dep-{{ .Branch }}-
|
||||
- v5-dep-master-
|
||||
store_gomod: &store_gomod
|
||||
save_cache:
|
||||
key: v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/go-ipfs/go.sum" }}-{{ .Environment.CIRCLE_JOB }}
|
||||
paths:
|
||||
- ~/go/pkg/mod
|
||||
- ~/.cache/go-build/
|
||||
only-version-tags: &only-version-tags
|
||||
tags:
|
||||
only: /^v[0-9].*/
|
||||
branches:
|
||||
ignore: /.*/
|
||||
|
||||
default_environment: &default_environment
|
||||
SERVICE: circle-ci
|
||||
TRAVIS: 1
|
||||
CIRCLE: 1
|
||||
CIRCLE_TEST_REPORTS: /tmp/circleci-test-results
|
||||
CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
|
||||
GIT_PAGER: cat
|
||||
|
||||
executors:
|
||||
golang:
|
||||
docker:
|
||||
- image: circleci/golang:1.15.2
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
GO111MODULE: "on"
|
||||
TEST_NO_DOCKER: 1
|
||||
TEST_NO_FUSE: 1
|
||||
GOPATH: /home/circleci/go
|
||||
TEST_VERBOSE: 1
|
||||
node:
|
||||
docker:
|
||||
- image: circleci/node:14
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
node-browsers:
|
||||
docker:
|
||||
- image: circleci/node:14-browsers
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
NO_SANDBOX: true
|
||||
IPFS_REUSEPORT: false
|
||||
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
|
||||
E2E_IPFSD_TYPE: go
|
||||
dockerizer:
|
||||
docker:
|
||||
- image: circleci/golang:1.15.2
|
||||
environment:
|
||||
IMAGE_NAME: ipfs/go-ipfs
|
||||
WIP_IMAGE_TAG: wip
|
||||
|
||||
setup: true
|
||||
orbs:
|
||||
continuation: circleci/continuation@0.2.0
|
||||
jobs:
|
||||
gobuild:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
- run:
|
||||
command: make cmd/ipfs-try-build
|
||||
environment:
|
||||
TEST_NO_FUSE: 0
|
||||
- run:
|
||||
command: make cmd/ipfs-try-build
|
||||
environment:
|
||||
TEST_NO_FUSE: 1
|
||||
- *store_gomod
|
||||
golint:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
- run: |
|
||||
make -O test_go_lint
|
||||
- *store_gomod
|
||||
gotest:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
|
||||
- run: |
|
||||
make -j 1 test/unit/gotest.junit.xml \
|
||||
&& [[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
|
||||
- run:
|
||||
when: always
|
||||
command: bash <(curl -s https://codecov.io/bash) -cF unittests -X search -f coverage/unit_tests.coverprofile
|
||||
|
||||
- run:
|
||||
when: always
|
||||
command: mv "test/unit/gotest.junit.xml" /tmp/circleci-test-results/unit
|
||||
|
||||
- *store_gomod
|
||||
|
||||
- store_test_results:
|
||||
path: /tmp/circleci-test-results
|
||||
# Save artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-test-results
|
||||
sharness:
|
||||
machine:
|
||||
image: ubuntu-2004:202010-01
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
GO111MODULE: "on"
|
||||
TEST_NO_DOCKER: 1
|
||||
TEST_NO_FUSE: 1
|
||||
GOPATH: /home/circleci/go
|
||||
TEST_VERBOSE: 1
|
||||
steps:
|
||||
- run: sudo apt update
|
||||
- run: sudo apt install socat net-tools
|
||||
- checkout
|
||||
|
||||
- run:
|
||||
mkdir rb-pinning-service-api &&
|
||||
cd rb-pinning-service-api &&
|
||||
git init &&
|
||||
git remote add origin https://github.com/ipfs-shipyard/rb-pinning-service-api.git &&
|
||||
git fetch --depth 1 origin 773c3adbb421c551d2d89288abac3e01e1f7c3a8 &&
|
||||
git checkout FETCH_HEAD
|
||||
- run:
|
||||
cd rb-pinning-service-api &&
|
||||
docker-compose pull &&
|
||||
docker-compose up -d
|
||||
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
|
||||
- run:
|
||||
name: Setup Environment Variables
|
||||
# we need the docker host IP; all ports exported by child containers can be accessed there.
|
||||
command: echo "export DOCKER_HOST=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+')" >> $BASH_ENV
|
||||
- run:
|
||||
echo DOCKER_HOST=$DOCKER_HOST &&
|
||||
make -O -j 3 coverage/sharness_tests.coverprofile test/sharness/test-results/sharness.xml TEST_GENERATE_JUNIT=1 CONTINUE_ON_S_FAILURE=1 DOCKER_HOST=$DOCKER_HOST
|
||||
|
||||
- run:
|
||||
when: always
|
||||
command: bash <(curl -s https://codecov.io/bash) -cF sharness -X search -f coverage/sharness_tests.coverprofile
|
||||
|
||||
- run: mv "test/sharness/test-results/sharness.xml" /tmp/circleci-test-results/sharness
|
||||
# make sure we fail if there are test failures
|
||||
- run: find test/sharness/test-results -name 't*-*.sh.*.counts' | test/sharness/lib/sharness/aggregate-results.sh | grep 'failed\s*0'
|
||||
|
||||
- *store_gomod
|
||||
|
||||
- store_test_results:
|
||||
path: /tmp/circleci-test-results
|
||||
# Save artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-test-results
|
||||
build:
|
||||
executor: golang
|
||||
generate-params:
|
||||
executor: continuation/default
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
- run:
|
||||
name: Building
|
||||
command: make build
|
||||
- run:
|
||||
name: Storing
|
||||
- run:
|
||||
name: Generate params
|
||||
# for builds on the ipfs/go-ipfs repo, use 2xlarge for faster builds
|
||||
# but since this is not available for many contributors, we otherwise use medium
|
||||
command: |
|
||||
mkdir -p /tmp/circleci-workspace/bin
|
||||
cp cmd/ipfs/ipfs /tmp/circleci-workspace/bin
|
||||
- persist_to_workspace:
|
||||
root: /tmp/circleci-workspace
|
||||
paths:
|
||||
- bin/ipfs
|
||||
- *store_gomod
|
||||
interop:
|
||||
executor: node
|
||||
parallelism: 4
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Installing dependencies
|
||||
command: |
|
||||
npm init -y
|
||||
npm install ipfs@^0.52.2
|
||||
npm install ipfs-interop@^4.0.0
|
||||
npm install mocha-circleci-reporter@0.0.3
|
||||
working_directory: ~/ipfs/go-ipfs/interop
|
||||
- run:
|
||||
name: Running tests
|
||||
command: |
|
||||
mkdir -p /tmp/test-results/interop/
|
||||
export MOCHA_FILE="$(mktemp /tmp/test-results/interop/unit.XXXXXX.xml)"
|
||||
npx ipfs-interop -- -t node -f $(sed -n -e "s|^require('\(.*\)')$|test/\1|p" node_modules/ipfs-interop/test/node.js | circleci tests split) -- --reporter mocha-circleci-reporter
|
||||
working_directory: ~/ipfs/go-ipfs/interop
|
||||
environment:
|
||||
IPFS_REUSEPORT: false
|
||||
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
|
||||
IPFS_GO_EXEC: /tmp/circleci-workspace/bin/ipfs
|
||||
- store_test_results:
|
||||
path: /tmp/test-results
|
||||
go-ipfs-api:
|
||||
executor: golang
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Cloning
|
||||
command: |
|
||||
git clone https://github.com/ipfs/go-ipfs-api.git
|
||||
git -C go-ipfs-api log -1
|
||||
- run:
|
||||
name: Starting the daemon
|
||||
command: /tmp/circleci-workspace/bin/ipfs daemon --init --enable-namesys-pubsub
|
||||
background: true
|
||||
- run:
|
||||
name: Waiting for the daemon
|
||||
no_output_timeout: 30s
|
||||
command: |
|
||||
while ! /tmp/circleci-workspace/bin/ipfs id --api=/ip4/127.0.0.1/tcp/5001 2>/dev/null; do
|
||||
sleep 1
|
||||
done
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-go-api-{{ checksum "~/ipfs/go-ipfs/go-ipfs-api/go.sum" }}
|
||||
- v1-go-api-
|
||||
- run:
|
||||
command: go test -v ./...
|
||||
working_directory: ~/ipfs/go-ipfs/go-ipfs-api
|
||||
- save_cache:
|
||||
key: v1-go-api-{{ checksum "~/ipfs/go-ipfs/go-ipfs-api/go.sum" }}
|
||||
paths:
|
||||
- ~/go/pkg/mod
|
||||
- ~/.cache/go-build/
|
||||
- run:
|
||||
name: Stopping the daemon
|
||||
command: /tmp/circleci-workspace/bin/ipfs shutdown
|
||||
go-ipfs-http-client:
|
||||
executor: golang
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Cloning
|
||||
command: |
|
||||
git clone https://github.com/ipfs/go-ipfs-http-client.git
|
||||
git -C go-ipfs-http-client log -1
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-http-client-{{ checksum "~/ipfs/go-ipfs/go-ipfs-http-client/go.sum" }}
|
||||
- v1-http-client-
|
||||
- run:
|
||||
name: go test -v ./...
|
||||
command: |
|
||||
export PATH=/tmp/circleci-workspace/bin:$PATH
|
||||
go test -v ./...
|
||||
working_directory: ~/ipfs/go-ipfs/go-ipfs-http-client
|
||||
- save_cache:
|
||||
key: v1-http-client-{{ checksum "~/ipfs/go-ipfs/go-ipfs-http-client/go.sum" }}
|
||||
paths:
|
||||
- ~/go/pkg/mod
|
||||
- ~/.cache/go-build/
|
||||
ipfs-webui:
|
||||
executor: node-browsers
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Cloning
|
||||
command: |
|
||||
git clone https://github.com/ipfs-shipyard/ipfs-webui.git
|
||||
git -C ipfs-webui log -1
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-ipfs-webui-{{ checksum "~/ipfs/go-ipfs/ipfs-webui/package-lock.json" }}
|
||||
- v1-ipfs-webui-
|
||||
- run:
|
||||
name: Installing dependencies
|
||||
command: |
|
||||
npm install
|
||||
working_directory: ~/ipfs/go-ipfs/ipfs-webui
|
||||
- run:
|
||||
name: Running upstream tests (finish early if they fail)
|
||||
command: |
|
||||
npm test || circleci-agent step halt
|
||||
working_directory: ~/ipfs/go-ipfs/ipfs-webui
|
||||
- run:
|
||||
name: Running tests with go-ipfs built from current commit
|
||||
command: npm test
|
||||
working_directory: ~/ipfs/go-ipfs/ipfs-webui
|
||||
environment:
|
||||
IPFS_GO_EXEC: /tmp/circleci-workspace/bin/ipfs
|
||||
- save_cache:
|
||||
key: v1-ipfs-webui-{{ checksum "~/ipfs/go-ipfs/ipfs-webui/package-lock.json" }}
|
||||
paths:
|
||||
- ~/ipfs/go-ipfs/ipfs-webui/node_modules
|
||||
docker-build:
|
||||
executor: dockerizer
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
version: "19.03.13"
|
||||
- run:
|
||||
name: Build Docker image
|
||||
command: |
|
||||
docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG .
|
||||
- run:
|
||||
name: Archive Docker image
|
||||
command: docker save -o go-ipfs-image.tar $IMAGE_NAME
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- ./go-ipfs-image.tar
|
||||
docker-push:
|
||||
executor: dockerizer
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
version: "19.03.13"
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: Load archived Docker image
|
||||
command: docker load -i /tmp/workspace/go-ipfs-image.tar
|
||||
- run:
|
||||
name: Publish Docker Image to Docker Hub
|
||||
command: |
|
||||
echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
|
||||
./bin/push-docker-tags.sh $(date -u +%F) "$CIRCLE_SHA1" "$CIRCLE_BRANCH" "$CIRCLE_TAG"
|
||||
|
||||
echo $CIRCLE_REPOSITORY_URL
|
||||
if [ "$CIRCLE_REPOSITORY_URL" = 'git@github.com:ipfs/go-ipfs.git' ]; then
|
||||
resource_class=2xlarge
|
||||
make_jobs=10
|
||||
else
|
||||
resource_class=medium
|
||||
make_jobs=3
|
||||
fi
|
||||
cat \<<- EOF > params.json
|
||||
{
|
||||
"resource_class": "$resource_class",
|
||||
"make_jobs": "$make_jobs"
|
||||
}
|
||||
EOF
|
||||
cat params.json
|
||||
- continuation/continue:
|
||||
parameters: params.json
|
||||
configuration_path: .circleci/main.yml
|
||||
workflows:
|
||||
version: 2
|
||||
|
||||
# Runs for all branches, but not on tags
|
||||
# see: https://circleci.com/docs/2.0/workflows/#executing-workflows-for-a-git-tag
|
||||
test:
|
||||
setup-workflow:
|
||||
jobs:
|
||||
- gobuild
|
||||
- golint
|
||||
- gotest
|
||||
- sharness
|
||||
- build
|
||||
- interop:
|
||||
requires:
|
||||
- build
|
||||
- go-ipfs-api:
|
||||
requires:
|
||||
- build
|
||||
- go-ipfs-http-client:
|
||||
requires:
|
||||
- build
|
||||
- ipfs-webui:
|
||||
requires:
|
||||
- build
|
||||
- docker-build
|
||||
- docker-push:
|
||||
# Requires dockerhub credentials, from circleci context.
|
||||
context: dockerhub
|
||||
requires:
|
||||
- docker-build
|
||||
- golint
|
||||
- gotest
|
||||
- sharness
|
||||
- interop
|
||||
- go-ipfs-api
|
||||
- go-ipfs-http-client
|
||||
- ipfs-webui
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- feat/stabilize-dht
|
||||
|
||||
# NOTE: CircleCI only builds tags if you explicitly filter for them. That
|
||||
# also means tag-based jobs can only depend on other tag-based jobs, so we
|
||||
# use a separate workflow because every job needs to be tagged together.
|
||||
# see: https://circleci.com/docs/2.0/workflows/#executing-workflows-for-a-git-tag
|
||||
docker-on-tag:
|
||||
jobs:
|
||||
- docker-build:
|
||||
filters: *only-version-tags
|
||||
- docker-push:
|
||||
context: dockerhub
|
||||
filters: *only-version-tags
|
||||
requires:
|
||||
- docker-build
|
||||
- generate-params
|
||||
|
||||
366
.circleci/main.yml
Normal file
366
.circleci/main.yml
Normal file
@ -0,0 +1,366 @@
|
||||
version: 2.1
|
||||
|
||||
parameters:
|
||||
resource_class:
|
||||
type: string
|
||||
default: medium
|
||||
make_jobs:
|
||||
type: string
|
||||
default: 3
|
||||
|
||||
aliases:
|
||||
make_out_dirs: &make_out_dirs
|
||||
run: mkdir -p /tmp/circleci-artifacts /tmp/circleci-workspace /tmp/circleci-test-results/{unit,sharness}
|
||||
restore_gomod: &restore_gomod
|
||||
restore_cache:
|
||||
keys:
|
||||
- v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/go-ipfs/go.sum" }}-{{ .Environment.CIRCLE_JOB }}
|
||||
- v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/go-ipfs/go.sum" }}-
|
||||
- v5-dep-{{ .Branch }}-
|
||||
- v5-dep-master-
|
||||
store_gomod: &store_gomod
|
||||
save_cache:
|
||||
key: v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/go-ipfs/go.sum" }}-{{ .Environment.CIRCLE_JOB }}
|
||||
paths:
|
||||
- ~/go/pkg/mod
|
||||
- ~/.cache/go-build/
|
||||
|
||||
default_environment: &default_environment
|
||||
SERVICE: circle-ci
|
||||
TRAVIS: 1
|
||||
CIRCLE: 1
|
||||
CIRCLE_TEST_REPORTS: /tmp/circleci-test-results
|
||||
CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
|
||||
GIT_PAGER: cat
|
||||
|
||||
executors:
|
||||
golang:
|
||||
docker:
|
||||
- image: cimg/go:1.16.7
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
TEST_NO_DOCKER: 1
|
||||
TEST_NO_FUSE: 1
|
||||
TEST_VERBOSE: 1
|
||||
node:
|
||||
docker:
|
||||
- image: circleci/node:14
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
node-browsers:
|
||||
docker:
|
||||
- image: circleci/node:14-browsers
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
NO_SANDBOX: true
|
||||
IPFS_REUSEPORT: false
|
||||
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
|
||||
E2E_IPFSD_TYPE: go
|
||||
dockerizer:
|
||||
docker:
|
||||
- image: cimg/go:1.16.7
|
||||
environment:
|
||||
IMAGE_NAME: ipfs/go-ipfs
|
||||
WIP_IMAGE_TAG: wip
|
||||
|
||||
jobs:
|
||||
gobuild:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
- run:
|
||||
command: make cmd/ipfs-try-build
|
||||
environment:
|
||||
TEST_NO_FUSE: 0
|
||||
- run:
|
||||
command: make cmd/ipfs-try-build
|
||||
environment:
|
||||
TEST_NO_FUSE: 1
|
||||
- *store_gomod
|
||||
golint:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
- run: |
|
||||
make -O test_go_lint
|
||||
- *store_gomod
|
||||
gotest:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
|
||||
- run: |
|
||||
make -j 1 test/unit/gotest.junit.xml \
|
||||
&& [[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
|
||||
- run:
|
||||
when: always
|
||||
command: bash <(curl -s https://codecov.io/bash) -cF unittests -X search -f coverage/unit_tests.coverprofile
|
||||
- run:
|
||||
command: go test -v ./...
|
||||
working_directory: ~/ipfs/go-ipfs/docs/examples/go-ipfs-as-a-library
|
||||
|
||||
- run:
|
||||
when: always
|
||||
command: mv "test/unit/gotest.junit.xml" /tmp/circleci-test-results/unit
|
||||
|
||||
- *store_gomod
|
||||
|
||||
- store_test_results:
|
||||
path: /tmp/circleci-test-results
|
||||
# Save artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-test-results
|
||||
sharness:
|
||||
machine:
|
||||
image: ubuntu-2004:202010-01
|
||||
resource_class: << pipeline.parameters.resource_class >>
|
||||
working_directory: ~/ipfs/go-ipfs
|
||||
environment:
|
||||
<<: *default_environment
|
||||
TEST_NO_DOCKER: 1
|
||||
TEST_NO_FUSE: 1
|
||||
TEST_VERBOSE: 1
|
||||
steps:
|
||||
- run: sudo apt update
|
||||
- run: |
|
||||
mkdir ~/localgo && cd ~/localgo
|
||||
wget https://golang.org/dl/go1.16.7.linux-amd64.tar.gz
|
||||
tar xfz go1.16.7.linux-amd64.tar.gz
|
||||
echo "export PATH=$(pwd)/go/bin:\$PATH" >> ~/.bashrc
|
||||
- run: go version
|
||||
- run: sudo apt install socat net-tools
|
||||
- checkout
|
||||
|
||||
- run:
|
||||
mkdir rb-pinning-service-api &&
|
||||
cd rb-pinning-service-api &&
|
||||
git init &&
|
||||
git remote add origin https://github.com/ipfs-shipyard/rb-pinning-service-api.git &&
|
||||
git fetch --depth 1 origin 773c3adbb421c551d2d89288abac3e01e1f7c3a8 &&
|
||||
git checkout FETCH_HEAD
|
||||
- run:
|
||||
cd rb-pinning-service-api &&
|
||||
docker-compose pull &&
|
||||
docker-compose up -d
|
||||
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
|
||||
- run:
|
||||
name: Setup Environment Variables
|
||||
# we need the docker host IP; all ports exported by child containers can be accessed there.
|
||||
command: echo "export TEST_DOCKER_HOST=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+')" >> $BASH_ENV
|
||||
- run:
|
||||
echo TEST_DOCKER_HOST=$TEST_DOCKER_HOST &&
|
||||
make -O -j << pipeline.parameters.make_jobs >> coverage/sharness_tests.coverprofile test/sharness/test-results/sharness.xml TEST_GENERATE_JUNIT=1 CONTINUE_ON_S_FAILURE=1 TEST_DOCKER_HOST=$TEST_DOCKER_HOST
|
||||
- run:
|
||||
when: always
|
||||
command: bash <(curl -s https://codecov.io/bash) -cF sharness -X search -f coverage/sharness_tests.coverprofile
|
||||
|
||||
- run: mv "test/sharness/test-results/sharness.xml" /tmp/circleci-test-results/sharness
|
||||
# make sure we fail if there are test failures
|
||||
- run: find test/sharness/test-results -name 't*-*.sh.*.counts' | test/sharness/lib/sharness/aggregate-results.sh | grep 'failed\s*0'
|
||||
|
||||
- *store_gomod
|
||||
|
||||
- store_test_results:
|
||||
path: /tmp/circleci-test-results
|
||||
# Save artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-artifacts
|
||||
- store_artifacts:
|
||||
path: /tmp/circleci-test-results
|
||||
build:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- *make_out_dirs
|
||||
- *restore_gomod
|
||||
- run:
|
||||
name: Building
|
||||
command: make build
|
||||
- run:
|
||||
name: Storing
|
||||
command: |
|
||||
mkdir -p /tmp/circleci-workspace/bin
|
||||
cp cmd/ipfs/ipfs /tmp/circleci-workspace/bin
|
||||
- persist_to_workspace:
|
||||
root: /tmp/circleci-workspace
|
||||
paths:
|
||||
- bin/ipfs
|
||||
- *store_gomod
|
||||
interop:
|
||||
executor: node
|
||||
parallelism: 4
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Installing dependencies
|
||||
command: |
|
||||
npm init -y
|
||||
npm install ipfs@^0.52.2
|
||||
npm install ipfs-interop@^4.0.0
|
||||
npm install mocha-circleci-reporter@0.0.3
|
||||
working_directory: ~/ipfs/go-ipfs/interop
|
||||
- run:
|
||||
name: Running tests
|
||||
command: |
|
||||
mkdir -p /tmp/test-results/interop/
|
||||
export MOCHA_FILE="$(mktemp /tmp/test-results/interop/unit.XXXXXX.xml)"
|
||||
npx ipfs-interop -- -t node -f $(sed -n -e "s|^require('\(.*\)')$|test/\1|p" node_modules/ipfs-interop/test/node.js | circleci tests split) -- --reporter mocha-circleci-reporter
|
||||
working_directory: ~/ipfs/go-ipfs/interop
|
||||
environment:
|
||||
IPFS_REUSEPORT: false
|
||||
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
|
||||
IPFS_GO_EXEC: /tmp/circleci-workspace/bin/ipfs
|
||||
- store_test_results:
|
||||
path: /tmp/test-results
|
||||
go-ipfs-api:
|
||||
executor: golang
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Cloning
|
||||
command: |
|
||||
git clone https://github.com/ipfs/go-ipfs-api.git
|
||||
git -C go-ipfs-api log -1
|
||||
- run:
|
||||
name: Starting the daemon
|
||||
command: /tmp/circleci-workspace/bin/ipfs daemon --init --enable-namesys-pubsub
|
||||
background: true
|
||||
- run:
|
||||
name: Waiting for the daemon
|
||||
no_output_timeout: 30s
|
||||
command: |
|
||||
while ! /tmp/circleci-workspace/bin/ipfs id --api=/ip4/127.0.0.1/tcp/5001 2>/dev/null; do
|
||||
sleep 1
|
||||
done
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-go-api-{{ checksum "~/ipfs/go-ipfs/go-ipfs-api/go.sum" }}
|
||||
- v1-go-api-
|
||||
- run:
|
||||
command: go test -v ./...
|
||||
working_directory: ~/ipfs/go-ipfs/go-ipfs-api
|
||||
- save_cache:
|
||||
key: v1-go-api-{{ checksum "~/ipfs/go-ipfs/go-ipfs-api/go.sum" }}
|
||||
paths:
|
||||
- ~/go/pkg/mod
|
||||
- ~/.cache/go-build/
|
||||
- run:
|
||||
name: Stopping the daemon
|
||||
command: /tmp/circleci-workspace/bin/ipfs shutdown
|
||||
go-ipfs-http-client:
|
||||
executor: golang
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Cloning
|
||||
command: |
|
||||
git clone https://github.com/ipfs/go-ipfs-http-client.git
|
||||
git -C go-ipfs-http-client log -1
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-http-client-{{ checksum "~/ipfs/go-ipfs/go-ipfs-http-client/go.sum" }}
|
||||
- v1-http-client-
|
||||
- run:
|
||||
name: go test -v ./...
|
||||
command: |
|
||||
export PATH=/tmp/circleci-workspace/bin:$PATH
|
||||
go test -v ./...
|
||||
working_directory: ~/ipfs/go-ipfs/go-ipfs-http-client
|
||||
- save_cache:
|
||||
key: v1-http-client-{{ checksum "~/ipfs/go-ipfs/go-ipfs-http-client/go.sum" }}
|
||||
paths:
|
||||
- ~/go/pkg/mod
|
||||
- ~/.cache/go-build/
|
||||
ipfs-webui:
|
||||
executor: node-browsers
|
||||
steps:
|
||||
- *make_out_dirs
|
||||
- attach_workspace:
|
||||
at: /tmp/circleci-workspace
|
||||
- run:
|
||||
name: Cloning
|
||||
command: |
|
||||
git clone https://github.com/ipfs-shipyard/ipfs-webui.git
|
||||
git -C ipfs-webui log -1
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-ipfs-webui-{{ checksum "~/ipfs/go-ipfs/ipfs-webui/package-lock.json" }}
|
||||
- v1-ipfs-webui-
|
||||
- run:
|
||||
name: Installing dependencies
|
||||
command: |
|
||||
npm install
|
||||
working_directory: ~/ipfs/go-ipfs/ipfs-webui
|
||||
- run:
|
||||
name: Running upstream tests (finish early if they fail)
|
||||
command: |
|
||||
npm test || circleci-agent step halt
|
||||
working_directory: ~/ipfs/go-ipfs/ipfs-webui
|
||||
- run:
|
||||
name: Running tests with go-ipfs built from current commit
|
||||
command: npm test
|
||||
working_directory: ~/ipfs/go-ipfs/ipfs-webui
|
||||
environment:
|
||||
IPFS_GO_EXEC: /tmp/circleci-workspace/bin/ipfs
|
||||
- save_cache:
|
||||
key: v1-ipfs-webui-{{ checksum "~/ipfs/go-ipfs/ipfs-webui/package-lock.json" }}
|
||||
paths:
|
||||
- ~/ipfs/go-ipfs/ipfs-webui/node_modules
|
||||
# We only run build as a test here. DockerHub images are built and published
|
||||
# by Github Action now: https://github.com/ipfs/go-ipfs/pull/8467
|
||||
docker-build:
|
||||
executor: dockerizer
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
version: "19.03.13"
|
||||
- run:
|
||||
name: Build Docker image
|
||||
command: |
|
||||
docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG .
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
|
||||
# Runs for all branches, but not on tags
|
||||
# see: https://circleci.com/docs/2.0/workflows/#executing-workflows-for-a-git-tag
|
||||
test:
|
||||
jobs:
|
||||
- gobuild
|
||||
- golint
|
||||
- gotest
|
||||
- sharness
|
||||
- build
|
||||
- interop:
|
||||
requires:
|
||||
- build
|
||||
- go-ipfs-api:
|
||||
requires:
|
||||
- build
|
||||
- go-ipfs-http-client:
|
||||
requires:
|
||||
- build
|
||||
- ipfs-webui:
|
||||
requires:
|
||||
- build
|
||||
- docker-build
|
||||
23
.github/ISSUE_TEMPLATE/bug-report.md
vendored
23
.github/ISSUE_TEMPLATE/bug-report.md
vendored
@ -1,23 +0,0 @@
|
||||
---
|
||||
name: 'Bug Report'
|
||||
about: 'Report a bug in go-ipfs.'
|
||||
labels: kind/bug, need/triage
|
||||
---
|
||||
|
||||
<!-- Please report security issues by email to security@ipfs.io -->
|
||||
|
||||
#### Version information:
|
||||
<!-- Output From `ipfs version --all`
|
||||
|
||||
Please check dist.ipfs.io for a newer version of go-ipfs and update if necessary. Report back if the problem persists.
|
||||
|
||||
If you can't run `ipfs version --all` or that command fails, include as much information as you can: IPFS version, computer architecture (e.g., Intel x86 64bit), operating system, etc. -->
|
||||
|
||||
#### Description:
|
||||
<!-- This is where you get to tell us what went wrong. When doing so, please make sure to include *all* relevant information.
|
||||
|
||||
Please try to include:
|
||||
* What you were doing when you experienced the bug.
|
||||
* Any error messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
|
||||
* When possible, steps to reliably produce the bug.
|
||||
-->
|
||||
62
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
Normal file
62
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
name: Bug Report
|
||||
description: Report a bug in go-ipfs.
|
||||
labels:
|
||||
- kind/bug
|
||||
- need/triage
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
- Make sure you are running the [latest version of go-ipfs][releases] before reporting an issue.
|
||||
- If you have an enhancement or feature request for go-ipfs, please select [a different option][issues].
|
||||
- Please report possible security issues by email to security@ipfs.io
|
||||
|
||||
[issues]: https://github.com/ipfs/go-ipfs/issues/new/choose
|
||||
[releases]: https://github.com/ipfs/go-ipfs/releases
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Checklist
|
||||
description: Please verify that you've followed these steps
|
||||
options:
|
||||
- label: This is a bug report, not a question. Ask questions on [discuss.ipfs.io](https://discuss.ipfs.io).
|
||||
required: true
|
||||
- label: I have searched on the [issue tracker](https://github.com/ipfs/go-ipfs/issues?q=is%3Aissue) for my bug.
|
||||
required: true
|
||||
- label: I am running the latest [go-ipfs version](https://dist.ipfs.io/#go-ipfs) or have an issue updating.
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: install
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Installation method
|
||||
description: Please select your installation method
|
||||
options:
|
||||
- ipfs-desktop
|
||||
- ipfs-update or dist.ipfs.io
|
||||
- third-party binary
|
||||
- built from source
|
||||
- type: textarea
|
||||
id: version
|
||||
attributes:
|
||||
label: Version
|
||||
render: Text
|
||||
description: |
|
||||
Enter the output of `ipfs version --all`. If you can't run that command, please include a copy of your [gateway's version page](http://localhost:8080/api/v0/version?enc=text&all=true).
|
||||
- type: textarea
|
||||
id: config
|
||||
attributes:
|
||||
label: Config
|
||||
render: json
|
||||
description: |
|
||||
Enter the output of `ipfs config show`.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Description
|
||||
description: |
|
||||
This is where you get to tell us what went wrong. When doing so, please make sure to include *all* relevant information.
|
||||
|
||||
Please try to include:
|
||||
* What you were doing when you experienced the bug.
|
||||
* Any error messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
|
||||
* When possible, steps to reliably produce the bug.
|
||||
14
.github/ISSUE_TEMPLATE/doc.md
vendored
14
.github/ISSUE_TEMPLATE/doc.md
vendored
@ -1,14 +0,0 @@
|
||||
---
|
||||
name: 'Documentation Issue'
|
||||
about: 'Report missing, erroneous docs, broken links or propose new go-ipfs docs'
|
||||
labels: topic/docs-ipfs, need/triage
|
||||
---
|
||||
<!-- Problems with documentation on https://docs.ipfs.io should be reported to https://github.com/ipfs/ipfs-docs -->
|
||||
|
||||
#### Location
|
||||
|
||||
<!-- In the case of missing/erroneous documentation, where is the error? If possible, a link/URL would be great! -->
|
||||
|
||||
#### Description
|
||||
|
||||
<!-- Describe the documentation issue. -->
|
||||
29
.github/ISSUE_TEMPLATE/doc.yml
vendored
Normal file
29
.github/ISSUE_TEMPLATE/doc.yml
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
name: Documentation Issue
|
||||
description: Report missing, erroneous docs, broken links or propose new go-ipfs docs.
|
||||
labels:
|
||||
- topic/docs-ipfs
|
||||
- need/triage
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Problems with documentation on https://docs.ipfs.io should be reported to https://github.com/ipfs/ipfs-docs
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Checklist
|
||||
description: Please verify the following.
|
||||
options:
|
||||
- label: I am reporting a documentation issue in this repo, not https://docs.ipfs.io.
|
||||
required: true
|
||||
- label: I have searched on the [issue tracker](https://github.com/ipfs/go-ipfs/issues?q=is%3Aissue) for my issue.
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: Location
|
||||
description: |
|
||||
If possible, please provide a link to the documentation issue.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Description
|
||||
description: |
|
||||
Please describe your issue.
|
||||
11
.github/ISSUE_TEMPLATE/enhancement.md
vendored
11
.github/ISSUE_TEMPLATE/enhancement.md
vendored
@ -1,11 +0,0 @@
|
||||
---
|
||||
name: 'Enhancement'
|
||||
about: 'Suggest an improvement to an existing go-ipfs feature.'
|
||||
labels: kind/enhancement
|
||||
---
|
||||
|
||||
<!--
|
||||
Note: If you'd like to suggest an idea related to IPFS but not specifically related to the Go implementation, please post in https://discuss.ipfs.io instead.
|
||||
|
||||
When requesting an _enhancement_, please be sure to include your motivation and try to be as specific as possible.
|
||||
-->
|
||||
33
.github/ISSUE_TEMPLATE/enhancement.yml
vendored
Normal file
33
.github/ISSUE_TEMPLATE/enhancement.yml
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
name: Enhancement
|
||||
description: Suggest an improvement to an existing go-ipfs feature.
|
||||
labels:
|
||||
- kind/enhancement
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Suggest an enhancement to go-ipfs (the program). If you'd like to suggest an improvement to the IPFS protocol, please discuss it on [the forum](https://discuss.ipfs.io).
|
||||
|
||||
Issues in this repo must be specific, actionable, and well motivated. They should be starting points for _building_ new features, not brainstorming ideas.
|
||||
|
||||
If you have an idea you'd like to discuss, please open a new thread on [the forum](https://discuss.ipfs.io).
|
||||
|
||||
**Example:**
|
||||
|
||||
> Reduce memory usage of `ipfs cat` (specific) by buffering less in ... (actionable). This would let me run go-ipfs on my Raspberry Pi (motivated).
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Checklist
|
||||
description: Please verify the following.
|
||||
options:
|
||||
- label: My issue is specific & actionable.
|
||||
required: true
|
||||
- label: I am not suggesting a protocol enhancement.
|
||||
required: true
|
||||
- label: I have searched on the [issue tracker](https://github.com/ipfs/go-ipfs/issues?q=is%3Aissue) for my issue.
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Description
|
||||
description: |
|
||||
Please describe your idea. When requesting an enhancement, please be sure to include your motivation and try to be as specific as possible.
|
||||
15
.github/ISSUE_TEMPLATE/feature.md
vendored
15
.github/ISSUE_TEMPLATE/feature.md
vendored
@ -1,15 +0,0 @@
|
||||
---
|
||||
name: 'Feature'
|
||||
about: 'Suggest a new feature in go-ipfs.'
|
||||
labels: kind/feature, need/triage
|
||||
---
|
||||
|
||||
<!--
|
||||
Note: If you'd like to suggest an idea related to IPFS but not specifically related to the Go implementation, please post in https://discuss.ipfs.io instead.
|
||||
|
||||
When requesting a _feature_, please be sure to include:
|
||||
* Your motivation. Why do you need the feature?
|
||||
* How the feature should work.
|
||||
|
||||
Please try to be as specific and concrete as possible.
|
||||
-->
|
||||
34
.github/ISSUE_TEMPLATE/feature.yml
vendored
Normal file
34
.github/ISSUE_TEMPLATE/feature.yml
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
name: Feature
|
||||
description: Suggest a new feature in go-ipfs.
|
||||
labels:
|
||||
- kind/feature
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Suggest a new feature in go-ipfs (the program). If you'd like to suggest an improvement to the IPFS protocol, please discuss it on [the forum](https://discuss.ipfs.io).
|
||||
|
||||
Issues in this repo must be specific, actionable, and well motivated. They should be starting points for _building_ new features, not brainstorming ideas.
|
||||
|
||||
If you have an idea you'd like to discuss, please open a new thread on [the forum](https://discuss.ipfs.io).
|
||||
|
||||
**Example:**
|
||||
|
||||
> Add deduplication-optimized chunking of tar files in `ipfs add` (specific) by examining tar headers ... (actionable). This would let me efficiently store and update many versions of code archives (motivated).
|
||||
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Checklist
|
||||
description: Please verify the following.
|
||||
options:
|
||||
- label: My issue is specific & actionable.
|
||||
required: true
|
||||
- label: I am not suggesting a protocol enhancement.
|
||||
required: true
|
||||
- label: I have searched on the [issue tracker](https://github.com/ipfs/go-ipfs/issues?q=is%3Aissue) for my issue.
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Description
|
||||
description: |
|
||||
Please describe your idea. When requesting a feature, please be sure to include your motivation and and a concrete description of how the feature should work.
|
||||
34
.github/workflows/docker-image.yml
vendored
Normal file
34
.github/workflows/docker-image.yml
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
name: Publish Docker image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- 'bifrost-*'
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
jobs:
|
||||
push_to_registry:
|
||||
name: Push Docker image to Docker Hub
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
IMAGE_NAME: ipfs/go-ipfs
|
||||
WIP_IMAGE_TAG: wip
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Build wip Docker image
|
||||
run: docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG .
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Publish Docker Image to Docker Hub
|
||||
run: ./bin/push-docker-tags.sh $(date -u +%F)
|
||||
|
||||
605
CHANGELOG.md
605
CHANGELOG.md
@ -1,5 +1,610 @@
|
||||
# go-ipfs changelog
|
||||
|
||||
## v0.10.0 2021-09-30
|
||||
|
||||
We're happy to announce go-ipfs 0.10.0. This release brings some big changes to the IPLD internals of go-ipfs that make working with non-UnixFS DAGs easier than ever. There are also a variety of new commands and configuration options available.
|
||||
|
||||
As usual, this release includes important fixes, some of which may be critical for security. Unless the fix addresses a bug being exploited in the wild, the fix will _not_ be called out in the release notes. Please make sure to update ASAP. See our [release process](https://github.com/ipfs/go-ipfs/tree/master/docs/releases.md#security-fix-policy) for details.
|
||||
|
||||
### 🛠 TLDR: BREAKING CHANGES
|
||||
|
||||
- `ipfs dag get`
|
||||
- default output changed to [`dag-json`](https://ipld.io/specs/codecs/dag-json/spec/)
|
||||
- dag-pb (e.g. unixfs) field names changed - impacts userland code that works with `dag-pb` objects returned by `dag get`
|
||||
- no longer emits an additional new-line character at the end of the data output
|
||||
- `ipfs dag put`
|
||||
- defaults changed to reduce ambiguity and surprises: input is now assumed to be [`dag-json`](https://ipld.io/specs/codecs/dag-json/spec/), and data is serialized to [`dag-cbor`](https://ipld.io/specs/codecs/dag-cbor/spec/) at rest.
|
||||
- `--format` and `--input-enc` were removed and replaced with `--store-codec` and `--input-codec`
|
||||
- codec names now match the ones defined in the [multicodec table](https://github.com/multiformats/multicodec/blob/master/table.csv)
|
||||
- dag-pb (e.g. unixfs) field names changed - impacts userland code that works with `dag-pb` objects stored via `dag put`
|
||||
|
||||
Keep reading to learn more details.
|
||||
|
||||
### 🔦 Highlights
|
||||
|
||||
#### 🌲 IPLD Levels Up
|
||||
|
||||
The handling of data serialization as well as many aspects of DAG traversal and pathing have been migrated from older libraries, including [go-merkledag](https://github.com/ipfs/go-merkledag) and [go-ipld-format](https://github.com/ipfs/go-ipld-format) to the new **[go-ipld-prime](https://github.com/ipld/go-ipld-prime)** library and its components. This allows us to use many of the newer tools afforded by go-ipld-prime, stricter and more uniform codec implementations, support for additional (pluggable) codecs, and some minor performance improvements.
|
||||
|
||||
This is significant refactor of a core component that touches many parts of IPFS, and does come with some **breaking changes**:
|
||||
|
||||
* **IPLD plugins**:
|
||||
* The `PluginIPLD` interface has been changed to utilize go-ipld-prime. There is a demonstration of the change in the [bundled git plugin](./plugin/plugins/git/).
|
||||
* **The semantics of `dag put` and `dag get` change**:
|
||||
* `dag get` now takes the `output-codec` option which accepts a [multicodec](https://docs.ipfs.io/concepts/glossary/#multicodec) name used to encode the output. By default this is `dag-json`, which is a strict and deterministic subset of JSON created by the IPLD team. Users may notice differences from the previously plain Go JSON output, particularly where bytes are concerned which are now encoded using a form similar to CIDs: `{"/":{"bytes":"unpadded-base64-bytes"}}` rather than the previously Go-specific plain padded base64 string. See the [dag-json specification](https://ipld.io/specs/codecs/dag-json/spec/) for an explanation of these forms.
|
||||
* `dag get` no longer prints an additional new-line character at the end of the encoded block output. This means that the output as presented by `dag get` are the exact bytes of the requested node. A round-trip of such bytes back in through `dag put` using the same codec should result in the same CID.
|
||||
* `dag put` uses the `input-codec` option to specify the multicodec name of the format data is being provided in, and the `store-codec` option to specify the multicodec name of the format the data should be stored in at rest. These formerly defaulted to `json` and `cbor` respectively. They now default to `dag-json` and `dag-cbor` respectively but may be changed to any supported codec (bundled or loaded via plugin) by its [multicodec name](https://github.com/multiformats/multicodec/blob/master/table.csv).
|
||||
* The `json` and `cbor` multicodec names (as used by `input-enc` and `format` options) are now no longer aliases for `dag-json` and `dag-cbor` respectively. Instead, they now refer to their proper [multicodec](https://github.com/multiformats/multicodec/blob/master/table.csv) types. `cbor` refers to a plain CBOR format, which will not encode CIDs and does not have strict deterministic encoding rules. `json` is a plain JSON format, which also won't encode CIDs and will encode bytes in the Go-specific padded base64 string format rather than the dag-json method of byte encoding. See https://ipld.io/specs/codecs/ for more information on IPLD codecs.
|
||||
* `protobuf` is no longer used as the codec name for `dag-pb`
|
||||
* The codec name `raw` is used to mean Bytes in the [IPLD Data Model](https://github.com/ipld/specs/blob/master/data-model-layer/data-model.md#bytes-kind)
|
||||
* **UnixFS refactor**. The **dag-pb codec**, which is used to encode UnixFS data for IPFS, is now represented through the `dag` API in a form that mirrors the protobuf schema used to define the binary format. This unifies the implementations and specification of dag-pb across the IPLD and IPFS stacks. Previously, additional layers of code for file and directory handling within IPFS between protobuf serialization and UnixFS obscured the protobuf representation. Much of this code has now been replaced and there are fewer layers of transformation. This means that interacting with dag-pb data via the `dag` API will use different forms:
|
||||
* Previously, using `dag get` on a dag-pb block would present the block serialized as JSON as `{"data":"padded-base64-bytes","links":[{"Name":"foo","Size":100,"Cid":{"/":"Qm..."}},...]}`.
|
||||
* Now, the dag-pb data with dag-json codec for output will be serialized using the data model from the [dag-pb specification](https://ipld.io/specs/codecs/dag-pb/spec/): `{"Data":{"/":{"bytes":"unpadded-base64-bytes"}},"Links":[{"Name":"foo","Tsize":100,"Hash":{"/":"Qm..."}},...]}`. Aside from the change in byte formatting, most field names have changed: `data` → `Data`, `links` → `Links`, `Size` → `Tsize`, `Cid` → `Hash`. Note that this output can be changed now using the `output-codec` option to specify an alternative codec.
|
||||
* Similarly, using `dag put` and a `store-codec` option of `dag-pb` now requires that the input conform to this dag-pb specified form. Previously, input using `{"data":"...","links":[...]}` was accepted, now it must be `{"Data":"...","Links":[...]}`.
|
||||
* Previously it was not possible to use paths to navigate to any of these properties of a dag-pb node, the only possible paths were named links, e.g. `dag get QmFoo/NamedLink` where `NamedLink` was one of the links whose name was `NamedLink`. This functionality remains the same, but by prefixing the path with `/ipld/` we enter data model pathing semantics and can `dag get /ipld/QmFoo/Links/0/Hash` to navigate to links or `/ipld/QmFoo/Data` to simply retrieve the data section of the node, for example.
|
||||
* ℹ See the [dag-pb specification](https://ipld.io/specs/codecs/dag-pb/) for details on the codec and its data model representation.
|
||||
* ℹ See this [detailed write-up](https://github.com/ipld/ipld/blob/master/design/tricky-choices/dag-pb-forms-impl-and-use.md) for further background on these changes.
|
||||
|
||||
#### Ⓜ Multibase Command
|
||||
|
||||
go-ipfs now provides utility commands for working with [multibase](https://docs.ipfs.io/concepts/glossary/#multibase):
|
||||
|
||||
```console
|
||||
$ echo -n hello | ipfs multibase encode -b base16 > file-mbase16
|
||||
$ cat file-mbase16
|
||||
f68656c6c6f
|
||||
|
||||
$ ipfs multibase decode file-mbase16
|
||||
hello
|
||||
|
||||
$ cat file-mbase16 | ipfs multibase decode
|
||||
hello
|
||||
|
||||
$ ipfs multibase transcode -b base2 file-mbase16
|
||||
00110100001100101011011000110110001101111
|
||||
```
|
||||
|
||||
See `ipfs multibase --help` for more examples.
|
||||
|
||||
#### 🔨 Bitswap now supports greater configurability
|
||||
|
||||
This release adds an [`Internal` section](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#internal) to the configuration file that is designed to help advanced users optimize their setups without needing a custom binary. The `Internal` section is not guaranteed to be the same from release to release and may not be covered by migrations. If you use the `Internal` section you should be making sure to check the config documentation between releases for any changes.
|
||||
|
||||
#### 🐚 Programmatic shell completions command
|
||||
|
||||
`ipfs commands completion bash` will generate a bash completion script for go-ipfs commands
|
||||
|
||||
#### 📜 Profile collection command
|
||||
|
||||
Performance profiles can now be collected using `ipfs diag profile`. If you need to do some debugging or have an issue to submit the collected profiles are very useful to have around.
|
||||
|
||||
#### 🍎 Mac OS notarized binaries
|
||||
|
||||
The go-ipfs and related migration binaries (for both Intel and Apple Sillicon) are now signed and notarized to make Mac OS installation easier.
|
||||
|
||||
#### 👨👩👦 Improved MDNS
|
||||
|
||||
There is a completed implementation of the revised libp2p MDNS spec. This should result in better MDNS discovery and better local/offline operation as a result.
|
||||
|
||||
#### 🚗 CAR import statistics
|
||||
|
||||
`dag import` command now supports `--stats` option which will include the number of imported blocks and their total size in the output.
|
||||
|
||||
#### 🕸 Peering command
|
||||
|
||||
This release adds `swarm peering` command for easy management of the peering subsystem. Peer in the peering subsystem is maintained to be connected at all times, and gets reconnected on disconnect with a back-off.
|
||||
|
||||
See `ipfs swarm peering --help` for more details.
|
||||
|
||||
### Changelog
|
||||
|
||||
- github.com/ipfs/go-ipfs:
|
||||
- fuse: load unixfs adls as their dagpb substrates
|
||||
- enable the legacy mDNS implementation
|
||||
- test: add dag get --ouput-codec test
|
||||
- change ipfs dag get flag name from format to output-codec
|
||||
- test: check behavior of loading UnixFS sharded directories with missing shards
|
||||
- remove dag put option shortcuts
|
||||
- change names of ipfs dag put flags to make changes clearer
|
||||
- feat: dag import --stats (#8237) ([ipfs/go-ipfs#8237](https://github.com/ipfs/go-ipfs/pull/8237))
|
||||
- feat: ipfs-webui v2.13.0 (#8430) ([ipfs/go-ipfs#8430](https://github.com/ipfs/go-ipfs/pull/8430))
|
||||
- feat(cli): add daemon option --agent-version-suffix (#8419) ([ipfs/go-ipfs#8419](https://github.com/ipfs/go-ipfs/pull/8419))
|
||||
- feat: multibase transcode command (#8403) ([ipfs/go-ipfs#8403](https://github.com/ipfs/go-ipfs/pull/8403))
|
||||
- fix: take the lock while listing peers
|
||||
- feature: 'ipfs swarm peering' command (#8147) ([ipfs/go-ipfs#8147](https://github.com/ipfs/go-ipfs/pull/8147))
|
||||
- fix(sharness): add extra check in flush=false in files write
|
||||
- chore: update IPFS Desktop testing steps (#8393) ([ipfs/go-ipfs#8393](https://github.com/ipfs/go-ipfs/pull/8393))
|
||||
- add more buttons; remove some sections covered in the docs; general cleanup
|
||||
- Cosmetic fixups in examples (#8325) ([ipfs/go-ipfs#8325](https://github.com/ipfs/go-ipfs/pull/8325))
|
||||
- perf: use performance-enhancing FUSE mount options
|
||||
- ci: publish Docker images for bifrost-* branches
|
||||
- chore: add comments to peerlog plugin about being unsupported
|
||||
- test: add unit tests for peerlog config parsing
|
||||
- ci: preload peerlog plugin, disable by default
|
||||
- fix(mkreleaselog): specify the parent commit when diffing
|
||||
- update go-libp2p to v0.15.0-rc.1 ([ipfs/go-ipfs#8354](https://github.com/ipfs/go-ipfs/pull/8354))
|
||||
- feat: add 'ipfs multibase' commands (#8180) ([ipfs/go-ipfs#8180](https://github.com/ipfs/go-ipfs/pull/8180))
|
||||
- support bitswap configurability (#8268) ([ipfs/go-ipfs#8268](https://github.com/ipfs/go-ipfs/pull/8268))
|
||||
- IPLD Prime In IPFS: Target Merge Branch (#7976) ([ipfs/go-ipfs#7976](https://github.com/ipfs/go-ipfs/pull/7976))
|
||||
- ci: upgrade to Go 1.16.7 on CI ([ipfs/go-ipfs#8324](https://github.com/ipfs/go-ipfs/pull/8324))
|
||||
- Add flag to create parent directories in files cp command ([ipfs/go-ipfs#8340](https://github.com/ipfs/go-ipfs/pull/8340))
|
||||
- fix: avoid out of bounds error when rendering short hashes ([ipfs/go-ipfs#8318](https://github.com/ipfs/go-ipfs/pull/8318))
|
||||
- fix: remove some deprecated calls ([ipfs/go-ipfs#8296](https://github.com/ipfs/go-ipfs/pull/8296))
|
||||
- perf: set an appropriate capacity ([ipfs/go-ipfs#8244](https://github.com/ipfs/go-ipfs/pull/8244))
|
||||
- Fix: Use a pointer type on IpfsNode.Peering ([ipfs/go-ipfs#8331](https://github.com/ipfs/go-ipfs/pull/8331))
|
||||
- fix: macos notarized fs-repo-migrations (#8333) ([ipfs/go-ipfs#8333](https://github.com/ipfs/go-ipfs/pull/8333))
|
||||
- README.md: Add MacPorts to install section ([ipfs/go-ipfs#8220](https://github.com/ipfs/go-ipfs/pull/8220))
|
||||
- feat: register first block metric by default ([ipfs/go-ipfs#8332](https://github.com/ipfs/go-ipfs/pull/8332))
|
||||
- Build a go-ipfs:extras docker image ([ipfs/go-ipfs#8142](https://github.com/ipfs/go-ipfs/pull/8142))
|
||||
- fix/go-ipfs-as-a-library ([ipfs/go-ipfs#8266](https://github.com/ipfs/go-ipfs/pull/8266))
|
||||
- Expose additional migration APIs (#8153) ([ipfs/go-ipfs#8153](https://github.com/ipfs/go-ipfs/pull/8153))
|
||||
- point ipfs to pinner that syncs on every pin (#8231) ([ipfs/go-ipfs#8231](https://github.com/ipfs/go-ipfs/pull/8231))
|
||||
- docs: chocolatey package name
|
||||
- Disambiguate online/offline naming in sharness tests ([ipfs/go-ipfs#8254](https://github.com/ipfs/go-ipfs/pull/8254))
|
||||
- Rename DOCKER_HOST to TEST_DOCKER_HOST to avoid conflicts ([ipfs/go-ipfs#8283](https://github.com/ipfs/go-ipfs/pull/8283))
|
||||
- feat: add an "ipfs diag profile" command ([ipfs/go-ipfs#8291](https://github.com/ipfs/go-ipfs/pull/8291))
|
||||
- Merge branch 'release'
|
||||
- feat: improve mkreleaslog ([ipfs/go-ipfs#8290](https://github.com/ipfs/go-ipfs/pull/8290))
|
||||
- Add test with expected failure for #3503 ([ipfs/go-ipfs#8280](https://github.com/ipfs/go-ipfs/pull/8280))
|
||||
- Create PATCH_RELEASE_TEMPLATE.md
|
||||
- fix document error ([ipfs/go-ipfs#8271](https://github.com/ipfs/go-ipfs/pull/8271))
|
||||
- feat: webui v2.12.4
|
||||
- programmatic shell completions ([ipfs/go-ipfs#8043](https://github.com/ipfs/go-ipfs/pull/8043))
|
||||
- test: gateway response for bafkqaaa
|
||||
- doc(README): update chat links (and misc fixes) ([ipfs/go-ipfs#8222](https://github.com/ipfs/go-ipfs/pull/8222))
|
||||
- link to the actual doc (#8126) ([ipfs/go-ipfs#8126](https://github.com/ipfs/go-ipfs/pull/8126))
|
||||
- Improve peer hints for pin remote add (#8143) ([ipfs/go-ipfs#8143](https://github.com/ipfs/go-ipfs/pull/8143))
|
||||
- fix(mkreleaselog): support multiple commit authors ([ipfs/go-ipfs#8214](https://github.com/ipfs/go-ipfs/pull/8214))
|
||||
- fix(mkreleaselog): handle commit 0 ([ipfs/go-ipfs#8121](https://github.com/ipfs/go-ipfs/pull/8121))
|
||||
- bump snap to build with Go 1.16
|
||||
- chore: update CHANGELOG
|
||||
- chore: switch tar-utils dep to ipfs org
|
||||
- feat: print error on bootstrap failure ([ipfs/go-ipfs#8166](https://github.com/ipfs/go-ipfs/pull/8166))
|
||||
- fix: typo in migration error
|
||||
- refactor: improved humanNumber and humanSI
|
||||
- feat: humanized durations in stat provide
|
||||
- feat: humanized numbers in stat provide
|
||||
- feat: add a text output encoding for the stats provide command
|
||||
- fix: webui-2.12.3
|
||||
- refactor(pinmfs): log error if pre-existing pin failed (#8056) ([ipfs/go-ipfs#8056](https://github.com/ipfs/go-ipfs/pull/8056))
|
||||
- config.md: fix typos/improve wording ([ipfs/go-ipfs#8031](https://github.com/ipfs/go-ipfs/pull/8031))
|
||||
- fix(peering_test) : Fix the peering_test to check the connection explicitly added ([ipfs/go-ipfs#8140](https://github.com/ipfs/go-ipfs/pull/8140))
|
||||
- build: ignore generated files in changelog ([ipfs/go-ipfs#7712](https://github.com/ipfs/go-ipfs/pull/7712))
|
||||
- update version to 0.10.0-dev ([ipfs/go-ipfs#8136](https://github.com/ipfs/go-ipfs/pull/8136))
|
||||
- github.com/ipfs/go-bitswap (v0.3.4 -> v0.4.0):
|
||||
- More stats, knobs and tunings (#514) ([ipfs/go-bitswap#514](https://github.com/ipfs/go-bitswap/pull/514))
|
||||
- fix: fix a map access race condition in the want index ([ipfs/go-bitswap#523](https://github.com/ipfs/go-bitswap/pull/523))
|
||||
- fix: make blockstore cancel test less timing dependent ([ipfs/go-bitswap#507](https://github.com/ipfs/go-bitswap/pull/507))
|
||||
- fix(decision): fix a datarace on disconnect ([ipfs/go-bitswap#508](https://github.com/ipfs/go-bitswap/pull/508))
|
||||
- optimize the lookup which peers are waiting for a given block ([ipfs/go-bitswap#486](https://github.com/ipfs/go-bitswap/pull/486))
|
||||
- fix: hold the task worker lock when starting task workers ([ipfs/go-bitswap#504](https://github.com/ipfs/go-bitswap/pull/504))
|
||||
- fix: Nil dereference while using SetSendDontHaves ([ipfs/go-bitswap#488](https://github.com/ipfs/go-bitswap/pull/488))
|
||||
- Fix flaky tests in message queue ([ipfs/go-bitswap#497](https://github.com/ipfs/go-bitswap/pull/497))
|
||||
- Fix flaky DontHaveTimeoutManger tests ([ipfs/go-bitswap#495](https://github.com/ipfs/go-bitswap/pull/495))
|
||||
- sync: update CI config files ([ipfs/go-bitswap#485](https://github.com/ipfs/go-bitswap/pull/485))
|
||||
- github.com/ipfs/go-blockservice (v0.1.4 -> v0.1.7):
|
||||
- update go-bitswap to v0.3.4 ([ipfs/go-blockservice#78](https://github.com/ipfs/go-blockservice/pull/78))
|
||||
- fix staticcheck ([ipfs/go-blockservice#75](https://github.com/ipfs/go-blockservice/pull/75))
|
||||
- fix: handle missing session exchange in Session ([ipfs/go-blockservice#73](https://github.com/ipfs/go-blockservice/pull/73))
|
||||
- github.com/ipfs/go-datastore (v0.4.5 -> v0.4.6):
|
||||
- sync: update CI config files ([ipfs/go-datastore#175](https://github.com/ipfs/go-datastore/pull/175))
|
||||
- speedup tests ([ipfs/go-datastore#177](https://github.com/ipfs/go-datastore/pull/177))
|
||||
- test: reduce element count when the race detector is enabled ([ipfs/go-datastore#176](https://github.com/ipfs/go-datastore/pull/176))
|
||||
- fix staticcheck ([ipfs/go-datastore#173](https://github.com/ipfs/go-datastore/pull/173))
|
||||
- remove Makefile ([ipfs/go-datastore#172](https://github.com/ipfs/go-datastore/pull/172))
|
||||
- github.com/ipfs/go-ds-badger (v0.2.6 -> v0.2.7):
|
||||
- Log start and end of GC rounds ([ipfs/go-ds-badger#115](https://github.com/ipfs/go-ds-badger/pull/115))
|
||||
- github.com/ipfs/go-fs-lock (v0.0.6 -> v0.0.7):
|
||||
- chore: update log ([ipfs/go-fs-lock#24](https://github.com/ipfs/go-fs-lock/pull/24))
|
||||
- sync: update CI config files ([ipfs/go-fs-lock#21](https://github.com/ipfs/go-fs-lock/pull/21))
|
||||
- fix TestLockedByOthers on Windows ([ipfs/go-fs-lock#19](https://github.com/ipfs/go-fs-lock/pull/19))
|
||||
- github.com/ipfs/go-ipfs-config (v0.14.0 -> v0.16.0):
|
||||
- feat: add Internal and Internal.Bitswap config options
|
||||
- feat: add an OptionalInteger type
|
||||
- fix: make sure the Priority type properly implements the JSON marshal/unmarshal interfaces
|
||||
- fix: remove deprecated calls ([ipfs/go-ipfs-config#138](https://github.com/ipfs/go-ipfs-config/pull/138))
|
||||
- sync: update CI config files ([ipfs/go-ipfs-config#132](https://github.com/ipfs/go-ipfs-config/pull/132))
|
||||
- remove period, fix staticcheck ([ipfs/go-ipfs-config#131](https://github.com/ipfs/go-ipfs-config/pull/131))
|
||||
- github.com/ipfs/go-ipfs-pinner (v0.1.1 -> v0.1.2):
|
||||
- Fix/minimize rebuild (#15) ([ipfs/go-ipfs-pinner#15](https://github.com/ipfs/go-ipfs-pinner/pull/15))
|
||||
- Define ErrNotPinned alongside the Pinner interface
|
||||
- fix staticcheck ([ipfs/go-ipfs-pinner#11](https://github.com/ipfs/go-ipfs-pinner/pull/11))
|
||||
- fix: remove the rest of the pb backed pinner ([ipfs/go-ipfs-pinner#9](https://github.com/ipfs/go-ipfs-pinner/pull/9))
|
||||
- Remove old ipldpinner that has been replaced by dspinner ([ipfs/go-ipfs-pinner#7](https://github.com/ipfs/go-ipfs-pinner/pull/7))
|
||||
- optimize CheckIfPinned ([ipfs/go-ipfs-pinner#6](https://github.com/ipfs/go-ipfs-pinner/pull/6))
|
||||
- github.com/ipfs/go-ipfs-provider (v0.5.1 -> v0.6.1):
|
||||
- Update to IPLD Prime (#32) ([ipfs/go-ipfs-provider#32](https://github.com/ipfs/go-ipfs-provider/pull/32))
|
||||
- github.com/ipfs/go-ipld-git (v0.0.4 -> v0.1.1):
|
||||
- return ErrUnexpectedEOF when Decode input is too short
|
||||
- Update go-ipld-git to a go-ipld-prime codec (#46) ([ipfs/go-ipld-git#46](https://github.com/ipfs/go-ipld-git/pull/46))
|
||||
- fix staticcheck ([ipfs/go-ipld-git#49](https://github.com/ipfs/go-ipld-git/pull/49))
|
||||
- change WriteTo to the standard signature ([ipfs/go-ipld-git#47](https://github.com/ipfs/go-ipld-git/pull/47))
|
||||
- don't copy mutexes ([ipfs/go-ipld-git#48](https://github.com/ipfs/go-ipld-git/pull/48))
|
||||
- github.com/ipfs/go-ipns (v0.1.0 -> v0.1.2):
|
||||
- fix: remove deprecated calls ([ipfs/go-ipns#30](https://github.com/ipfs/go-ipns/pull/30))
|
||||
- remove Makefile ([ipfs/go-ipns#27](https://github.com/ipfs/go-ipns/pull/27))
|
||||
- github.com/ipfs/go-log/v2 (v2.1.3 -> v2.3.0):
|
||||
- Stop defaulting to color output on non-TTY ([ipfs/go-log#116](https://github.com/ipfs/go-log/pull/116))
|
||||
- feat: add ability to use custom zap core ([ipfs/go-log#114](https://github.com/ipfs/go-log/pull/114))
|
||||
- fix staticcheck ([ipfs/go-log#112](https://github.com/ipfs/go-log/pull/112))
|
||||
- test: fix flaky label test ([ipfs/go-log#111](https://github.com/ipfs/go-log/pull/111))
|
||||
- per-subsystem log-levels ([ipfs/go-log#109](https://github.com/ipfs/go-log/pull/109))
|
||||
- fix: don't panic on invalid log labels ([ipfs/go-log#110](https://github.com/ipfs/go-log/pull/110))
|
||||
- github.com/ipfs/go-merkledag (v0.3.2 -> v0.4.0):
|
||||
- Use IPLD-prime: target merge branch ([ipfs/go-merkledag#67](https://github.com/ipfs/go-merkledag/pull/67))
|
||||
- sync: update CI config files ([ipfs/go-merkledag#70](https://github.com/ipfs/go-merkledag/pull/70))
|
||||
- staticcheck ([ipfs/go-merkledag#69](https://github.com/ipfs/go-merkledag/pull/69))
|
||||
- Fix bug in dagutils MergeDiffs. (#59) ([ipfs/go-merkledag#59](https://github.com/ipfs/go-merkledag/pull/59))
|
||||
- chore: add tests to verify allowable data layouts ([ipfs/go-merkledag#58](https://github.com/ipfs/go-merkledag/pull/58))
|
||||
- github.com/ipfs/go-namesys (v0.3.0 -> v0.3.1):
|
||||
- fix: remove deprecated call to pk.Bytes ([ipfs/go-namesys#19](https://github.com/ipfs/go-namesys/pull/19))
|
||||
- github.com/ipfs/go-path (v0.0.9 -> v0.1.2):
|
||||
- fix: give one minute timeouts to function calls instead of block retrievals ([ipfs/go-path#44](https://github.com/ipfs/go-path/pull/44))
|
||||
- IPLD Prime In IPFS: Target Merge Branch (#36) ([ipfs/go-path#36](https://github.com/ipfs/go-path/pull/36))
|
||||
- remove Makefile ([ipfs/go-path#40](https://github.com/ipfs/go-path/pull/40))
|
||||
- sync: update CI config files ([ipfs/go-path#39](https://github.com/ipfs/go-path/pull/39))
|
||||
- github.com/ipfs/go-peertaskqueue (v0.2.0 -> v0.4.0):
|
||||
- add stats
|
||||
- Have a configurable maximum active work per peer ([ipfs/go-peertaskqueue#10](https://github.com/ipfs/go-peertaskqueue/pull/10))
|
||||
- sync: update CI config files ([ipfs/go-peertaskqueue#13](https://github.com/ipfs/go-peertaskqueue/pull/13))
|
||||
- fix staticcheck ([ipfs/go-peertaskqueue#12](https://github.com/ipfs/go-peertaskqueue/pull/12))
|
||||
- fix go vet ([ipfs/go-peertaskqueue#11](https://github.com/ipfs/go-peertaskqueue/pull/11))
|
||||
- github.com/ipfs/go-unixfsnode (null -> v1.1.3):
|
||||
- make UnixFSHAMTShard implement the ADL interface (#11) ([ipfs/go-unixfsnode#11](https://github.com/ipfs/go-unixfsnode/pull/11))
|
||||
- github.com/ipfs/interface-go-ipfs-core (v0.4.0 -> v0.5.1):
|
||||
- IPLD In IPFS: Target Merge Branch (#67) ([ipfs/interface-go-ipfs-core#67](https://github.com/ipfs/interface-go-ipfs-core/pull/67))
|
||||
- fix staticcheck ([ipfs/interface-go-ipfs-core#72](https://github.com/ipfs/interface-go-ipfs-core/pull/72))
|
||||
- remove Makefile ([ipfs/interface-go-ipfs-core#70](https://github.com/ipfs/interface-go-ipfs-core/pull/70))
|
||||
- github.com/ipld/go-codec-dagpb (v1.2.0 -> v1.3.0):
|
||||
- fix staticcheck warnings ([ipld/go-codec-dagpb#29](https://github.com/ipld/go-codec-dagpb/pull/29))
|
||||
- update go-ipld-prime, use go:generate
|
||||
- allow decoding PBNode fields in any order
|
||||
- expose APIs without Reader/Writer overhead
|
||||
- preallocate 1KiB on the stack for marshals
|
||||
- encode directly with a []byte
|
||||
- decode directly with a []byte
|
||||
- remove unnecessary xerrors dep
|
||||
- github.com/ipld/go-ipld-prime (v0.9.1-0.20210324083106-dc342a9917db -> v0.12.2):
|
||||
- Printer feature ([ipld/go-ipld-prime#238](https://github.com/ipld/go-ipld-prime/pull/238))
|
||||
- schema: keep TypeSystem names ordered
|
||||
- schema/dmt: redesign with bindnode and add Compile
|
||||
- codec: make cbor and json codecs use ErrUnexpectedEOF
|
||||
- bindnode: fix for stringjoin struct emission when first field is the empty string ([ipld/go-ipld-prime#239](https://github.com/ipld/go-ipld-prime/pull/239))
|
||||
- schema: typekind names are not capitalized.
|
||||
- Bindnode fixes continued ([ipld/go-ipld-prime#233](https://github.com/ipld/go-ipld-prime/pull/233))
|
||||
- helper methods for encoding and decoding ([ipld/go-ipld-prime#232](https://github.com/ipld/go-ipld-prime/pull/232))
|
||||
- mark v0.12.0
|
||||
- Major refactor: extract datamodel package.
|
||||
([ipld/go-ipld-prime#228](https://github.com/ipld/go-ipld-prime/pull/228))
|
||||
- Fix ExploreRecursive stopAt condition, add tests, add error return to Explore (#229) ([ipld/go-ipld-prime#229](https://github.com/ipld/go-ipld-prime/pull/229))
|
||||
- selector: add tests which are driven by language-agnostic spec fixtures. ([ipld/go-ipld-prime#231](https://github.com/ipld/go-ipld-prime/pull/231))
|
||||
- selector: Improve docs for implementors. (#227) ([ipld/go-ipld-prime#227](https://github.com/ipld/go-ipld-prime/pull/227))
|
||||
- Bindnode fixes of opportunity ([ipld/go-ipld-prime#226](https://github.com/ipld/go-ipld-prime/pull/226))
|
||||
- node/bindnode: redesign the shape of unions in Go ([ipld/go-ipld-prime#223](https://github.com/ipld/go-ipld-prime/pull/223))
|
||||
- summary of the v0.11.0 changelog should holler even more about how cool bindnode is.
|
||||
- mark v0.11.0
|
||||
- node/bindnode: mark as experimental in its godoc.
|
||||
- codecs: more docs, a terminology guide, consistency in options. ([ipld/go-ipld-prime#221](https://github.com/ipld/go-ipld-prime/pull/221))
|
||||
- Changelog backfill.
|
||||
- selectors: docs enhancements, new construction helpers. ([ipld/go-ipld-prime#199](https://github.com/ipld/go-ipld-prime/pull/199))
|
||||
- Changelog backfill.
|
||||
- Allow parsing of single Null tokens from refmt
|
||||
- Add link conditions for 'stop-at' expression in ExploreRecursive selector ([ipld/go-ipld-prime#214](https://github.com/ipld/go-ipld-prime/pull/214))
|
||||
- Remove base64 padding for dag-json bytes as per spec
|
||||
- node/bindnode: temporarily skip Links schema test
|
||||
- test: add test for traversal of typed node links
|
||||
- fix: typed links LinkTargetNodePrototype should return ReferencedType
|
||||
- Make `go vet` happy
|
||||
- Add MapSortMode to MarshalOptions
|
||||
- Add {Unm,M}arshalOptions for explicit mode switching for cbor vs dagcbor
|
||||
- Sort map entries marshalling dag-cbor
|
||||
- node/bindnode: first pass at inferring IPLD schemas
|
||||
- Add {Unm,M}arshalOptions for explicit mode switching for json vs dagjson
|
||||
- Make tests pass with sorted dag-json output
|
||||
- Sort map entries marshalling dag-json
|
||||
- Simplify refmt usage
|
||||
- Fix failing test using dagjson encoding
|
||||
- Fix some failing tests using dagjson
|
||||
- Remove pretty-printing
|
||||
- Update readme linking to specs and meta repo.
|
||||
- Fix example names so they render on go.pkg.dev.
|
||||
- fluent/quip: remove in favor of qp
|
||||
- node/basic: add Chooser
|
||||
- schema: add TypedPrototype
|
||||
- node/bindnode: rethink and better document APIs
|
||||
- node/tests: cover yet more interface methods
|
||||
- node/tests: cover more error cases for scalar kinds
|
||||
- node/tests: add more extensive scalar kind tests
|
||||
- node/bindnode: start running all schema tests
|
||||
- mark v0.10.0
|
||||
- More changelog grooming.
|
||||
- Changelog grooming.
|
||||
- node/tests: put most of the schema test cases here
|
||||
- Add more explicit discussion of indicies to ListIterator.
|
||||
- node/bindnode: start of a reflect-based Node implementation
|
||||
- add DeepEqual and start using it in tests
|
||||
- Add enumerate methods to the multicodec registries. ([ipld/go-ipld-prime#176](https://github.com/ipld/go-ipld-prime/pull/176))
|
||||
- Make a multicodec.Registry type available. ([ipld/go-ipld-prime#172](https://github.com/ipld/go-ipld-prime/pull/172))
|
||||
- fluent/qp: don't panic on string panics
|
||||
- Allow emitting & parsing of bytes per dagjson codec spec ([ipld/go-ipld-prime#166](https://github.com/ipld/go-ipld-prime/pull/166))
|
||||
- Package docs for dag-cbor.
|
||||
- Update package docs.
|
||||
- schema/gen/go: apply gofmt automatically ([ipld/go-ipld-prime#163](https://github.com/ipld/go-ipld-prime/pull/163))
|
||||
- schema/gen/go: fix remaining vet warnings on generated code
|
||||
- schema/gen/go: batch file writes via a bytes.Buffer ([ipld/go-ipld-prime#161](https://github.com/ipld/go-ipld-prime/pull/161))
|
||||
- schema/gen/go: avoid Maybe pointers for small types
|
||||
- fix readme formatting typo
|
||||
- feat(linksystem): add reification to LinkSystem ([ipld/go-ipld-prime#158](https://github.com/ipld/go-ipld-prime/pull/158))
|
||||
- github.com/libp2p/go-addr-util (v0.0.2 -> v0.1.0):
|
||||
- stop using the deprecated go-multiaddr-net package ([libp2p/go-addr-util#34](https://github.com/libp2p/go-addr-util/pull/34))
|
||||
- Remove `IsFDCostlyTransport` ([libp2p/go-addr-util#31](https://github.com/libp2p/go-addr-util/pull/31))
|
||||
- github.com/libp2p/go-libp2p (v0.14.3 -> v0.15.0):
|
||||
- chore: update go-tcp-transport to v0.2.8
|
||||
- implement the new mDNS spec, move the old mDNS implementation (#1161) ([libp2p/go-libp2p#1161](https://github.com/libp2p/go-libp2p/pull/1161))
|
||||
- remove deprecated basichost.New constructor ([libp2p/go-libp2p#1156](https://github.com/libp2p/go-libp2p/pull/1156))
|
||||
- Make BasicHost.evtLocalAddrsUpdated event emitter stateful. ([libp2p/go-libp2p#1147](https://github.com/libp2p/go-libp2p/pull/1147))
|
||||
- fix: deflake multipro echo test ([libp2p/go-libp2p#1149](https://github.com/libp2p/go-libp2p/pull/1149))
|
||||
- fix(basic_host): stream not closed when context done ([libp2p/go-libp2p#1148](https://github.com/libp2p/go-libp2p/pull/1148))
|
||||
- chore: update deps ([libp2p/go-libp2p#1141](https://github.com/libp2p/go-libp2p/pull/1141))
|
||||
- remove secio from examples ([libp2p/go-libp2p#1143](https://github.com/libp2p/go-libp2p/pull/1143))
|
||||
- remove deprecated Filter option ([libp2p/go-libp2p#1132](https://github.com/libp2p/go-libp2p/pull/1132))
|
||||
- fix: remove deprecated call ([libp2p/go-libp2p#1136](https://github.com/libp2p/go-libp2p/pull/1136))
|
||||
- test: fix flaky example test ([libp2p/go-libp2p#1135](https://github.com/libp2p/go-libp2p/pull/1135))
|
||||
- remove deprecated identify.ClientVersion ([libp2p/go-libp2p#1133](https://github.com/libp2p/go-libp2p/pull/1133))
|
||||
- remove Go version requirement and note about Go modules from README ([libp2p/go-libp2p#1126](https://github.com/libp2p/go-libp2p/pull/1126))
|
||||
- Error assignment fix ([libp2p/go-libp2p#1124](https://github.com/libp2p/go-libp2p/pull/1124))
|
||||
- perf/basic_host: Don't handle address change if we hasn't anyone ([libp2p/go-libp2p#1115](https://github.com/libp2p/go-libp2p/pull/1115))
|
||||
- github.com/libp2p/go-libp2p-core (v0.8.5 -> v0.9.0):
|
||||
- feat: remove unused metrics (#208) ([libp2p/go-libp2p-core#208](https://github.com/libp2p/go-libp2p-core/pull/208))
|
||||
- feat: keep addresses for longer (#207) ([libp2p/go-libp2p-core#207](https://github.com/libp2p/go-libp2p-core/pull/207))
|
||||
- remove deprecated key stretching struct / function (#203) ([libp2p/go-libp2p-core#203](https://github.com/libp2p/go-libp2p-core/pull/203))
|
||||
- remove deprecated Bytes method from the Key interface (#204) ([libp2p/go-libp2p-core#204](https://github.com/libp2p/go-libp2p-core/pull/204))
|
||||
- remove deprecated functions in the peer package (#205) ([libp2p/go-libp2p-core#205](https://github.com/libp2p/go-libp2p-core/pull/205))
|
||||
- remove deprecated constructor for the insecure transport (#206) ([libp2p/go-libp2p-core#206](https://github.com/libp2p/go-libp2p-core/pull/206))
|
||||
- feat: add helper functions for working with addr infos (#202) ([libp2p/go-libp2p-core#202](https://github.com/libp2p/go-libp2p-core/pull/202))
|
||||
- fix: make timestamps strictly increasing (#201) ([libp2p/go-libp2p-core#201](https://github.com/libp2p/go-libp2p-core/pull/201))
|
||||
- ci: use github-actions for compatibility testing (#200) ([libp2p/go-libp2p-core#200](https://github.com/libp2p/go-libp2p-core/pull/200))
|
||||
- sync: update CI config files (#189) ([libp2p/go-libp2p-core#189](https://github.com/libp2p/go-libp2p-core/pull/189))
|
||||
- remove minimum Go version from README (#199) ([libp2p/go-libp2p-core#199](https://github.com/libp2p/go-libp2p-core/pull/199))
|
||||
- remove flaky tests (#194) ([libp2p/go-libp2p-core#194](https://github.com/libp2p/go-libp2p-core/pull/194))
|
||||
- reduce default timeouts to 15s (#192) ([libp2p/go-libp2p-core#192](https://github.com/libp2p/go-libp2p-core/pull/192))
|
||||
- fix benchmark of key verifications (#190) ([libp2p/go-libp2p-core#190](https://github.com/libp2p/go-libp2p-core/pull/190))
|
||||
- fix staticcheck errors (#191) ([libp2p/go-libp2p-core#191](https://github.com/libp2p/go-libp2p-core/pull/191))
|
||||
- doc: document Close on Transport (#188) ([libp2p/go-libp2p-core#188](https://github.com/libp2p/go-libp2p-core/pull/188))
|
||||
- add a helper function to go directly from a string to an AddrInfo (#184) ([libp2p/go-libp2p-core#184](https://github.com/libp2p/go-libp2p-core/pull/184))
|
||||
- github.com/libp2p/go-libp2p-http (v0.2.0 -> v0.2.1):
|
||||
- remove Makefile ([libp2p/go-libp2p-http#70](https://github.com/libp2p/go-libp2p-http/pull/70))
|
||||
- fix staticcheck ([libp2p/go-libp2p-http#67](https://github.com/libp2p/go-libp2p-http/pull/67))
|
||||
- Revert "increase buffer size"
|
||||
- Increase read buffer size to reduce poll system calls ([libp2p/go-libp2p-http#66](https://github.com/libp2p/go-libp2p-http/pull/66))
|
||||
- github.com/libp2p/go-libp2p-kad-dht (v0.12.2 -> v0.13.1):
|
||||
- Extract validation from ProtocolMessenger ([libp2p/go-libp2p-kad-dht#741](https://github.com/libp2p/go-libp2p-kad-dht/pull/741))
|
||||
- remove codecov.yml ([libp2p/go-libp2p-kad-dht#742](https://github.com/libp2p/go-libp2p-kad-dht/pull/742))
|
||||
- integrate some basic opentelemetry tracing ([libp2p/go-libp2p-kad-dht#734](https://github.com/libp2p/go-libp2p-kad-dht/pull/734))
|
||||
- feat: delete GetValues ([libp2p/go-libp2p-kad-dht#728](https://github.com/libp2p/go-libp2p-kad-dht/pull/728))
|
||||
- chore: skip flaky test when race detector is enabled ([libp2p/go-libp2p-kad-dht#731](https://github.com/libp2p/go-libp2p-kad-dht/pull/731))
|
||||
- Dont count connection times in usefulness ([libp2p/go-libp2p-kad-dht#660](https://github.com/libp2p/go-libp2p-kad-dht/pull/660))
|
||||
- Routing table refresh should NOT block ([libp2p/go-libp2p-kad-dht#705](https://github.com/libp2p/go-libp2p-kad-dht/pull/705))
|
||||
- update bootstrapPeers to be func() []peer.AddrInfo (#716) ([libp2p/go-libp2p-kad-dht#716](https://github.com/libp2p/go-libp2p-kad-dht/pull/716))
|
||||
- github.com/libp2p/go-libp2p-noise (v0.2.0 -> v0.2.2):
|
||||
- remove note about go modules in README ([libp2p/go-libp2p-noise#100](https://github.com/libp2p/go-libp2p-noise/pull/100))
|
||||
- fix: remove deprecated call to pk.Bytes ([libp2p/go-libp2p-noise#99](https://github.com/libp2p/go-libp2p-noise/pull/99))
|
||||
- github.com/libp2p/go-libp2p-peerstore (v0.2.7 -> v0.2.8):
|
||||
- Fix performance issue in updating addr book ([libp2p/go-libp2p-peerstore#141](https://github.com/libp2p/go-libp2p-peerstore/pull/141))
|
||||
- Fix test flakes ([libp2p/go-libp2p-peerstore#164](https://github.com/libp2p/go-libp2p-peerstore/pull/164))
|
||||
- Only remove records during GC ([libp2p/go-libp2p-peerstore#135](https://github.com/libp2p/go-libp2p-peerstore/pull/135))
|
||||
- sync: update CI config files ([libp2p/go-libp2p-peerstore#160](https://github.com/libp2p/go-libp2p-peerstore/pull/160))
|
||||
- fix: fix some race conditions in the ds address book ([libp2p/go-libp2p-peerstore#161](https://github.com/libp2p/go-libp2p-peerstore/pull/161))
|
||||
- address lints and test failures ([libp2p/go-libp2p-peerstore#159](https://github.com/libp2p/go-libp2p-peerstore/pull/159))
|
||||
- stop using the deprecated go-multiaddr-net package ([libp2p/go-libp2p-peerstore#158](https://github.com/libp2p/go-libp2p-peerstore/pull/158))
|
||||
- github.com/libp2p/go-libp2p-pubsub (v0.4.2 -> v0.5.4):
|
||||
- make slowness a warning, with a user configurable threshold
|
||||
- reduce log spam from empty heartbeat messages
|
||||
- fix: code review
|
||||
- add support for custom protocol matching function
|
||||
- fix: remove deprecated Bytes call (#436) ([libp2p/go-libp2p-pubsub#436](https://github.com/libp2p/go-libp2p-pubsub/pull/436))
|
||||
- cleanup: fix vet and staticcheck failures (#435) ([libp2p/go-libp2p-pubsub#435](https://github.com/libp2p/go-libp2p-pubsub/pull/435))
|
||||
- Revert noisy newline changes
|
||||
- fix: avoid panic when peer is blacklisted after connection
|
||||
- release priority locks early when handling batches
|
||||
- don't respawn writer if we fail to open a stream; declare it a peer error
|
||||
- batch process dead peer notifications
|
||||
- use a priority lock instead of a semaphore
|
||||
- do the notification in a goroutine
|
||||
- emit new peer notification without holding the semaphore
|
||||
- use a semaphore for new peer notifications so that we don't block the event loop
|
||||
- don't accumulate pending goroutines from new connections
|
||||
- rename RawTracer's DroppedInSubscribe into UndeliverableMessage
|
||||
- add a new RawTracer event to track messages dropped in Subscribe
|
||||
- add an option to configure the Subscription output queue length
|
||||
- fix some comments
|
||||
- expose more events for RawTracer
|
||||
- Make close concurrent safe
|
||||
- Fix close of closed channel
|
||||
- Update README to point to correct example directory (#424) ([libp2p/go-libp2p-pubsub#424](https://github.com/libp2p/go-libp2p-pubsub/pull/424))
|
||||
- fix: remove deprecated and never used topic descriptors (#423) ([libp2p/go-libp2p-pubsub#423](https://github.com/libp2p/go-libp2p-pubsub/pull/423))
|
||||
- Refactor Gossipsub Parameters To Make Them More Configurable (#421) ([libp2p/go-libp2p-pubsub#421](https://github.com/libp2p/go-libp2p-pubsub/pull/421))
|
||||
- add tests for gs features and custom protocols
|
||||
- add support for custom gossipsub protocols and feature tests
|
||||
- RIP travis, Long Live CircleCI (#414) ([libp2p/go-libp2p-pubsub#414](https://github.com/libp2p/go-libp2p-pubsub/pull/414))
|
||||
- Ignore transient connections (#412) ([libp2p/go-libp2p-pubsub#412](https://github.com/libp2p/go-libp2p-pubsub/pull/412))
|
||||
- demote log spam to debug
|
||||
- fix bug
|
||||
- add last amount of validation
|
||||
- add threshold validation
|
||||
- strengthen validation
|
||||
- rename checkSignature to checkSigningPolicy
|
||||
- rename validation.Publish to PushLocal
|
||||
- fix TestValidate, add TestValidate2
|
||||
- skip flaky test until we can fix it
|
||||
- implement synchronous validation for locally published messages
|
||||
- expose internalTracer as RawTracer
|
||||
- export rejection named string constants
|
||||
- more intelligent handling of ip whitelist check
|
||||
- remove obsolete explicit IP whitelisting in favor of subnets
|
||||
- add subnet whitelisting for IPColocation
|
||||
- github.com/libp2p/go-libp2p-quic-transport (v0.11.2 -> v0.12.0):
|
||||
- sync: update CI config files (#228) ([libp2p/go-libp2p-quic-transport#228](https://github.com/libp2p/go-libp2p-quic-transport/pull/228))
|
||||
- fix closing of streams in example ([libp2p/go-libp2p-quic-transport#221](https://github.com/libp2p/go-libp2p-quic-transport/pull/221))
|
||||
- close all UDP connections when the reuse is closed ([libp2p/go-libp2p-quic-transport#216](https://github.com/libp2p/go-libp2p-quic-transport/pull/216))
|
||||
- fix staticcheck ([libp2p/go-libp2p-quic-transport#217](https://github.com/libp2p/go-libp2p-quic-transport/pull/217))
|
||||
- sync: update CI config files (#214) ([libp2p/go-libp2p-quic-transport#214](https://github.com/libp2p/go-libp2p-quic-transport/pull/214))
|
||||
- implement a Transport.Close that waits for the reuse's GC to finish ([libp2p/go-libp2p-quic-transport#211](https://github.com/libp2p/go-libp2p-quic-transport/pull/211))
|
||||
- don't compare peer IDs when hole punching ([libp2p/go-libp2p-quic-transport#210](https://github.com/libp2p/go-libp2p-quic-transport/pull/210))
|
||||
- add hole punching support (#194) ([libp2p/go-libp2p-quic-transport#194](https://github.com/libp2p/go-libp2p-quic-transport/pull/194))
|
||||
- github.com/libp2p/go-libp2p-swarm (v0.5.0 -> v0.5.3):
|
||||
- sync: update CI config files ([libp2p/go-libp2p-swarm#263](https://github.com/libp2p/go-libp2p-swarm/pull/263))
|
||||
- remove incorrect call to InterceptAddrDial ([libp2p/go-libp2p-swarm#260](https://github.com/libp2p/go-libp2p-swarm/pull/260))
|
||||
- speed up the TestFDLimitUnderflow test ([libp2p/go-libp2p-swarm#262](https://github.com/libp2p/go-libp2p-swarm/pull/262))
|
||||
- sync: update CI config files (#248) ([libp2p/go-libp2p-swarm#248](https://github.com/libp2p/go-libp2p-swarm/pull/248))
|
||||
- github.com/libp2p/go-libp2p-testing (v0.4.0 -> v0.4.2):
|
||||
- fix deadlock in the transport's serve function ([libp2p/go-libp2p-testing#35](https://github.com/libp2p/go-libp2p-testing/pull/35))
|
||||
- fix: cleanup transport suite ([libp2p/go-libp2p-testing#34](https://github.com/libp2p/go-libp2p-testing/pull/34))
|
||||
- Address `go vet` and `staticcheck` issues ([libp2p/go-libp2p-testing#33](https://github.com/libp2p/go-libp2p-testing/pull/33))
|
||||
- Defer closing stream for reading ([libp2p/go-libp2p-testing#32](https://github.com/libp2p/go-libp2p-testing/pull/32))
|
||||
- github.com/libp2p/go-libp2p-tls (v0.1.3 -> v0.2.0):
|
||||
- fix: don't fail the handshake when the libp2p extension is critical ([libp2p/go-libp2p-tls#88](https://github.com/libp2p/go-libp2p-tls/pull/88))
|
||||
- fix deprecated call to key.Bytes ([libp2p/go-libp2p-tls#86](https://github.com/libp2p/go-libp2p-tls/pull/86))
|
||||
- fix usage of deprecated peer.IDB58Decode ([libp2p/go-libp2p-tls#77](https://github.com/libp2p/go-libp2p-tls/pull/77))
|
||||
- remove setting of the TLS 1.3 GODEBUG flag ([libp2p/go-libp2p-tls#68](https://github.com/libp2p/go-libp2p-tls/pull/68))
|
||||
- improve the error message returned when peer verification fails ([libp2p/go-libp2p-tls#57](https://github.com/libp2p/go-libp2p-tls/pull/57))
|
||||
- update to Go 1.14 ([libp2p/go-libp2p-tls#54](https://github.com/libp2p/go-libp2p-tls/pull/54))
|
||||
- Update deps and fix tests ([libp2p/go-libp2p-tls#43](https://github.com/libp2p/go-libp2p-tls/pull/43))
|
||||
- github.com/libp2p/go-libp2p-transport-upgrader (v0.4.2 -> v0.4.6):
|
||||
- chore: update deps ([libp2p/go-libp2p-transport-upgrader#78](https://github.com/libp2p/go-libp2p-transport-upgrader/pull/78))
|
||||
- fix typo in error message ([libp2p/go-libp2p-transport-upgrader#77](https://github.com/libp2p/go-libp2p-transport-upgrader/pull/77))
|
||||
- fix staticcheck ([libp2p/go-libp2p-transport-upgrader#74](https://github.com/libp2p/go-libp2p-transport-upgrader/pull/74))
|
||||
- don't listen on all interfaces in tests ([libp2p/go-libp2p-transport-upgrader#73](https://github.com/libp2p/go-libp2p-transport-upgrader/pull/73))
|
||||
- stop using the deprecated go-multiaddr-net ([libp2p/go-libp2p-transport-upgrader#72](https://github.com/libp2p/go-libp2p-transport-upgrader/pull/72))
|
||||
- github.com/libp2p/go-libp2p-xor (v0.0.0-20200501025846-71e284145d58 -> v0.0.0-20210714161855-5c005aca55db):
|
||||
- Add immutable remove operation ([libp2p/go-libp2p-xor#14](https://github.com/libp2p/go-libp2p-xor/pull/14))
|
||||
- fix go vet and staticcheck ([libp2p/go-libp2p-xor#11](https://github.com/libp2p/go-libp2p-xor/pull/11))
|
||||
- github.com/libp2p/go-reuseport-transport (v0.0.4 -> v0.0.5):
|
||||
- remove note about Go modules in README ([libp2p/go-reuseport-transport#32](https://github.com/libp2p/go-reuseport-transport/pull/32))
|
||||
- stop using the deprecated go-multiaddr-net package ([libp2p/go-reuseport-transport#30](https://github.com/libp2p/go-reuseport-transport/pull/30))
|
||||
- github.com/libp2p/go-socket-activation (v0.0.2 -> v0.1.0):
|
||||
- chore: stop using the deprecated go-multiaddr-net package ([libp2p/go-socket-activation#16](https://github.com/libp2p/go-socket-activation/pull/16))
|
||||
- fix staticcheck ([libp2p/go-socket-activation#13](https://github.com/libp2p/go-socket-activation/pull/13))
|
||||
- github.com/libp2p/go-tcp-transport (v0.2.4 -> v0.2.8):
|
||||
- disable metrics collection on Windows ([libp2p/go-tcp-transport#93](https://github.com/libp2p/go-tcp-transport/pull/93))
|
||||
- sync: update CI config files (#90) ([libp2p/go-tcp-transport#90](https://github.com/libp2p/go-tcp-transport/pull/90))
|
||||
- chore: update go-libp2p-transport-upgrader and go-reuseport-transport ([libp2p/go-tcp-transport#84](https://github.com/libp2p/go-tcp-transport/pull/84))
|
||||
- github.com/libp2p/go-ws-transport (v0.4.0 -> v0.5.0):
|
||||
- chore: update go-libp2p-transport-upgrader and go-libp2p-core ([libp2p/go-ws-transport#103](https://github.com/libp2p/go-ws-transport/pull/103))
|
||||
- remove deprecated type ([libp2p/go-ws-transport#102](https://github.com/libp2p/go-ws-transport/pull/102))
|
||||
- sync: update CI config files ([libp2p/go-ws-transport#101](https://github.com/libp2p/go-ws-transport/pull/101))
|
||||
- chore: various cleanups required to get vet/staticcheck/test to pass ([libp2p/go-ws-transport#100](https://github.com/libp2p/go-ws-transport/pull/100))
|
||||
- github.com/lucas-clemente/quic-go (v0.21.2 -> v0.23.0):
|
||||
- update to Go 1.17.x ([lucas-clemente/quic-go#3258](https://github.com/lucas-clemente/quic-go/pull/3258))
|
||||
- quicvarint: export Min and Max (#3253) ([lucas-clemente/quic-go#3253](https://github.com/lucas-clemente/quic-go/pull/3253))
|
||||
- drop support for Go 1.15 ([lucas-clemente/quic-go#3247](https://github.com/lucas-clemente/quic-go/pull/3247))
|
||||
- quicvarint: add Reader and Writer interfaces (#3233) ([lucas-clemente/quic-go#3233](https://github.com/lucas-clemente/quic-go/pull/3233))
|
||||
- fix race when stream.Read and CancelRead are called concurrently ([lucas-clemente/quic-go#3241](https://github.com/lucas-clemente/quic-go/pull/3241))
|
||||
- also count coalesced 0-RTT packets in the integration tests ([lucas-clemente/quic-go#3251](https://github.com/lucas-clemente/quic-go/pull/3251))
|
||||
- remove draft versions 32 and 34 from README (#3244) ([lucas-clemente/quic-go#3244](https://github.com/lucas-clemente/quic-go/pull/3244))
|
||||
- update Changelog ([lucas-clemente/quic-go#3245](https://github.com/lucas-clemente/quic-go/pull/3245))
|
||||
- optimize hasOutstandingCryptoPackets in sentPacketHandler ([lucas-clemente/quic-go#3230](https://github.com/lucas-clemente/quic-go/pull/3230))
|
||||
- permit underlying conn to implement batch interface directly ([lucas-clemente/quic-go#3237](https://github.com/lucas-clemente/quic-go/pull/3237))
|
||||
- cancel the PTO timer when all Handshake packets are acknowledged ([lucas-clemente/quic-go#3231](https://github.com/lucas-clemente/quic-go/pull/3231))
|
||||
- fix flaky INVALID_TOKEN server test ([lucas-clemente/quic-go#3223](https://github.com/lucas-clemente/quic-go/pull/3223))
|
||||
- drop support for QUIC draft version 32 and 34 ([lucas-clemente/quic-go#3217](https://github.com/lucas-clemente/quic-go/pull/3217))
|
||||
- fix flaky 0-RTT integration test ([lucas-clemente/quic-go#3224](https://github.com/lucas-clemente/quic-go/pull/3224))
|
||||
- use batched reads ([lucas-clemente/quic-go#3142](https://github.com/lucas-clemente/quic-go/pull/3142))
|
||||
- add a config option to disable sending of Version Negotiation packets ([lucas-clemente/quic-go#3216](https://github.com/lucas-clemente/quic-go/pull/3216))
|
||||
- remove the RetireBugBackwardsCompatibilityMode ([lucas-clemente/quic-go#3213](https://github.com/lucas-clemente/quic-go/pull/3213))
|
||||
- remove outdated ackhandler test case ([lucas-clemente/quic-go#3212](https://github.com/lucas-clemente/quic-go/pull/3212))
|
||||
- remove unused StripGreasedVersions function ([lucas-clemente/quic-go#3214](https://github.com/lucas-clemente/quic-go/pull/3214))
|
||||
- fix incorrect usage of errors.Is ([lucas-clemente/quic-go#3215](https://github.com/lucas-clemente/quic-go/pull/3215))
|
||||
- return error on SendMessage when session is closed ([lucas-clemente/quic-go#3218](https://github.com/lucas-clemente/quic-go/pull/3218))
|
||||
- remove a redundant error check ([lucas-clemente/quic-go#3210](https://github.com/lucas-clemente/quic-go/pull/3210))
|
||||
- update golangci-lint to v1.41.1 ([lucas-clemente/quic-go#3205](https://github.com/lucas-clemente/quic-go/pull/3205))
|
||||
- Update doc for dialer in http3.RoundTripper ([lucas-clemente/quic-go#3208](https://github.com/lucas-clemente/quic-go/pull/3208))
|
||||
- github.com/multiformats/go-multiaddr (v0.3.3 -> v0.4.0):
|
||||
- remove forced dependency on deprecated go-maddr-filter ([multiformats/go-multiaddr#162](https://github.com/multiformats/go-multiaddr/pull/162))
|
||||
- remove deprecated SwapToP2pMultiaddrs ([multiformats/go-multiaddr#161](https://github.com/multiformats/go-multiaddr/pull/161))
|
||||
- remove Makefile ([multiformats/go-multiaddr#163](https://github.com/multiformats/go-multiaddr/pull/163))
|
||||
- remove deprecated filter functions ([multiformats/go-multiaddr#157](https://github.com/multiformats/go-multiaddr/pull/157))
|
||||
- remove deprecated NetCodec ([multiformats/go-multiaddr#159](https://github.com/multiformats/go-multiaddr/pull/159))
|
||||
- add Noise ([multiformats/go-multiaddr#156](https://github.com/multiformats/go-multiaddr/pull/156))
|
||||
- Add TLS protocol ([multiformats/go-multiaddr#153](https://github.com/multiformats/go-multiaddr/pull/153))
|
||||
- github.com/multiformats/go-multicodec (v0.2.0 -> v0.3.0):
|
||||
- Export reserved range constants (#53) ([multiformats/go-multicodec#53](https://github.com/multiformats/go-multicodec/pull/53))
|
||||
- make Code.Set accept valid code numbers
|
||||
- replace Of with Code.Set, implementing flag.Value
|
||||
- add multiformats/multicodec as a git submodule
|
||||
- update the generator with the "status" CSV column
|
||||
- Run `go generate` to generate the latest codecs
|
||||
- Add lookup for multicodec code by string name ([multiformats/go-multicodec#40](https://github.com/multiformats/go-multicodec/pull/40))
|
||||
|
||||
### Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| Daniel Martí | 42 | +8549/-6587 | 170 |
|
||||
| Eric Myhre | 55 | +5883/-6715 | 395 |
|
||||
| Marten Seemann | 100 | +1814/-2028 | 275 |
|
||||
| Steven Allen | 80 | +1573/-1998 | 127 |
|
||||
| hannahhoward | 18 | +1721/-671 | 53 |
|
||||
| Will | 2 | +1114/-1217 | 18 |
|
||||
| Andrew Gillis | 2 | +1220/-720 | 14 |
|
||||
| gammazero | 3 | +43/-1856 | 10 |
|
||||
| Masih H. Derkani | 3 | +960/-896 | 8 |
|
||||
| Adin Schmahmann | 25 | +1458/-313 | 44 |
|
||||
| vyzo | 27 | +986/-353 | 60 |
|
||||
| Will Scott | 6 | +852/-424 | 16 |
|
||||
| Rod Vagg | 19 | +983/-255 | 66 |
|
||||
| Petar Maymounkov | 6 | +463/-179 | 22 |
|
||||
| web3-bot | 10 | +211/-195 | 24 |
|
||||
| adlrocha | 1 | +330/-75 | 15 |
|
||||
| RubenKelevra | 2 | +128/-210 | 2 |
|
||||
| Ian Davis | 3 | +200/-109 | 17 |
|
||||
| Cory Schwartz | 3 | +231/-33 | 7 |
|
||||
| Keenan Nemetz | 1 | +184/-71 | 2 |
|
||||
| Randy Reddig | 2 | +187/-53 | 8 |
|
||||
| Takashi Matsuda | 3 | +201/-2 | 7 |
|
||||
| guseggert | 4 | +161/-20 | 9 |
|
||||
| Lucas Molas | 5 | +114/-47 | 27 |
|
||||
| nisdas | 4 | +115/-45 | 7 |
|
||||
| Michael Muré | 6 | +107/-33 | 24 |
|
||||
| Richard Ramos | 2 | +113/-9 | 3 |
|
||||
| Marcin Rataj | 12 | +88/-24 | 13 |
|
||||
| Ondrej Prazak | 2 | +104/-6 | 4 |
|
||||
| Michal Dobaczewski | 2 | +77/-28 | 3 |
|
||||
| Jorropo | 3 | +9/-75 | 4 |
|
||||
| Andey Robins | 1 | +70/-3 | 3 |
|
||||
| Gus Eggert | 10 | +34/-31 | 12 |
|
||||
| noot | 1 | +54/-9 | 5 |
|
||||
| Maxim Merzhanov | 1 | +29/-24 | 1 |
|
||||
| Adrian Lanzafame | 1 | +30/-13 | 2 |
|
||||
| Bogdan Stirbat | 1 | +22/-16 | 2 |
|
||||
| Shad Sterling | 1 | +28/-3 | 1 |
|
||||
| Jesse Bouwman | 5 | +30/-0 | 5 |
|
||||
| Pavel Karpy | 1 | +19/-7 | 2 |
|
||||
| lasiar | 5 | +14/-10 | 5 |
|
||||
| Dennis Trautwein | 1 | +20/-4 | 2 |
|
||||
| Louis Thibault | 1 | +22/-1 | 2 |
|
||||
| whyrusleeping | 2 | +21/-1 | 2 |
|
||||
| aarshkshah1992 | 3 | +12/-8 | 3 |
|
||||
| Peter Rabbitson | 2 | +20/-0 | 2 |
|
||||
| bt90 | 2 | +17/-2 | 2 |
|
||||
| Dominic Della Valle | 1 | +13/-1 | 2 |
|
||||
| Audrius Butkevicius | 1 | +12/-1 | 1 |
|
||||
| Brian Strauch | 1 | +9/-3 | 1 |
|
||||
| Aarsh Shah | 2 | +1/-11 | 2 |
|
||||
| Whyrusleeping | 1 | +11/-0 | 1 |
|
||||
| Max | 1 | +7/-3 | 1 |
|
||||
| vallder | 1 | +3/-5 | 1 |
|
||||
| Michael Burns | 3 | +2/-6 | 3 |
|
||||
| Lasse Johnsen | 1 | +4/-4 | 2 |
|
||||
| snyh | 1 | +5/-2 | 1 |
|
||||
| Hector Sanjuan | 2 | +3/-2 | 2 |
|
||||
| 市川恭佑 (ebi) | 1 | +1/-3 | 1 |
|
||||
| godcong | 2 | +2/-1 | 2 |
|
||||
| Mathis Engelbart | 1 | +1/-2 | 1 |
|
||||
| folbrich | 1 | +1/-1 | 1 |
|
||||
| Med Mouine | 1 | +1/-1 | 1 |
|
||||
|
||||
|
||||
## v0.9.1 2021-07-20
|
||||
|
||||
This is a small bug fix release resolving the following issues:
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
# Note: when updating the go minor version here, also update the go-channel in snap/snapcraft.yml
|
||||
FROM golang:1.15.2-buster
|
||||
FROM golang:1.16.7-buster
|
||||
LABEL maintainer="Steven Allen <steven@stebalien.com>"
|
||||
|
||||
# Install deps
|
||||
|
||||
296
README.md
296
README.md
@ -2,13 +2,9 @@
|
||||
|
||||

|
||||
|
||||
[](http://ipn.io)
|
||||
[](https://matrix.to/#/room/#ipfs:matrix.org)
|
||||
[](http://webchat.freenode.net/?channels=%23ipfs)
|
||||
[](https://discord.gg/24fmuwR)
|
||||
[](https://godoc.org/github.com/ipfs/go-ipfs)
|
||||
[](https://github.com/RichardLitt/standard-readme)
|
||||
[](https://circleci.com/gh/ipfs/go-ipfs)
|
||||
[](https://protocol.ai)
|
||||
[](https://godoc.org/github.com/ipfs/go-ipfs)
|
||||
[](https://circleci.com/gh/ipfs/go-ipfs)
|
||||
|
||||
## What is IPFS?
|
||||
|
||||
@ -22,29 +18,60 @@ Before opening an issue, consider using one of the following locations to ensure
|
||||
- IPFS _design_ in [ipfs/specs issues](https://github.com/ipfs/specs/issues).
|
||||
- Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues).
|
||||
- Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.io).
|
||||
- Or [chat with us](https://docs.ipfs.io/community/chat/).
|
||||
|
||||
[](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [](https://twitter.com/IPFS)
|
||||
|
||||
## Next milestones
|
||||
|
||||
[Milestones on Github](https://github.com/ipfs/go-ipfs/milestones)
|
||||
|
||||
<!-- ToDo automate creation of these
|
||||
[](https://github.com/ipfs/go-ipfs/milestone/51)
|
||||
[](https://github.com/ipfs/go-ipfs/milestone/48)
|
||||
[](https://github.com/ipfs/go-ipfs/milestone/50)
|
||||
[](https://github.com/ipfs/go-ipfs/milestone/49)
|
||||
-->
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Security Issues](#security-issues)
|
||||
- [Install](#install)
|
||||
- [System Requirements](#system-requirements)
|
||||
- [Install prebuilt packages](#install-prebuilt-packages)
|
||||
- [From Linux package managers](#from-linux-package-managers)
|
||||
- [Docker](#docker)
|
||||
- [Native Linux package managers](#native-linux-package-managers)
|
||||
- [ArchLinux](#archlinux)
|
||||
- [Nix](#nix-linux)
|
||||
- [Solus](#solus)
|
||||
- [openSUSE](#opensuse)
|
||||
- [Other package managers](#other-package-managers)
|
||||
- [Guix](#guix)
|
||||
- [Snap](#snap)
|
||||
- [macOS package managers](#macos-package-managers)
|
||||
    - [MacPorts](#macports)
|
||||
- [Nix](#nix-macos)
|
||||
- [Windows package managers](#windows-package-managers)
|
||||
- [Chocolatey](#chocolatey)
|
||||
- [Scoop](#scoop)
|
||||
- [Install prebuilt binaries](#install-prebuilt-binaries)
|
||||
- [Build from Source](#build-from-source)
|
||||
- [Install Go](#install-go)
|
||||
- [Download and Compile IPFS](#download-and-compile-ipfs)
|
||||
- [Cross Compiling](#cross-compiling)
|
||||
- [OpenSSL](#openssl)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Updating go-ipfs](#updating-go-ipfs)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Some things to try](#some-things-to-try)
|
||||
- [Usage](#usage)
|
||||
- [Running IPFS inside Docker](#running-ipfs-inside-docker)
|
||||
- [Troubleshooting](#troubleshooting-1)
|
||||
- [Packages](#packages)
|
||||
- [Development](#development)
|
||||
- [Map of go-ipfs Subsystems](#map-of-go-ipfs-subsystems)
|
||||
- [CLI, HTTP-API, Architecture Diagram](#cli-http-api-architecture-diagram)
|
||||
- [Testing](#testing)
|
||||
- [Development Dependencies](#development-dependencies)
|
||||
- [Developer Notes](#developer-notes)
|
||||
- [Contributing](#contributing)
|
||||
- [License](#license)
|
||||
|
||||
@ -69,40 +96,32 @@ If your system is resource-constrained, we recommend:
|
||||
1. Installing OpenSSL and rebuilding go-ipfs manually with `make build GOTAGS=openssl`. See the [download and compile](#download-and-compile-ipfs) section for more information on compiling go-ipfs.
|
||||
2. Initializing your daemon with `ipfs init --profile=lowpower`
|
||||
|
||||
### Install prebuilt packages
|
||||
### Docker
|
||||
|
||||
We host prebuilt binaries over at our [distributions page](https://ipfs.io/ipns/dist.ipfs.io#go-ipfs).
|
||||
[](https://hub.docker.com/r/ipfs/go-ipfs/)
|
||||
|
||||
From there:
|
||||
- Click the blue "Download go-ipfs" on the right side of the page.
|
||||
- Open/extract the archive.
|
||||
- Move `ipfs` to your path (`install.sh` can do it for you).
|
||||
More info on how to run go-ipfs inside docker can be found [here](https://docs.ipfs.io/how-to/run-ipfs-inside-docker/).
|
||||
|
||||
You can also download go-ipfs from this project's GitHub releases page if you are unable to access ipfs.io.
|
||||
|
||||
### From Linux package managers
|
||||
### Native Linux package managers
|
||||
|
||||
- [Arch Linux](#arch-linux)
|
||||
- [Nix](#nix)
|
||||
- [Nix](#nix-linux)
|
||||
- [Solus](#solus)
|
||||
- [Snap](#snap)
|
||||
- [openSUSE](#openSUSE)
|
||||
|
||||
#### Arch Linux
|
||||
#### ArchLinux
|
||||
|
||||
In Arch Linux go-ipfs is available as
|
||||
[go-ipfs](https://www.archlinux.org/packages/community/x86_64/go-ipfs/) package.
|
||||
[](https://wiki.archlinux.org/title/IPFS)
|
||||
|
||||
```
|
||||
$ sudo pacman -S go-ipfs
|
||||
```bash
|
||||
# pacman -Syu go-ipfs
|
||||
```
|
||||
|
||||
Development version of go-ipfs is also on AUR under
|
||||
[go-ipfs-git](https://aur.archlinux.org/packages/go-ipfs-git/).
|
||||
You can install it using your favorite AUR Helper or manually from AUR.
|
||||
[](https://aur.archlinux.org/packages/go-ipfs-git/)
|
||||
|
||||
#### Nix
|
||||
#### <a name="nix-linux">Nix</a>
|
||||
|
||||
For Linux and MacOSX you can use the purely functional package manager [Nix](https://nixos.org/nix/):
|
||||
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install go-ipfs like this:
|
||||
|
||||
```
|
||||
$ nix-env -i ipfs
|
||||
@ -110,14 +129,6 @@ $ nix-env -i ipfs
|
||||
|
||||
You can also install the Package by using its attribute name, which is also `ipfs`.
|
||||
|
||||
#### Guix
|
||||
|
||||
GNU's functional package manager, [Guix](https://www.gnu.org/software/guix/), also provides a go-ipfs package:
|
||||
|
||||
```
|
||||
$ guix package -i go-ipfs
|
||||
```
|
||||
|
||||
#### Solus
|
||||
|
||||
In solus, go-ipfs is available in the main repository as
|
||||
@ -129,6 +140,23 @@ $ sudo eopkg install go-ipfs
|
||||
|
||||
You can also install it through the Solus software center.
|
||||
|
||||
#### openSUSE
|
||||
|
||||
[Community Package for go-ipfs](https://software.opensuse.org/package/go-ipfs)
|
||||
|
||||
### Other package managers
|
||||
|
||||
- [Guix](#guix)
|
||||
- [Snap](#snap)
|
||||
|
||||
#### Guix
|
||||
|
||||
GNU's functional package manager, [Guix](https://www.gnu.org/software/guix/), also provides a go-ipfs package:
|
||||
|
||||
```
|
||||
$ guix package -i go-ipfs
|
||||
```
|
||||
|
||||
#### Snap
|
||||
|
||||
With snap, in any of the [supported Linux distributions](https://snapcraft.io/docs/core/install):
|
||||
@ -137,14 +165,37 @@ With snap, in any of the [supported Linux distributions](https://snapcraft.io/do
|
||||
$ sudo snap install ipfs
|
||||
```
|
||||
|
||||
### From Windows package managers
|
||||
#### macOS package managers
|
||||
|
||||
- [MacPorts](#macports)
|
||||
- [Nix](#nix-macos)
|
||||
|
||||
#### MacPorts
|
||||
|
||||
The package [ipfs](https://ports.macports.org/port/ipfs) currently points to go-ipfs and is being maintained.
|
||||
|
||||
```
|
||||
$ sudo port install ipfs
|
||||
```
|
||||
|
||||
#### <a name="nix-macos">Nix</a>
|
||||
|
||||
In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
|
||||
|
||||
```
|
||||
$ nix-env -i ipfs
|
||||
```
|
||||
|
||||
You can also install the Package by using its attribute name, which is also `ipfs`.
|
||||
|
||||
### Windows package managers
|
||||
|
||||
- [Chocolatey](#chocolatey)
|
||||
- [Scoop](#scoop)
|
||||
|
||||
#### Chocolatey
|
||||
|
||||
The package [ipfs](https://chocolatey.org/packages/ipfs) currently points to go-ipfs and is being maintained.
|
||||
[](https://chocolatey.org/packages/go-ipfs)
|
||||
|
||||
```Powershell
|
||||
PS> choco install ipfs
|
||||
@ -158,9 +209,24 @@ PS> scoop bucket add extras
|
||||
PS> scoop install go-ipfs
|
||||
```
|
||||
|
||||
### Install prebuilt binaries
|
||||
|
||||
[](https://ipfs.io/ipns/dist.ipfs.io#go-ipfs)
|
||||
|
||||
From there:
|
||||
- Click the blue "Download go-ipfs" on the right side of the page.
|
||||
- Open/extract the archive.
|
||||
- Move `ipfs` to your path (`install.sh` can do it for you).
|
||||
|
||||
You can also download go-ipfs from this project's GitHub releases page if you are unable to access [dist.ipfs.io](https://ipfs.io/ipns/dist.ipfs.io#go-ipfs):
|
||||
|
||||
[GitHub releases](https://github.com/ipfs/go-ipfs/releases)
|
||||
|
||||
### Build from Source
|
||||
|
||||
go-ipfs's build system requires Go 1.15.2 and some standard POSIX build tools:
|
||||

|
||||
|
||||
go-ipfs's build system requires Go and some standard POSIX build tools:
|
||||
|
||||
* GNU make
|
||||
* Git
|
||||
@ -170,7 +236,9 @@ To build without GCC, build with `CGO_ENABLED=0` (e.g., `make build CGO_ENABLED=
|
||||
|
||||
#### Install Go
|
||||
|
||||
The build process for ipfs requires Go 1.15.2 or higher. If you don't have it: [Download Go 1.15+](https://golang.org/dl/).
|
||||

|
||||
|
||||
If you need to update: [Download latest version of Go](https://golang.org/dl/).
|
||||
|
||||
You'll need to add Go's bin directories to your `$PATH` environment variable e.g., by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`:
|
||||
|
||||
@ -222,8 +290,8 @@ dependencies as well.
|
||||
- _WARNING_: Older versions of OSX FUSE (for Mac OS X) can cause kernel panics when mounting!-
|
||||
We strongly recommend you use the [latest version of OSX FUSE](http://osxfuse.github.io/).
|
||||
(See https://github.com/ipfs/go-ipfs/issues/177)
|
||||
- For more details on setting up FUSE (so that you can mount the filesystem), see the docs folder.
|
||||
- Shell command completion is available in `misc/completion/ipfs-completion.bash`. Read [docs/command-completion.md](docs/command-completion.md) to learn how to install it.
|
||||
- Read [docs/fuse.md](docs/fuse.md) for more details on setting up FUSE (so that you can mount the filesystem).
|
||||
- Shell command completions can be generated with one of the `ipfs commands completion` subcommands. Read [docs/command-completion.md](docs/command-completion.md) to learn more.
|
||||
- See the [misc folder](https://github.com/ipfs/go-ipfs/tree/master/misc) for how to connect IPFS to systemd or whatever init system your distro uses.
|
||||
|
||||
### Updating go-ipfs
|
||||
@ -262,7 +330,10 @@ $ ipfs get /ipns/dist.ipfs.io/go-ipfs/$VERSION/go-ipfs_$VERSION_windows-amd64.zi
|
||||
|
||||
## Getting Started
|
||||
|
||||
See also: https://docs.ipfs.io/introduction/usage/
|
||||
### Usage
|
||||
|
||||
[](https://docs.ipfs.io/how-to/command-line-quick-start/)
|
||||
[](https://docs.ipfs.io/reference/cli/)
|
||||
|
||||
To start using IPFS, you must first initialize IPFS's config files on your
|
||||
system, this is done with `ipfs init`. See `ipfs init --help` for information on
|
||||
@ -279,136 +350,6 @@ Basic proof of 'ipfs working' locally:
|
||||
# QmT78zSuBmuS4z925WZfrqQ1qHaJ56DQaTfyMUF7F8ff5o
|
||||
ipfs cat <that hash>
|
||||
|
||||
### Usage
|
||||
|
||||
```
|
||||
ipfs - Global p2p merkle-dag filesystem.
|
||||
|
||||
ipfs [<flags>] <command> [<arg>] ...
|
||||
|
||||
SUBCOMMANDS
|
||||
BASIC COMMANDS
|
||||
init Initialize local IPFS configuration
|
||||
add <path> Add a file to IPFS
|
||||
cat <ref> Show IPFS object data
|
||||
get <ref> Download IPFS objects
|
||||
ls <ref> List links from an object
|
||||
refs <ref> List hashes of links from an object
|
||||
|
||||
DATA STRUCTURE COMMANDS
|
||||
dag Interact with IPLD DAG nodes
|
||||
files Interact with files as if they were a unix filesystem
|
||||
block Interact with raw blocks in the datastore
|
||||
|
||||
ADVANCED COMMANDS
|
||||
daemon Start a long-running daemon process
|
||||
mount Mount an IPFS read-only mount point
|
||||
resolve Resolve any type of name
|
||||
name Publish and resolve IPNS names
|
||||
key Create and list IPNS name keypairs
|
||||
dns Resolve DNS links
|
||||
pin Pin objects to local storage
|
||||
repo Manipulate the IPFS repository
|
||||
stats Various operational stats
|
||||
p2p Libp2p stream mounting
|
||||
filestore Manage the filestore (experimental)
|
||||
|
||||
NETWORK COMMANDS
|
||||
id Show info about IPFS peers
|
||||
bootstrap Add or remove bootstrap peers
|
||||
swarm Manage connections to the p2p network
|
||||
dht Query the DHT for values or peers
|
||||
ping Measure the latency of a connection
|
||||
diag Print diagnostics
|
||||
|
||||
TOOL COMMANDS
|
||||
config Manage configuration
|
||||
version Show IPFS version information
|
||||
update Download and apply go-ipfs updates
|
||||
commands List all available commands
|
||||
cid Convert and discover properties of CIDs
|
||||
log Manage and show logs of running daemon
|
||||
|
||||
Use 'ipfs <command> --help' to learn more about each command.
|
||||
|
||||
ipfs uses a repository in the local file system. By default, the repo is located at
|
||||
~/.ipfs. To change the repo location, set the $IPFS_PATH environment variable:
|
||||
|
||||
export IPFS_PATH=/path/to/ipfsrepo
|
||||
```
|
||||
|
||||
### Running IPFS inside Docker
|
||||
|
||||
An IPFS docker image is hosted at [hub.docker.com/r/ipfs/go-ipfs](https://hub.docker.com/r/ipfs/go-ipfs/).
|
||||
To make files visible inside the container you need to mount a host directory
|
||||
with the `-v` option to docker. Choose a directory that you want to use to
|
||||
import/export files from IPFS. You should also choose a directory to store
|
||||
IPFS files that will persist when you restart the container.
|
||||
|
||||
export ipfs_staging=</absolute/path/to/somewhere/>
|
||||
export ipfs_data=</absolute/path/to/somewhere_else/>
|
||||
|
||||
Start a container running ipfs and expose ports 4001, 5001 and 8080:
|
||||
|
||||
docker run -d --name ipfs_host -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
|
||||
|
||||
Watch the ipfs log:
|
||||
|
||||
docker logs -f ipfs_host
|
||||
|
||||
Wait for ipfs to start. ipfs is running when you see:
|
||||
|
||||
Gateway (readonly) server
|
||||
listening on /ip4/0.0.0.0/tcp/8080
|
||||
|
||||
You can now stop watching the log.
|
||||
|
||||
Run ipfs commands:
|
||||
|
||||
docker exec ipfs_host ipfs <args...>
|
||||
|
||||
For example: connect to peers
|
||||
|
||||
docker exec ipfs_host ipfs swarm peers
|
||||
|
||||
Add files:
|
||||
|
||||
cp -r <something> $ipfs_staging
|
||||
docker exec ipfs_host ipfs add -r /export/<something>
|
||||
|
||||
Stop the running container:
|
||||
|
||||
docker stop ipfs_host
|
||||
|
||||
When starting a container running ipfs for the first time with an empty data directory, it will call `ipfs init` to initialize configuration files and generate a new keypair. At this time, you can choose which profile to apply using the `IPFS_PROFILE` environment variable:
|
||||
|
||||
docker run -d --name ipfs_host -e IPFS_PROFILE=server -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
|
||||
|
||||
#### Private swarms inside Docker
|
||||
|
||||
It is possible to initialize the container with a swarm key file (`/data/ipfs/swarm.key`) using the variables `IPFS_SWARM_KEY` and `IPFS_SWARM_KEY_FILE`. The `IPFS_SWARM_KEY` creates `swarm.key` with the contents of the variable itself, whilst `IPFS_SWARM_KEY_FILE` copies the key from a path stored in the variable. The `IPFS_SWARM_KEY_FILE` **overwrites** the key generated by `IPFS_SWARM_KEY`.
|
||||
|
||||
docker run -d --name ipfs_host -e IPFS_SWARM_KEY=<your swarm key> -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
|
||||
|
||||
The swarm key initialization can also be done using docker secrets **(requires docker swarm or docker-compose)**:
|
||||
|
||||
cat your_swarm.key | docker secret create swarm_key_secret -
|
||||
docker run -d --name ipfs_host --secret swarm_key_secret -e IPFS_SWARM_KEY_FILE=/run/secrets/swarm_key_secret -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest
|
||||
|
||||
#### Key rotation inside Docker
|
||||
|
||||
If needed, it is possible to do key rotation in an ephemeral container that is temporarily executing against a volume that is mounted under `/data/ipfs`:
|
||||
|
||||
```sh
|
||||
# given container named 'ipfs-test' that persists repo at /path/to/persisted/.ipfs
|
||||
$ docker run -d --name ipfs-test -v /path/to/persisted/.ipfs:/data/ipfs ipfs/go-ipfs:v0.7.0
|
||||
$ docker stop ipfs-test
|
||||
|
||||
# key rotation works like this (old key saved under 'old-self')
|
||||
$ docker run --rm -it -v /path/to/persisted/.ipfs:/data/ipfs ipfs/go-ipfs:v0.7.0 key rotate -o old-self -t ed25519
|
||||
$ docker start ipfs-test # will start with the new key
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
If you have previously installed IPFS before and you are running into problems getting a newer version to work, try deleting (or backing up somewhere else) your IPFS config directory (~/.ipfs by default) and rerunning `ipfs init`. This will reinitialize the config file to its defaults and clear out the local datastore of any bad entries.
|
||||
@ -532,8 +473,7 @@ We ❤️ all [our contributors](docs/AUTHORS); this project wouldn’t be what
|
||||
|
||||
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
|
||||
You can contact us on the freenode #ipfs-dev channel or attend one of our
|
||||
[weekly calls](https://github.com/ipfs/team-mgmt/issues/674).
|
||||
Please reach out to us in one [chat](https://docs.ipfs.io/community/chat/) rooms.
|
||||
|
||||
## License
|
||||
|
||||
|
||||
137
bin/mkreleaselog
137
bin/mkreleaselog
@ -1,40 +1,78 @@
|
||||
#!/bin/zsh
|
||||
#set -x
|
||||
#
|
||||
# Invocation: mkreleaselog [FIRST_REF [LAST_REF]]
|
||||
|
||||
set -euo pipefail
|
||||
export GO111MODULE=on
|
||||
export GOPATH="$(go env GOPATH)"
|
||||
|
||||
alias jq="jq --unbuffered"
|
||||
|
||||
AUTHORS=(
|
||||
# List of PCRE regular expressions to match "included" modules.
|
||||
INCLUDE_MODULES=(
|
||||
# orgs
|
||||
ipfs
|
||||
ipld
|
||||
libp2p
|
||||
multiformats
|
||||
filecoin-project
|
||||
ipfs-shipyard
|
||||
"^github.com/ipfs/"
|
||||
"^github.com/ipld/"
|
||||
"^github.com/libp2p/"
|
||||
"^github.com/multiformats/"
|
||||
"^github.com/filecoin-project/"
|
||||
"^github.com/ipfs-shipyard/"
|
||||
|
||||
# Authors of personal repos used by go-ipfs that should be mentioned in the
|
||||
# Authors of personal modules used by go-ipfs that should be mentioned in the
|
||||
# release notes.
|
||||
whyrusleeping
|
||||
Kubuxu
|
||||
jbenet
|
||||
Stebalien
|
||||
marten-seemann
|
||||
hsanjuan
|
||||
lucas-clemente
|
||||
warpfork
|
||||
"^github.com/whyrusleeping/"
|
||||
"^github.com/Kubuxu/"
|
||||
"^github.com/jbenet/"
|
||||
"^github.com/Stebalien/"
|
||||
"^github.com/marten-seemann/"
|
||||
"^github.com/hsanjuan/"
|
||||
"^github.com/lucas-clemente/"
|
||||
"^github.com/warpfork/"
|
||||
)
|
||||
|
||||
[[ -n "${REPO_FILTER+x}" ]] || REPO_FILTER="github.com/(${$(printf "|%s" "${AUTHORS[@]}"):1})"
|
||||
# List of PCRE regular expressions to match "excluded" modules. Applied after includes.
|
||||
EXCLUDE_MODULES=(
|
||||
"^github.com/marten-seemann/qtls"
|
||||
)
|
||||
|
||||
# Ignored files as git pathspecs. These patters will match any full path component.
|
||||
IGNORE_FILES=(
|
||||
".gx"
|
||||
"package.json"
|
||||
".travis.yml"
|
||||
"go.mod"
|
||||
"go.sum"
|
||||
".github"
|
||||
".circleci"
|
||||
"*.pb.go"
|
||||
"cbor_gen.go"
|
||||
"ipldsch_*.go"
|
||||
)
|
||||
|
||||
##########################################################################################
|
||||
|
||||
if [[ ${#INCLUDE_MODULES[@]} -gt 0 ]]; then
|
||||
INCLUDE_REGEX="(${$(printf "|%s" "${INCLUDE_MODULES[@]}"):1})"
|
||||
else
|
||||
INCLUDE_REGEX="" # "match anything"
|
||||
fi
|
||||
|
||||
if [[ ${#EXCLUDE_MODULES[@]} -gt 0 ]]; then
|
||||
EXCLUDE_REGEX="(${$(printf "|%s" "${EXCLUDE_MODULES[@]}"):1})"
|
||||
else
|
||||
EXCLUDE_REGEX='$^' # "match nothing"
|
||||
fi
|
||||
|
||||
IGNORE_FILES_PATHSPEC=()
|
||||
for f in "${IGNORE_FILES[@]}"; do
|
||||
IGNORE_FILES_PATHSPEC+=(":^:**/$f" ":^:$f") # Prepend the magic "ignore this" sequence.
|
||||
done
|
||||
|
||||
[[ -n "${IGNORED_FILES+x}" ]] || IGNORED_FILES='^\(\.gx\|package\.json\|\.travis\.yml\|go.mod\|go\.sum|\.github|\.circleci\)$'
|
||||
|
||||
NL=$'\n'
|
||||
|
||||
ROOT_DIR="$(git rev-parse --show-toplevel)"
|
||||
|
||||
alias jq="jq --unbuffered"
|
||||
|
||||
msg() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
@ -49,13 +87,15 @@ statlog() {
|
||||
mailmap_file="$ROOT_DIR/.mailmap"
|
||||
fi
|
||||
|
||||
git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%n%aN%n%aE" "$start..$end" | while
|
||||
read hash
|
||||
read name
|
||||
read email
|
||||
read _ # empty line
|
||||
read changes
|
||||
do
|
||||
local stack=()
|
||||
git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}" | while read -r line; do
|
||||
if [[ -n "$line" ]]; then
|
||||
stack+=("$line")
|
||||
continue
|
||||
fi
|
||||
|
||||
read -r changes
|
||||
|
||||
changed=0
|
||||
insertions=0
|
||||
deletions=0
|
||||
@ -72,14 +112,18 @@ statlog() {
|
||||
fi
|
||||
done<<<"${changes//,/$NL}"
|
||||
|
||||
jq -n \
|
||||
--arg "hash" "$hash" \
|
||||
--arg "name" "$name" \
|
||||
--arg "email" "$email" \
|
||||
--argjson "changed" "$changed" \
|
||||
--argjson "insertions" "$insertions" \
|
||||
--argjson "deletions" "$deletions" \
|
||||
'{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
|
||||
for author in "${stack[@]}"; do
|
||||
IFS=$'\t' read -r hash name email <<<"$author"
|
||||
jq -n \
|
||||
--arg "hash" "$hash" \
|
||||
--arg "name" "$name" \
|
||||
--arg "email" "$email" \
|
||||
--argjson "changed" "$changed" \
|
||||
--argjson "insertions" "$insertions" \
|
||||
--argjson "deletions" "$deletions" \
|
||||
'{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
|
||||
done
|
||||
stack=()
|
||||
done
|
||||
}
|
||||
|
||||
@ -103,6 +147,16 @@ pr_link() {
|
||||
printf -- "[%s#%s](https://%s/pull/%s)" "$ghname" "$prnum" "$repo" "$prnum"
|
||||
}
|
||||
|
||||
ignored_commit() {
|
||||
local commit="$1"
|
||||
local matches
|
||||
|
||||
# Check to see if this commit includes any non-ignored files.
|
||||
matches=$(git -C "$dir" diff-tree --no-commit-id --name-only -r "$commit^" "$commit" \
|
||||
-- "${IGNORE_FILES_PATHSPEC[@]}" | wc -l)
|
||||
[[ "$matches" -eq 0 ]]
|
||||
}
|
||||
|
||||
# Generate a release log for a range of commits in a single repo.
|
||||
release_log() {
|
||||
setopt local_options BASH_REMATCH
|
||||
@ -119,9 +173,10 @@ release_log() {
|
||||
--first-parent \
|
||||
"$start..$end" |
|
||||
while read commit subject; do
|
||||
# Skip gx-only PRs.
|
||||
git -C "$dir" diff-tree --no-commit-id --name-only "$commit^" "$commit" |
|
||||
grep -v "${IGNORED_FILES}" >/dev/null || continue
|
||||
# Skip commits that only touch ignored files.
|
||||
if ignored_commit "$commit"; then
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then
|
||||
local prnum="${BASH_REMATCH[2]}"
|
||||
@ -207,11 +262,11 @@ recursive_release_log() {
|
||||
printf -- "- %s:\n" "$module"
|
||||
release_log "$module" "$start" "$end" | indent
|
||||
|
||||
|
||||
statlog "$module" "$start" "$end" > statlog.json
|
||||
|
||||
dep_changes old_deps.json new_deps.json |
|
||||
jq --arg filter "$REPO_FILTER" 'select(.Path | match($filter))' |
|
||||
jq --arg inc "$INCLUDE_REGEX" --arg exc "$EXCLUDE_REGEX" \
|
||||
'select(.Path | test($inc)) | select(.Path | test($exc) | not)' |
|
||||
# Compute changelogs
|
||||
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' |
|
||||
while read module new new_ref old old_ref; do
|
||||
|
||||
@ -29,18 +29,18 @@
|
||||
#
|
||||
set -euo pipefail
|
||||
|
||||
if [[ $# -lt 3 ]] ; then
|
||||
echo 'At least 3 args required. Pass 5 args for a dry run.'
|
||||
if [[ $# -lt 1 ]] ; then
|
||||
echo 'At least 1 arg required. Pass 5 args for a dry run.'
|
||||
echo 'Usage:'
|
||||
echo './push-docker-tags.sh <build number> <git commit sha1> <git branch name> [git tag name] [dry run]'
|
||||
echo './push-docker-tags.sh <build number> [git commit sha1] [git branch name] [git tag name] [dry run]'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BUILD_NUM=$1
|
||||
GIT_SHA1=$2
|
||||
GIT_SHA1=${2:-$(git rev-parse HEAD)}
|
||||
GIT_SHA1_SHORT=$(echo "$GIT_SHA1" | cut -c 1-7)
|
||||
GIT_BRANCH=$3
|
||||
GIT_TAG=${4:-""}
|
||||
GIT_BRANCH=${3:-$(git symbolic-ref -q --short HEAD || echo "unknown")}
|
||||
GIT_TAG=${4:-$(git describe --tags --exact-match || echo "")}
|
||||
DRY_RUN=${5:-false}
|
||||
|
||||
WIP_IMAGE_TAG=${WIP_IMAGE_TAG:-wip}
|
||||
@ -67,9 +67,10 @@ elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
pushTag "latest"
|
||||
pushTag "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981
|
||||
|
||||
elif [ "$GIT_BRANCH" = "feat/stabilize-dht" ]; then
|
||||
pushTag "bifrost-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
pushTag "bifrost-latest"
|
||||
elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
|
||||
# sanitize the branch name since docker tags have stricter char limits than git branch names
|
||||
branch=$(echo "$GIT_BRANCH" | tr '/' '-' | tr --delete --complement '[:alnum:]-')
|
||||
pushTag "${branch}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
|
||||
elif [ "$GIT_BRANCH" = "master" ]; then
|
||||
pushTag "master-${BUILD_NUM}-${GIT_SHA1_SHORT}"
|
||||
|
||||
@ -2,17 +2,13 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
config "github.com/ipfs/go-ipfs-config"
|
||||
"github.com/ipfs/go-ipfs-files"
|
||||
"github.com/ipfs/go-ipfs/core"
|
||||
"github.com/ipfs/go-ipfs/core/coreapi"
|
||||
@ -24,140 +20,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
)
|
||||
|
||||
// readMigrationConfig reads the migration config out of the config, avoiding
|
||||
// reading anything other than the migration section. That way, we're free to
|
||||
// make arbitrary changes to all _other_ sections in migrations.
|
||||
func readMigrationConfig(repoRoot string) (*config.Migration, error) {
|
||||
var cfg struct {
|
||||
Migration config.Migration
|
||||
}
|
||||
|
||||
cfgPath, err := config.Filename(repoRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfgFile, err := os.Open(cfgPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cfgFile.Close()
|
||||
|
||||
err = json.NewDecoder(cfgFile).Decode(&cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch cfg.Migration.Keep {
|
||||
case "":
|
||||
cfg.Migration.Keep = config.DefaultMigrationKeep
|
||||
case "discard", "cache", "keep":
|
||||
default:
|
||||
return nil, errors.New("unknown config value, Migrations.Keep must be 'cache', 'pin', or 'discard'")
|
||||
}
|
||||
|
||||
if len(cfg.Migration.DownloadSources) == 0 {
|
||||
cfg.Migration.DownloadSources = config.DefaultMigrationDownloadSources
|
||||
}
|
||||
|
||||
return &cfg.Migration, nil
|
||||
}
|
||||
|
||||
func readIpfsConfig(repoRoot *string) (bootstrap []string, peers []peer.AddrInfo) {
|
||||
if repoRoot == nil {
|
||||
return
|
||||
}
|
||||
|
||||
cfgPath, err := config.Filename(*repoRoot)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return
|
||||
}
|
||||
|
||||
cfgFile, err := os.Open(cfgPath)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return
|
||||
}
|
||||
defer cfgFile.Close()
|
||||
|
||||
// Attempt to read bootstrap addresses
|
||||
var bootstrapCfg struct {
|
||||
Bootstrap []string
|
||||
}
|
||||
err = json.NewDecoder(cfgFile).Decode(&bootstrapCfg)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "cannot read bootstrap peers from config")
|
||||
} else {
|
||||
bootstrap = bootstrapCfg.Bootstrap
|
||||
}
|
||||
|
||||
if _, err = cfgFile.Seek(0, 0); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
}
|
||||
|
||||
// Attempt to read peers
|
||||
var peeringCfg struct {
|
||||
Peering config.Peering
|
||||
}
|
||||
err = json.NewDecoder(cfgFile).Decode(&peeringCfg)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "cannot read peering from config")
|
||||
} else {
|
||||
peers = peeringCfg.Peering.Peers
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// getMigrationFetcher creates one or more fetchers according to
|
||||
// config.Migration.DownloadSources. If an IpfsFetcher is required, then
|
||||
// bootstrap and peer information in read from the config file in repoRoot,
|
||||
// unless repoRoot is nil.
|
||||
func getMigrationFetcher(cfg *config.Migration, repoRoot *string) (migrations.Fetcher, error) {
|
||||
const httpUserAgent = "go-ipfs"
|
||||
|
||||
// Fetch migrations from current distribution, or location from environ
|
||||
fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist)
|
||||
|
||||
var fetchers []migrations.Fetcher
|
||||
for _, src := range cfg.DownloadSources {
|
||||
src := strings.TrimSpace(src)
|
||||
switch src {
|
||||
case "IPFS", "ipfs":
|
||||
bootstrap, peers := readIpfsConfig(repoRoot)
|
||||
fetchers = append(fetchers, ipfsfetcher.NewIpfsFetcher(fetchDistPath, 0, bootstrap, peers))
|
||||
case "HTTPS", "https", "HTTP", "http":
|
||||
fetchers = append(fetchers, migrations.NewHttpFetcher(fetchDistPath, "", httpUserAgent, 0))
|
||||
default:
|
||||
u, err := url.Parse(src)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad gateway address: %s", err)
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "":
|
||||
u.Scheme = "https"
|
||||
case "https", "http":
|
||||
default:
|
||||
return nil, errors.New("bad gateway address: url scheme must be http or https")
|
||||
}
|
||||
fetchers = append(fetchers, migrations.NewHttpFetcher(fetchDistPath, u.String(), httpUserAgent, 0))
|
||||
case "":
|
||||
// Ignore empty string
|
||||
}
|
||||
}
|
||||
if len(fetchers) == 0 {
|
||||
return nil, errors.New("no sources specified")
|
||||
}
|
||||
|
||||
if len(fetchers) == 1 {
|
||||
return fetchers[0], nil
|
||||
}
|
||||
|
||||
// Wrap fetchers in a MultiFetcher to try them in order
|
||||
return migrations.NewMultiFetcher(fetchers...), nil
|
||||
}
|
||||
|
||||
// addMigrations adds any migration downloaded by the fetcher to the IPFS node
|
||||
func addMigrations(ctx context.Context, node *core.IpfsNode, fetcher migrations.Fetcher, pin bool) error {
|
||||
var fetchers []migrations.Fetcher
|
||||
if mf, ok := fetcher.(*migrations.MultiFetcher); ok {
|
||||
@ -30,6 +30,7 @@ import (
|
||||
nodeMount "github.com/ipfs/go-ipfs/fuse/node"
|
||||
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
|
||||
"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
|
||||
"github.com/ipfs/go-ipfs/repo/fsrepo/migrations/ipfsfetcher"
|
||||
sockets "github.com/libp2p/go-socket-activation"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
@ -66,6 +67,7 @@ const (
|
||||
enablePubSubKwd = "enable-pubsub-experiment"
|
||||
enableIPNSPubSubKwd = "enable-namesys-pubsub"
|
||||
enableMultiplexKwd = "enable-mplex-experiment"
|
||||
agentVersionSuffix = "agent-version-suffix"
|
||||
// apiAddrKwd = "address-api"
|
||||
// swarmAddrKwd = "address-swarm"
|
||||
)
|
||||
@ -179,6 +181,7 @@ Headers.
|
||||
cmds.BoolOption(enablePubSubKwd, "Instantiate the ipfs daemon with the experimental pubsub feature enabled."),
|
||||
cmds.BoolOption(enableIPNSPubSubKwd, "Enable IPNS record distribution through pubsub; enables pubsub."),
|
||||
cmds.BoolOption(enableMultiplexKwd, "DEPRECATED"),
|
||||
cmds.StringOption(agentVersionSuffix, "Optional suffix to the AgentVersion presented by `ipfs id` and also advertised through BitSwap."),
|
||||
|
||||
// TODO: add way to override addresses. tricky part: updating the config if also --init.
|
||||
// cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"),
|
||||
@ -294,12 +297,26 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
return fmt.Errorf("fs-repo requires migration")
|
||||
}
|
||||
|
||||
migrationCfg, err := readMigrationConfig(cctx.ConfigRoot)
|
||||
// Read Migration section of IPFS config
|
||||
migrationCfg, err := migrations.ReadMigrationConfig(cctx.ConfigRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fetcher, err = getMigrationFetcher(migrationCfg, &cctx.ConfigRoot)
|
||||
// Define function to create IPFS fetcher. Do not supply an
|
||||
// already-constructed IPFS fetcher, because this may be expensive and
|
||||
// not needed according to migration config. Instead, supply a function
|
||||
// to construct the particular IPFS fetcher implementation used here,
|
||||
// which is called only if an IPFS fetcher is needed.
|
||||
newIpfsFetcher := func(distPath string) migrations.Fetcher {
|
||||
return ipfsfetcher.NewIpfsFetcher(distPath, 0, &cctx.ConfigRoot)
|
||||
}
|
||||
|
||||
// Fetch migrations from current distribution, or location from environ
|
||||
fetchDistPath := migrations.GetDistPathEnv(migrations.CurrentIpfsDist)
|
||||
|
||||
// Create fetchers according to migrationCfg.DownloadSources
|
||||
fetcher, err = migrations.GetMigrationFetcher(migrationCfg.DownloadSources, fetchDistPath, newIpfsFetcher)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -395,6 +412,11 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
return fmt.Errorf("unrecognized routing option: %s", routingOption)
|
||||
}
|
||||
|
||||
agentVersionSuffixString, _ := req.Options[agentVersionSuffix].(string)
|
||||
if agentVersionSuffixString != "" {
|
||||
version.SetUserAgentSuffix(agentVersionSuffixString)
|
||||
}
|
||||
|
||||
node, err := core.NewNode(req.Context, ncfg)
|
||||
if err != nil {
|
||||
log.Error("error from node construction: ", err)
|
||||
|
||||
@ -1,312 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
config "github.com/ipfs/go-ipfs-config"
|
||||
"github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
|
||||
"github.com/ipfs/go-ipfs/repo/fsrepo/migrations/ipfsfetcher"
|
||||
)
|
||||
|
||||
var testConfig = `
|
||||
{
|
||||
"Bootstrap": [
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
|
||||
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
|
||||
],
|
||||
"Migration": {
|
||||
"DownloadSources": ["IPFS", "HTTP", "127.0.0.1", "https://127.0.1.1"],
|
||||
"Keep": "cache"
|
||||
},
|
||||
"Peering": {
|
||||
"Peers": [
|
||||
{
|
||||
"ID": "12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5",
|
||||
"Addrs": ["/ip4/127.0.0.1/tcp/4001", "/ip4/127.0.0.1/udp/4001/quic"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
func TestReadMigrationConfigDefaults(t *testing.T) {
|
||||
tmpDir := makeConfig("{}")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
cfg, err := readMigrationConfig(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if cfg.Keep != config.DefaultMigrationKeep {
|
||||
t.Error("expected default value for Keep")
|
||||
}
|
||||
|
||||
if len(cfg.DownloadSources) != len(config.DefaultMigrationDownloadSources) {
|
||||
t.Fatal("expected default number of download sources")
|
||||
}
|
||||
for i, src := range config.DefaultMigrationDownloadSources {
|
||||
if cfg.DownloadSources[i] != src {
|
||||
t.Errorf("wrong DownloadSource: %s", cfg.DownloadSources[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadMigrationConfigErrors(t *testing.T) {
|
||||
tmpDir := makeConfig(`{"Migration": {"Keep": "badvalue"}}`)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
_, err := readMigrationConfig(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), "unknown") {
|
||||
t.Fatal("did not get expected error:", err)
|
||||
}
|
||||
|
||||
os.RemoveAll(tmpDir)
|
||||
_, err = readMigrationConfig(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
bootstrap, peers := readIpfsConfig(&tmpDir)
|
||||
if bootstrap != nil {
|
||||
t.Error("expected nil bootstrap")
|
||||
}
|
||||
if peers != nil {
|
||||
t.Error("expected nil peers")
|
||||
}
|
||||
|
||||
tmpDir = makeConfig(`}{`)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
_, err = readMigrationConfig(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadMigrationConfig(t *testing.T) {
|
||||
tmpDir := makeConfig(testConfig)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
cfg, err := readMigrationConfig(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(cfg.DownloadSources) != 4 {
|
||||
t.Fatal("wrong number of DownloadSources")
|
||||
}
|
||||
expect := []string{"IPFS", "HTTP", "127.0.0.1", "https://127.0.1.1"}
|
||||
for i := range expect {
|
||||
if cfg.DownloadSources[i] != expect[i] {
|
||||
t.Errorf("wrong DownloadSource at %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Keep != "cache" {
|
||||
t.Error("wrong value for Keep")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadIpfsConfig(t *testing.T) {
|
||||
tmpDir := makeConfig(testConfig)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
bootstrap, peers := readIpfsConfig(nil)
|
||||
if bootstrap != nil || peers != nil {
|
||||
t.Fatal("expected nil ipfs config items")
|
||||
}
|
||||
|
||||
bootstrap, peers = readIpfsConfig(&tmpDir)
|
||||
if len(bootstrap) != 2 {
|
||||
t.Fatal("wrong number of bootstrap addresses")
|
||||
}
|
||||
if bootstrap[0] != "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt" {
|
||||
t.Fatal("wrong bootstrap address")
|
||||
}
|
||||
|
||||
if len(peers) != 1 {
|
||||
t.Fatal("wrong number of peers")
|
||||
}
|
||||
|
||||
peer := peers[0]
|
||||
if peer.ID.String() != "12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5" {
|
||||
t.Errorf("wrong ID for first peer")
|
||||
}
|
||||
if len(peer.Addrs) != 2 {
|
||||
t.Error("wrong number of addrs for first peer")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadPartialIpfsConfig(t *testing.T) {
|
||||
const (
|
||||
configBadBootstrap = `
|
||||
{
|
||||
"Bootstrap": "unreadable",
|
||||
"Migration": {
|
||||
"DownloadSources": ["IPFS", "HTTP", "127.0.0.1"],
|
||||
"Keep": "cache"
|
||||
},
|
||||
"Peering": {
|
||||
"Peers": [
|
||||
{
|
||||
"ID": "12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5",
|
||||
"Addrs": ["/ip4/127.0.0.1/tcp/4001", "/ip4/127.0.0.1/udp/4001/quic"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
configBadPeers = `
|
||||
{
|
||||
"Bootstrap": [
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
|
||||
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
|
||||
],
|
||||
"Migration": {
|
||||
"DownloadSources": ["IPFS", "HTTP", "127.0.0.1"],
|
||||
"Keep": "cache"
|
||||
},
|
||||
"Peering": "Unreadable-data"
|
||||
}
|
||||
`
|
||||
)
|
||||
|
||||
tmpDir := makeConfig(configBadBootstrap)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
bootstrap, peers := readIpfsConfig(&tmpDir)
|
||||
if bootstrap != nil {
|
||||
t.Fatal("expected nil bootstrap")
|
||||
}
|
||||
if len(peers) != 1 {
|
||||
t.Fatal("wrong number of peers")
|
||||
}
|
||||
if len(peers[0].Addrs) != 2 {
|
||||
t.Error("wrong number of addrs for first peer")
|
||||
}
|
||||
os.RemoveAll(tmpDir)
|
||||
|
||||
tmpDir = makeConfig(configBadPeers)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
bootstrap, peers = readIpfsConfig(&tmpDir)
|
||||
if peers != nil {
|
||||
t.Fatal("expected nil peers")
|
||||
}
|
||||
if len(bootstrap) != 2 {
|
||||
t.Fatal("wrong number of bootstrap addresses")
|
||||
}
|
||||
if bootstrap[0] != "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt" {
|
||||
t.Fatal("wrong bootstrap address")
|
||||
}
|
||||
}
|
||||
|
||||
func makeConfig(configData string) string {
|
||||
tmpDir, err := ioutil.TempDir("", "migration_test")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cfgFile, err := os.Create(filepath.Join(tmpDir, "config"))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if _, err = cfgFile.Write([]byte(configData)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err = cfgFile.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return tmpDir
|
||||
}
|
||||
|
||||
func TestGetMigrationFetcher(t *testing.T) {
|
||||
var f migrations.Fetcher
|
||||
var err error
|
||||
|
||||
cfg := &config.Migration{}
|
||||
|
||||
cfg.DownloadSources = []string{"ftp://bad.gateway.io"}
|
||||
_, err = getMigrationFetcher(cfg, nil)
|
||||
if err == nil || !strings.HasPrefix(err.Error(), "bad gateway addr") {
|
||||
t.Fatal("Expected bad gateway address error, got:", err)
|
||||
}
|
||||
|
||||
cfg.DownloadSources = []string{"::bad.gateway.io"}
|
||||
_, err = getMigrationFetcher(cfg, nil)
|
||||
if err == nil || !strings.HasPrefix(err.Error(), "bad gateway addr") {
|
||||
t.Fatal("Expected bad gateway address error, got:", err)
|
||||
}
|
||||
|
||||
cfg.DownloadSources = []string{"http://localhost"}
|
||||
f, err = getMigrationFetcher(cfg, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, ok := f.(*migrations.HttpFetcher); !ok {
|
||||
t.Fatal("expected HttpFetcher")
|
||||
}
|
||||
|
||||
cfg.DownloadSources = []string{"ipfs"}
|
||||
f, err = getMigrationFetcher(cfg, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, ok := f.(*ipfsfetcher.IpfsFetcher); !ok {
|
||||
t.Fatal("expected IpfsFetcher")
|
||||
}
|
||||
|
||||
cfg.DownloadSources = []string{"http"}
|
||||
f, err = getMigrationFetcher(cfg, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, ok := f.(*migrations.HttpFetcher); !ok {
|
||||
t.Fatal("expected HttpFetcher")
|
||||
}
|
||||
|
||||
cfg.DownloadSources = []string{"IPFS", "HTTPS"}
|
||||
f, err = getMigrationFetcher(cfg, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mf, ok := f.(*migrations.MultiFetcher)
|
||||
if !ok {
|
||||
t.Fatal("expected MultiFetcher")
|
||||
}
|
||||
if mf.Len() != 2 {
|
||||
t.Fatal("expected 2 fetchers in MultiFetcher")
|
||||
}
|
||||
|
||||
cfg.DownloadSources = []string{"ipfs", "https", "some.domain.io"}
|
||||
f, err = getMigrationFetcher(cfg, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mf, ok = f.(*migrations.MultiFetcher)
|
||||
if !ok {
|
||||
t.Fatal("expected MultiFetcher")
|
||||
}
|
||||
if mf.Len() != 3 {
|
||||
t.Fatal("expected 3 fetchers in MultiFetcher")
|
||||
}
|
||||
|
||||
cfg.DownloadSources = nil
|
||||
_, err = getMigrationFetcher(cfg, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error when no sources specified")
|
||||
}
|
||||
|
||||
cfg.DownloadSources = []string{"", ""}
|
||||
_, err = getMigrationFetcher(cfg, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error when empty string fetchers specified")
|
||||
}
|
||||
}
|
||||
@ -6,6 +6,7 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@ -63,6 +64,9 @@ func CommandsCmd(root *cmds.Command) *cmds.Command {
|
||||
Tagline: "List all available commands.",
|
||||
ShortDescription: `Lists all available commands (and subcommands) and exits.`,
|
||||
},
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
"completion": CompletionCmd(root),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(flagsOptionName, "f", "Show command flags"),
|
||||
},
|
||||
@ -131,6 +135,44 @@ func cmdPathStrings(cmd *Command, showOptions bool) []string {
|
||||
return cmds
|
||||
}
|
||||
|
||||
func CompletionCmd(root *cmds.Command) *cmds.Command {
|
||||
return &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Generate shell completions.",
|
||||
},
|
||||
NoRemote: true,
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
"bash": {
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Generate bash shell completions.",
|
||||
ShortDescription: "Generates command completions for the bash shell.",
|
||||
LongDescription: `
|
||||
Generates command completions for the bash shell.
|
||||
|
||||
The simplest way to see it working is write the completions
|
||||
to a file and then source it:
|
||||
|
||||
> ipfs commands completion bash > ipfs-completion.bash
|
||||
> source ./ipfs-completion.bash
|
||||
|
||||
To install the completions permanently, they can be moved to
|
||||
/etc/bash_completion.d or sourced from your ~/.bashrc file.
|
||||
`,
|
||||
},
|
||||
NoRemote: true,
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
var buf bytes.Buffer
|
||||
if err := writeBashCompletions(root, &buf); err != nil {
|
||||
return err
|
||||
}
|
||||
res.SetLength(uint64(buf.Len()))
|
||||
return res.Emit(&buf)
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type nonFatalError string
|
||||
|
||||
// streamResult is a helper function to stream results that possibly
|
||||
|
||||
@ -22,6 +22,8 @@ func TestROCommands(t *testing.T) {
|
||||
"/block/stat",
|
||||
"/cat",
|
||||
"/commands",
|
||||
"/commands/completion",
|
||||
"/commands/completion/bash",
|
||||
"/dag",
|
||||
"/dag/get",
|
||||
"/dag/resolve",
|
||||
@ -88,18 +90,26 @@ func TestCommands(t *testing.T) {
|
||||
"/bootstrap/rm",
|
||||
"/bootstrap/rm/all",
|
||||
"/cat",
|
||||
"/cid",
|
||||
"/cid/base32",
|
||||
"/cid/bases",
|
||||
"/cid/codecs",
|
||||
"/cid/format",
|
||||
"/cid/hashes",
|
||||
"/commands",
|
||||
"/commands/completion",
|
||||
"/commands/completion/bash",
|
||||
"/config",
|
||||
"/config/edit",
|
||||
"/config/replace",
|
||||
"/config/show",
|
||||
"/config/profile",
|
||||
"/config/profile/apply",
|
||||
"/config/replace",
|
||||
"/config/show",
|
||||
"/dag",
|
||||
"/dag/get",
|
||||
"/dag/export",
|
||||
"/dag/put",
|
||||
"/dag/get",
|
||||
"/dag/import",
|
||||
"/dag/put",
|
||||
"/dag/resolve",
|
||||
"/dag/stat",
|
||||
"/dht",
|
||||
@ -113,6 +123,7 @@ func TestCommands(t *testing.T) {
|
||||
"/diag/cmds",
|
||||
"/diag/cmds/clear",
|
||||
"/diag/cmds/set-time",
|
||||
"/diag/profile",
|
||||
"/diag/sys",
|
||||
"/dns",
|
||||
"/file",
|
||||
@ -127,16 +138,16 @@ func TestCommands(t *testing.T) {
|
||||
"/files/read",
|
||||
"/files/rm",
|
||||
"/files/stat",
|
||||
"/files/write",
|
||||
"/filestore",
|
||||
"/filestore/dups",
|
||||
"/filestore/ls",
|
||||
"/filestore/verify",
|
||||
"/files/write",
|
||||
"/get",
|
||||
"/id",
|
||||
"/key",
|
||||
"/key/gen",
|
||||
"/key/export",
|
||||
"/key/gen",
|
||||
"/key/import",
|
||||
"/key/list",
|
||||
"/key/rename",
|
||||
@ -148,12 +159,17 @@ func TestCommands(t *testing.T) {
|
||||
"/log/tail",
|
||||
"/ls",
|
||||
"/mount",
|
||||
"/multibase",
|
||||
"/multibase/decode",
|
||||
"/multibase/encode",
|
||||
"/multibase/transcode",
|
||||
"/multibase/list",
|
||||
"/name",
|
||||
"/name/publish",
|
||||
"/name/pubsub",
|
||||
"/name/pubsub/cancel",
|
||||
"/name/pubsub/state",
|
||||
"/name/pubsub/subs",
|
||||
"/name/pubsub/cancel",
|
||||
"/name/resolve",
|
||||
"/object",
|
||||
"/object/data",
|
||||
@ -222,6 +238,10 @@ func TestCommands(t *testing.T) {
|
||||
"/swarm/filters/add",
|
||||
"/swarm/filters/rm",
|
||||
"/swarm/peers",
|
||||
"/swarm/peering",
|
||||
"/swarm/peering/add",
|
||||
"/swarm/peering/ls",
|
||||
"/swarm/peering/rm",
|
||||
"/tar",
|
||||
"/tar/add",
|
||||
"/tar/cat",
|
||||
@ -230,12 +250,6 @@ func TestCommands(t *testing.T) {
|
||||
"/urlstore/add",
|
||||
"/version",
|
||||
"/version/deps",
|
||||
"/cid",
|
||||
"/cid/format",
|
||||
"/cid/base32",
|
||||
"/cid/codecs",
|
||||
"/cid/bases",
|
||||
"/cid/hashes",
|
||||
}
|
||||
|
||||
cmdSet := make(map[string]struct{})
|
||||
|
||||
142
core/commands/completion.go
Normal file
142
core/commands/completion.go
Normal file
@ -0,0 +1,142 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sort"
|
||||
"text/template"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
)
|
||||
|
||||
type completionCommand struct {
|
||||
Name string
|
||||
Subcommands []*completionCommand
|
||||
ShortFlags []string
|
||||
ShortOptions []string
|
||||
LongFlags []string
|
||||
LongOptions []string
|
||||
}
|
||||
|
||||
func commandToCompletions(name string, cmd *cmds.Command) *completionCommand {
|
||||
parsed := &completionCommand{
|
||||
Name: name,
|
||||
}
|
||||
for name, subCmd := range cmd.Subcommands {
|
||||
parsed.Subcommands = append(parsed.Subcommands, commandToCompletions(name, subCmd))
|
||||
}
|
||||
sort.Slice(parsed.Subcommands, func(i, j int) bool {
|
||||
return parsed.Subcommands[i].Name < parsed.Subcommands[j].Name
|
||||
})
|
||||
|
||||
for _, opt := range cmd.Options {
|
||||
if opt.Type() == cmds.Bool {
|
||||
parsed.LongFlags = append(parsed.LongFlags, opt.Name())
|
||||
for _, name := range opt.Names() {
|
||||
if len(name) == 1 {
|
||||
parsed.ShortFlags = append(parsed.ShortFlags, name)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
parsed.LongOptions = append(parsed.LongOptions, opt.Name())
|
||||
for _, name := range opt.Names() {
|
||||
if len(name) == 1 {
|
||||
parsed.ShortOptions = append(parsed.ShortOptions, name)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Slice(parsed.LongFlags, func(i, j int) bool {
|
||||
return parsed.LongFlags[i] < parsed.LongFlags[j]
|
||||
})
|
||||
sort.Slice(parsed.ShortFlags, func(i, j int) bool {
|
||||
return parsed.ShortFlags[i] < parsed.ShortFlags[j]
|
||||
})
|
||||
sort.Slice(parsed.LongOptions, func(i, j int) bool {
|
||||
return parsed.LongOptions[i] < parsed.LongOptions[j]
|
||||
})
|
||||
sort.Slice(parsed.ShortOptions, func(i, j int) bool {
|
||||
return parsed.ShortOptions[i] < parsed.ShortOptions[j]
|
||||
})
|
||||
return parsed
|
||||
}
|
||||
|
||||
var bashCompletionTemplate *template.Template
|
||||
|
||||
func init() {
|
||||
commandTemplate := template.Must(template.New("command").Parse(`
|
||||
while [[ ${index} -lt ${COMP_CWORD} ]]; do
|
||||
case "${COMP_WORDS[index]}" in
|
||||
-*)
|
||||
let index++
|
||||
continue
|
||||
;;
|
||||
{{ range .Subcommands }}
|
||||
"{{ .Name }}")
|
||||
let index++
|
||||
{{ template "command" . }}
|
||||
return 0
|
||||
;;
|
||||
{{ end }}
|
||||
esac
|
||||
break
|
||||
done
|
||||
|
||||
if [[ "${word}" == -* ]]; then
|
||||
{{ if .ShortFlags -}}
|
||||
_ipfs_compgen -W $'{{ range .ShortFlags }}-{{.}} \n{{end}}' -- "${word}"
|
||||
{{ end -}}
|
||||
{{- if .ShortOptions -}}
|
||||
_ipfs_compgen -S = -W $'{{ range .ShortOptions }}-{{.}}\n{{end}}' -- "${word}"
|
||||
{{ end -}}
|
||||
{{- if .LongFlags -}}
|
||||
_ipfs_compgen -W $'{{ range .LongFlags }}--{{.}} \n{{end}}' -- "${word}"
|
||||
{{ end -}}
|
||||
{{- if .LongOptions -}}
|
||||
_ipfs_compgen -S = -W $'{{ range .LongOptions }}--{{.}}\n{{end}}' -- "${word}"
|
||||
{{ end -}}
|
||||
return 0
|
||||
fi
|
||||
|
||||
while [[ ${index} -lt ${COMP_CWORD} ]]; do
|
||||
if [[ "${COMP_WORDS[index]}" != -* ]]; then
|
||||
let argidx++
|
||||
fi
|
||||
let index++
|
||||
done
|
||||
|
||||
{{- if .Subcommands }}
|
||||
if [[ "${argidx}" -eq 0 ]]; then
|
||||
_ipfs_compgen -W $'{{ range .Subcommands }}{{.Name}} \n{{end}}' -- "${word}"
|
||||
fi
|
||||
{{ end -}}
|
||||
`))
|
||||
|
||||
bashCompletionTemplate = template.Must(commandTemplate.New("root").Parse(`#!/bin/bash
|
||||
|
||||
_ipfs_compgen() {
|
||||
local oldifs="$IFS"
|
||||
IFS=$'\n'
|
||||
while read -r line; do
|
||||
COMPREPLY+=("$line")
|
||||
done < <(compgen "$@")
|
||||
IFS="$oldifs"
|
||||
}
|
||||
|
||||
_ipfs() {
|
||||
COMPREPLY=()
|
||||
local index=1
|
||||
local argidx=0
|
||||
local word="${COMP_WORDS[COMP_CWORD]}"
|
||||
{{ template "command" . }}
|
||||
}
|
||||
complete -o nosort -o nospace -o default -F _ipfs ipfs
|
||||
`))
|
||||
}
|
||||
|
||||
// writeBashCompletions generates a bash completion script for the given command tree.
|
||||
func writeBashCompletions(cmd *cmds.Command, out io.Writer) error {
|
||||
cmds := commandToCompletions("ipfs", cmd)
|
||||
return bashCompletionTemplate.Execute(out, cmds)
|
||||
}
|
||||
@ -16,9 +16,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
pinRootsOptionName = "pin-roots"
|
||||
progressOptionName = "progress"
|
||||
silentOptionName = "silent"
|
||||
pinRootsOptionName = "pin-roots"
|
||||
statsOptionName = "stats"
|
||||
)
|
||||
|
||||
// DagCmd provides a subset of commands for interacting with ipld dag objects
|
||||
@ -53,9 +54,15 @@ type ResolveOutput struct {
|
||||
RemPath string
|
||||
}
|
||||
|
||||
type CarImportStats struct {
|
||||
BlockCount uint64
|
||||
BlockBytesCount uint64
|
||||
}
|
||||
|
||||
// CarImportOutput is the output type of the 'dag import' commands
|
||||
type CarImportOutput struct {
|
||||
Root RootMeta
|
||||
Root *RootMeta `json:",omitempty"`
|
||||
Stats *CarImportStats `json:",omitempty"`
|
||||
}
|
||||
|
||||
// RootMeta is the metadata for a root pinning response
|
||||
@ -77,10 +84,10 @@ into an object of the specified format.
|
||||
cmds.FileArg("object data", true, true, "The object to put").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption("format", "f", "Format that the object will be added as.").WithDefault("cbor"),
|
||||
cmds.StringOption("input-enc", "Format that the input object will be.").WithDefault("json"),
|
||||
cmds.StringOption("store-codec", "Codec that the stored object will be encoded with").WithDefault("dag-cbor"),
|
||||
cmds.StringOption("input-codec", "Codec that the input object is encoded in").WithDefault("dag-json"),
|
||||
cmds.BoolOption("pin", "Pin this object when adding."),
|
||||
cmds.StringOption("hash", "Hash function to use").WithDefault(""),
|
||||
cmds.StringOption("hash", "Hash function to use").WithDefault("sha2-256"),
|
||||
},
|
||||
Run: dagPut,
|
||||
Type: OutputObject{},
|
||||
@ -108,6 +115,9 @@ format.
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("ref", true, false, "The object to get").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption("output-codec", "Format that the object will be encoded as.").WithDefault("dag-json"),
|
||||
},
|
||||
Run: dagGet,
|
||||
}
|
||||
|
||||
@ -157,8 +167,10 @@ var DagResolveCmd = &cmds.Command{
|
||||
}
|
||||
|
||||
type importResult struct {
|
||||
roots map[cid.Cid]struct{}
|
||||
err error
|
||||
blockCount uint64
|
||||
blockBytesCount uint64
|
||||
roots map[cid.Cid]struct{}
|
||||
err error
|
||||
}
|
||||
|
||||
// DagImportCmd is a command for importing a car to ipfs
|
||||
@ -190,8 +202,9 @@ Maximum supported CAR version: 1
|
||||
cmds.FileArg("path", true, true, "The path of a .car file.").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(silentOptionName, "No output."),
|
||||
cmds.BoolOption(pinRootsOptionName, "Pin optional roots listed in the .car headers after importing.").WithDefault(true),
|
||||
cmds.BoolOption(silentOptionName, "No output."),
|
||||
cmds.BoolOption(statsOptionName, "Output stats."),
|
||||
},
|
||||
Type: CarImportOutput{},
|
||||
Run: dagImport,
|
||||
@ -203,6 +216,22 @@ Maximum supported CAR version: 1
|
||||
return nil
|
||||
}
|
||||
|
||||
// event should have only one of `Root` or `Stats` set, not both
|
||||
if event.Root == nil {
|
||||
if event.Stats == nil {
|
||||
return fmt.Errorf("Unexpected message from DAG import")
|
||||
}
|
||||
stats, _ := req.Options[statsOptionName].(bool)
|
||||
if stats {
|
||||
fmt.Fprintf(w, "Imported %d blocks (%d bytes)\n", event.Stats.BlockCount, event.Stats.BlockBytesCount)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if event.Stats != nil {
|
||||
return fmt.Errorf("Unexpected message from DAG import")
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@ -1,11 +1,18 @@
|
||||
package dagcmd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/ipfs/go-ipfs/core/commands/cmdenv"
|
||||
ipldlegacy "github.com/ipfs/go-ipld-legacy"
|
||||
"github.com/ipfs/interface-go-ipfs-core/path"
|
||||
|
||||
"github.com/ipld/go-ipld-prime"
|
||||
"github.com/ipld/go-ipld-prime/multicodec"
|
||||
"github.com/ipld/go-ipld-prime/traversal"
|
||||
mc "github.com/multiformats/go-multicodec"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
)
|
||||
|
||||
@ -15,6 +22,12 @@ func dagGet(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) e
|
||||
return err
|
||||
}
|
||||
|
||||
codecStr, _ := req.Options["output-codec"].(string)
|
||||
var codec mc.Code
|
||||
if err := codec.Set(codecStr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rp, err := api.ResolvePath(req.Context, path.New(req.Arguments[0]))
|
||||
if err != nil {
|
||||
return err
|
||||
@ -25,14 +38,34 @@ func dagGet(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) e
|
||||
return err
|
||||
}
|
||||
|
||||
var out interface{} = obj
|
||||
universal, ok := obj.(ipldlegacy.UniversalNode)
|
||||
if !ok {
|
||||
return fmt.Errorf("%T is not a valid IPLD node", obj)
|
||||
}
|
||||
|
||||
finalNode := universal.(ipld.Node)
|
||||
|
||||
if len(rp.Remainder()) > 0 {
|
||||
rem := strings.Split(rp.Remainder(), "/")
|
||||
final, _, err := obj.Resolve(rem)
|
||||
remainderPath := ipld.ParsePath(rp.Remainder())
|
||||
|
||||
finalNode, err = traversal.Get(finalNode, remainderPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out = final
|
||||
}
|
||||
return cmds.EmitOnce(res, &out)
|
||||
|
||||
encoder, err := multicodec.LookupEncoder(uint64(codec))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid encoding: %s - %s", codec, err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
defer w.Close()
|
||||
if err := encoder(finalNode, w); err != nil {
|
||||
_ = w.CloseWithError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
return res.Emit(r)
|
||||
}
|
||||
|
||||
@ -101,7 +101,7 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
|
||||
failedPins++
|
||||
}
|
||||
|
||||
if err := res.Emit(&CarImportOutput{Root: ret}); err != nil {
|
||||
if err := res.Emit(&CarImportOutput{Root: &ret}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -115,6 +115,19 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
|
||||
}
|
||||
}
|
||||
|
||||
stats, _ := req.Options[statsOptionName].(bool)
|
||||
if stats {
|
||||
err = res.Emit(&CarImportOutput{
|
||||
Stats: &CarImportStats{
|
||||
BlockCount: done.blockCount,
|
||||
BlockBytesCount: done.blockBytesCount,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -126,6 +139,7 @@ func importWorker(req *cmds.Request, re cmds.ResponseEmitter, api iface.CoreAPI,
|
||||
batch := ipld.NewBatch(req.Context, api.Dag())
|
||||
|
||||
roots := make(map[cid.Cid]struct{})
|
||||
var blockCount, blockBytesCount uint64
|
||||
|
||||
it := req.Files.Entries()
|
||||
for it.Next() {
|
||||
@ -176,6 +190,8 @@ func importWorker(req *cmds.Request, re cmds.ResponseEmitter, api iface.CoreAPI,
|
||||
if err := batch.Add(req.Context, nd); err != nil {
|
||||
return err
|
||||
}
|
||||
blockCount++
|
||||
blockBytesCount += uint64(len(block.RawData()))
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -197,5 +213,8 @@ func importWorker(req *cmds.Request, re cmds.ResponseEmitter, api iface.CoreAPI,
|
||||
return
|
||||
}
|
||||
|
||||
ret <- importResult{roots: roots}
|
||||
ret <- importResult{
|
||||
blockCount: blockCount,
|
||||
blockBytesCount: blockBytesCount,
|
||||
roots: roots}
|
||||
}
|
||||
|
||||
@ -1,16 +1,28 @@
|
||||
package dagcmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-ipfs/core/commands/cmdenv"
|
||||
"github.com/ipfs/go-ipfs/core/coredag"
|
||||
ipldlegacy "github.com/ipfs/go-ipld-legacy"
|
||||
"github.com/ipld/go-ipld-prime/multicodec"
|
||||
basicnode "github.com/ipld/go-ipld-prime/node/basic"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
mc "github.com/multiformats/go-multicodec"
|
||||
|
||||
// Expected minimal set of available format/ienc codecs.
|
||||
_ "github.com/ipld/go-codec-dagpb"
|
||||
_ "github.com/ipld/go-ipld-prime/codec/cbor"
|
||||
_ "github.com/ipld/go-ipld-prime/codec/dagcbor"
|
||||
_ "github.com/ipld/go-ipld-prime/codec/dagjson"
|
||||
_ "github.com/ipld/go-ipld-prime/codec/json"
|
||||
_ "github.com/ipld/go-ipld-prime/codec/raw"
|
||||
)
|
||||
|
||||
func dagPut(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
@ -19,21 +31,38 @@ func dagPut(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) e
|
||||
return err
|
||||
}
|
||||
|
||||
ienc, _ := req.Options["input-enc"].(string)
|
||||
format, _ := req.Options["format"].(string)
|
||||
inputCodec, _ := req.Options["input-codec"].(string)
|
||||
storeCodec, _ := req.Options["store-codec"].(string)
|
||||
hash, _ := req.Options["hash"].(string)
|
||||
dopin, _ := req.Options["pin"].(bool)
|
||||
|
||||
// mhType tells inputParser which hash should be used. MaxUint64 means 'use
|
||||
// default hash' (sha256 for cbor, sha1 for git..)
|
||||
mhType := uint64(math.MaxUint64)
|
||||
var icodec mc.Code
|
||||
if err := icodec.Set(inputCodec); err != nil {
|
||||
return err
|
||||
}
|
||||
var scodec mc.Code
|
||||
if err := scodec.Set(storeCodec); err != nil {
|
||||
return err
|
||||
}
|
||||
var mhType mc.Code
|
||||
if err := mhType.Set(hash); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if hash != "" {
|
||||
var ok bool
|
||||
mhType, ok = mh.Names[hash]
|
||||
if !ok {
|
||||
return fmt.Errorf("%s in not a valid multihash name", hash)
|
||||
}
|
||||
cidPrefix := cid.Prefix{
|
||||
Version: 1,
|
||||
Codec: uint64(scodec),
|
||||
MhType: uint64(mhType),
|
||||
MhLength: -1,
|
||||
}
|
||||
|
||||
decoder, err := multicodec.LookupDecoder(uint64(icodec))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encoder, err := multicodec.LookupEncoder(uint64(scodec))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var adder ipld.NodeAdder = api.Dag()
|
||||
@ -48,22 +77,36 @@ func dagPut(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) e
|
||||
if file == nil {
|
||||
return fmt.Errorf("expected a regular file")
|
||||
}
|
||||
nds, err := coredag.ParseInputs(ienc, format, file, mhType, -1)
|
||||
|
||||
node := basicnode.Prototype.Any.NewBuilder()
|
||||
if err := decoder(node, file); err != nil {
|
||||
return err
|
||||
}
|
||||
n := node.Build()
|
||||
|
||||
bd := bytes.NewBuffer([]byte{})
|
||||
if err := encoder(n, bd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockCid, err := cidPrefix.Sum(bd.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(nds) == 0 {
|
||||
return fmt.Errorf("no node returned from ParseInputs")
|
||||
blk, err := blocks.NewBlockWithCid(bd.Bytes(), blockCid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ln := ipldlegacy.LegacyNode{
|
||||
Block: blk,
|
||||
Node: n,
|
||||
}
|
||||
|
||||
for _, nd := range nds {
|
||||
err := b.Add(req.Context, nd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.Add(req.Context, &ln); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cid := nds[0].Cid()
|
||||
cid := ln.Cid()
|
||||
if err := res.Emit(&OutputObject{Cid: cid}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -10,7 +10,8 @@ var DiagCmd = &cmds.Command{
|
||||
},
|
||||
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
"sys": sysDiagCmd,
|
||||
"cmds": ActiveReqsCmd,
|
||||
"sys": sysDiagCmd,
|
||||
"cmds": ActiveReqsCmd,
|
||||
"profile": sysProfileCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -356,12 +356,21 @@ GC'ed.
|
||||
cmds.StringArg("source", true, false, "Source IPFS or MFS path to copy."),
|
||||
cmds.StringArg("dest", true, false, "Destination within MFS."),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(filesParentsOptionName, "p", "Make parent directories as needed."),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
mkParents, _ := req.Options[filesParentsOptionName].(bool)
|
||||
nd, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
prefix, err := getPrefixNew(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -389,6 +398,13 @@ GC'ed.
|
||||
return fmt.Errorf("cp: cannot get node from path %s: %s", src, err)
|
||||
}
|
||||
|
||||
if mkParents {
|
||||
err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = mfs.PutNode(nd.FilesRoot, dst, node)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cp: cannot put node in path %s: %s", dst, err)
|
||||
|
||||
@ -223,6 +223,6 @@ func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (interface{}, error) {
|
||||
sort.Strings(info.Protocols)
|
||||
}
|
||||
info.ProtocolVersion = identify.LibP2PVersion
|
||||
info.AgentVersion = version.UserAgent
|
||||
info.AgentVersion = version.GetUserAgentVersion()
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@ -29,8 +29,7 @@ func KeyEncoderFromString(formatLabel string) (KeyEncoder, error) {
|
||||
|
||||
func (enc KeyEncoder) FormatID(id peer.ID) string {
|
||||
if enc.baseEnc == nil {
|
||||
//nolint deprecated
|
||||
return peer.IDB58Encode(id)
|
||||
return peer.Encode(id)
|
||||
}
|
||||
if s, err := peer.ToCid(id).StringOfBase(enc.baseEnc.Encoding()); err != nil {
|
||||
panic(err)
|
||||
|
||||
171
core/commands/multibase.go
Normal file
171
core/commands/multibase.go
Normal file
@ -0,0 +1,171 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
"github.com/ipfs/go-ipfs/core/commands/cmdenv"
|
||||
mbase "github.com/multiformats/go-multibase"
|
||||
)
|
||||
|
||||
var MbaseCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Encode and decode files or stdin with multibase format",
|
||||
},
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
"encode": mbaseEncodeCmd,
|
||||
"decode": mbaseDecodeCmd,
|
||||
"transcode": mbaseTranscodeCmd,
|
||||
"list": basesCmd,
|
||||
},
|
||||
Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
|
||||
}
|
||||
|
||||
const (
|
||||
mbaseOptionName = "b"
|
||||
)
|
||||
|
||||
var mbaseEncodeCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Encode data into multibase string",
|
||||
LongDescription: `
|
||||
This command expects a file name or data provided via stdin.
|
||||
|
||||
By default it will use URL-safe base64url encoding,
|
||||
but one can customize used base with -b:
|
||||
|
||||
> echo hello | ipfs multibase encode -b base16 > output_file
|
||||
> cat output_file
|
||||
f68656c6c6f0a
|
||||
|
||||
> echo hello > input_file
|
||||
> ipfs multibase encode -b base16 input_file
|
||||
f68656c6c6f0a
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.FileArg("file", true, false, "data to encode").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption(mbaseOptionName, "multibase encoding").WithDefault("base64url"),
|
||||
},
|
||||
Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
if err := req.ParseBodyArgs(); err != nil {
|
||||
return err
|
||||
}
|
||||
encoderName, _ := req.Options[mbaseOptionName].(string)
|
||||
encoder, err := mbase.EncoderByName(encoderName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files := req.Files.Entries()
|
||||
file, err := cmdenv.GetFileArg(files)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to access file: %w", err)
|
||||
}
|
||||
buf, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read file contents: %w", err)
|
||||
}
|
||||
encoded := encoder.Encode(buf)
|
||||
reader := strings.NewReader(encoded)
|
||||
return resp.Emit(reader)
|
||||
},
|
||||
}
|
||||
|
||||
var mbaseDecodeCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Decode multibase string",
|
||||
LongDescription: `
|
||||
This command expects multibase inside of a file or via stdin:
|
||||
|
||||
> echo -n hello | ipfs multibase encode -b base16 > file
|
||||
> cat file
|
||||
f68656c6c6f
|
||||
|
||||
> ipfs multibase decode file
|
||||
hello
|
||||
|
||||
> cat file | ipfs multibase decode
|
||||
hello
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.FileArg("encoded_file", true, false, "encoded data to decode").EnableStdin(),
|
||||
},
|
||||
Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
if err := req.ParseBodyArgs(); err != nil {
|
||||
return err
|
||||
}
|
||||
files := req.Files.Entries()
|
||||
file, err := cmdenv.GetFileArg(files)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to access file: %w", err)
|
||||
}
|
||||
encoded_data, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read file contents: %w", err)
|
||||
}
|
||||
_, data, err := mbase.Decode(string(encoded_data))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode multibase: %w", err)
|
||||
}
|
||||
reader := bytes.NewReader(data)
|
||||
return resp.Emit(reader)
|
||||
},
|
||||
}
|
||||
|
||||
var mbaseTranscodeCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Transcode multibase string between bases",
|
||||
LongDescription: `
|
||||
This command expects multibase inside of a file or via stdin.
|
||||
|
||||
By default it will use URL-safe base64url encoding,
|
||||
but one can customize used base with -b:
|
||||
|
||||
> echo -n hello | ipfs multibase encode > file
|
||||
> cat file
|
||||
uaGVsbG8
|
||||
|
||||
> ipfs multibase transcode file -b base16 > transcoded_file
|
||||
> cat transcoded_file
|
||||
f68656c6c6f
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.FileArg("encoded_file", true, false, "encoded data to decode").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption(mbaseOptionName, "multibase encoding").WithDefault("base64url"),
|
||||
},
|
||||
Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
if err := req.ParseBodyArgs(); err != nil {
|
||||
return err
|
||||
}
|
||||
encoderName, _ := req.Options[mbaseOptionName].(string)
|
||||
encoder, err := mbase.EncoderByName(encoderName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files := req.Files.Entries()
|
||||
file, err := cmdenv.GetFileArg(files)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to access file: %w", err)
|
||||
}
|
||||
encoded_data, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read file contents: %w", err)
|
||||
}
|
||||
_, data, err := mbase.Decode(string(encoded_data))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode multibase: %w", err)
|
||||
}
|
||||
encoded := encoder.Encode(data)
|
||||
reader := strings.NewReader(encoded)
|
||||
return resp.Emit(reader)
|
||||
},
|
||||
}
|
||||
@ -167,13 +167,19 @@ NOTE: a comma-separated notation is supported in CLI for convenience:
|
||||
}
|
||||
|
||||
// Prepare Pin.origins
|
||||
// Add own multiaddrs to the 'origins' array, so Pinning Service can
|
||||
// use that as a hint and connect back to us (if possible)
|
||||
// If CID in blockstore, add own multiaddrs to the 'origins' array
|
||||
// so pinning service can use that as a hint and connect back to us.
|
||||
node, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if node.PeerHost != nil {
|
||||
|
||||
isInBlockstore, err := node.Blockstore.Has(rp.Cid())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isInBlockstore && node.PeerHost != nil {
|
||||
addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(node.PeerHost))
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
237
core/commands/profile.go
Normal file
237
core/commands/profile.go
Normal file
@ -0,0 +1,237 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
"github.com/ipfs/go-ipfs/core/commands/e"
|
||||
)
|
||||
|
||||
// time format that works in filenames on windows.
|
||||
var timeFormat = strings.ReplaceAll(time.RFC3339, ":", "_")
|
||||
|
||||
type profileResult struct {
|
||||
File string
|
||||
}
|
||||
|
||||
const cpuProfileTimeOption = "cpu-profile-time"
|
||||
|
||||
var sysProfileCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Collect a performance profile for debugging.",
|
||||
ShortDescription: `
|
||||
Collects cpu, heap, and goroutine profiles from a running go-ipfs daemon
|
||||
into a single zip file. To aid in debugging, this command also attempts to
|
||||
include a copy of the running go-ipfs binary.
|
||||
`,
|
||||
LongDescription: `
|
||||
Collects cpu, heap, and goroutine profiles from a running go-ipfs daemon
|
||||
into a single zipfile. To aid in debugging, this command also attempts to
|
||||
include a copy of the running go-ipfs binary.
|
||||
|
||||
Profile's can be examined using 'go tool pprof', some tips can be found at
|
||||
https://github.com/ipfs/go-ipfs/blob/master/docs/debug-guide.md.
|
||||
|
||||
Privacy Notice:
|
||||
|
||||
The output file includes:
|
||||
|
||||
- A list of running goroutines.
|
||||
- A CPU profile.
|
||||
- A heap profile.
|
||||
- Your copy of go-ipfs.
|
||||
- The output of 'ipfs version --all'.
|
||||
|
||||
It does not include:
|
||||
|
||||
- Any of your IPFS data or metadata.
|
||||
- Your config or private key.
|
||||
- Your IP address.
|
||||
- The contents of your computer's memory, filesystem, etc.
|
||||
|
||||
However, it could reveal:
|
||||
|
||||
- Your build path, if you built go-ipfs yourself.
|
||||
- If and how a command/feature is being used (inferred from running functions).
|
||||
- Memory offsets of various data structures.
|
||||
- Any modifications you've made to go-ipfs.
|
||||
`,
|
||||
},
|
||||
NoLocal: true,
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption(outputOptionName, "o", "The path where the output should be stored."),
|
||||
cmds.StringOption(cpuProfileTimeOption, "The amount of time spent profiling CPU usage.").WithDefault("30s"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
cpuProfileTimeStr, _ := req.Options[cpuProfileTimeOption].(string)
|
||||
cpuProfileTime, err := time.ParseDuration(cpuProfileTimeStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse CPU profile duration %q: %w", cpuProfileTimeStr, err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
_ = w.CloseWithError(writeProfiles(req.Context, cpuProfileTime, w))
|
||||
}()
|
||||
return res.Emit(r)
|
||||
},
|
||||
PostRun: cmds.PostRunMap{
|
||||
cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
|
||||
v, err := res.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
outReader, ok := v.(io.Reader)
|
||||
if !ok {
|
||||
return e.New(e.TypeErr(outReader, v))
|
||||
}
|
||||
|
||||
outPath, _ := res.Request().Options[outputOptionName].(string)
|
||||
if outPath == "" {
|
||||
outPath = "ipfs-profile-" + time.Now().Format(timeFormat) + ".zip"
|
||||
}
|
||||
fi, err := os.Create(outPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fi.Close()
|
||||
|
||||
_, err = io.Copy(fi, outReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return re.Emit(&profileResult{File: outPath})
|
||||
},
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *profileResult) error {
|
||||
fmt.Fprintf(w, "Wrote profiles to: %s\n", out.File)
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
func writeProfiles(ctx context.Context, cpuProfileTime time.Duration, w io.Writer) error {
|
||||
archive := zip.NewWriter(w)
|
||||
|
||||
// Take some profiles.
|
||||
type profile struct {
|
||||
name string
|
||||
file string
|
||||
debug int
|
||||
}
|
||||
|
||||
profiles := []profile{{
|
||||
name: "goroutine",
|
||||
file: "goroutines.stacks",
|
||||
debug: 2,
|
||||
}, {
|
||||
name: "goroutine",
|
||||
file: "goroutines.pprof",
|
||||
}, {
|
||||
name: "heap",
|
||||
file: "heap.pprof",
|
||||
}}
|
||||
|
||||
for _, profile := range profiles {
|
||||
prof := pprof.Lookup(profile.name)
|
||||
out, err := archive.Create(profile.file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = prof.WriteTo(out, profile.debug)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Take a CPU profile.
|
||||
if cpuProfileTime != 0 {
|
||||
out, err := archive.Create("cpu.pprof")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeCPUProfile(ctx, cpuProfileTime, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Collect version info
|
||||
// I'd use diag sysinfo, but that includes some more sensitive information
|
||||
// (GOPATH, etc.).
|
||||
{
|
||||
out, err := archive.Create("version.json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.NewEncoder(out).Encode(getVersionInfo())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Collect binary
|
||||
if fi, err := openIPFSBinary(); err == nil {
|
||||
fname := "ipfs"
|
||||
if runtime.GOOS == "windows" {
|
||||
fname += ".exe"
|
||||
}
|
||||
|
||||
out, err := archive.Create(fname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(out, fi)
|
||||
_ = fi.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return archive.Close()
|
||||
}
|
||||
|
||||
func writeCPUProfile(ctx context.Context, d time.Duration, w io.Writer) error {
|
||||
if err := pprof.StartCPUProfile(w); err != nil {
|
||||
return err
|
||||
}
|
||||
defer pprof.StopCPUProfile()
|
||||
|
||||
timer := time.NewTimer(d)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func openIPFSBinary() (*os.File, error) {
|
||||
if runtime.GOOS == "linux" {
|
||||
pid := os.Getpid()
|
||||
fi, err := os.Open(fmt.Sprintf("/proc/%d/exe", pid))
|
||||
if err == nil {
|
||||
return fi, nil
|
||||
}
|
||||
}
|
||||
path, err := os.Executable()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return os.Open(path)
|
||||
}
|
||||
@ -43,7 +43,10 @@ DATA STRUCTURE COMMANDS
|
||||
dag Interact with IPLD DAG nodes
|
||||
files Interact with files as if they were a unix filesystem
|
||||
block Interact with raw blocks in the datastore
|
||||
|
||||
TEXT ENCODING COMMANDS
|
||||
cid Convert and discover properties of CIDs
|
||||
multibase Encode and decode data with Multibase format
|
||||
|
||||
ADVANCED COMMANDS
|
||||
daemon Start a long-running daemon process
|
||||
@ -151,6 +154,7 @@ var rootSubcommands = map[string]*cmds.Command{
|
||||
"version": VersionCmd,
|
||||
"shutdown": daemonShutdownCmd,
|
||||
"cid": CidCmd,
|
||||
"multibase": MbaseCmd,
|
||||
}
|
||||
|
||||
// RootRO is the readonly version of Root
|
||||
|
||||
@ -51,6 +51,7 @@ ipfs peers in the internet.
|
||||
"disconnect": swarmDisconnectCmd,
|
||||
"filters": swarmFiltersCmd,
|
||||
"peers": swarmPeersCmd,
|
||||
"peering": swarmPeeringCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -61,6 +62,149 @@ const (
|
||||
swarmDirectionOptionName = "direction"
|
||||
)
|
||||
|
||||
type peeringResult struct {
|
||||
ID peer.ID
|
||||
Status string
|
||||
}
|
||||
|
||||
var swarmPeeringCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Modify the peering subsystem.",
|
||||
ShortDescription: `
|
||||
'ipfs swarm peering' manages the peering subsystem.
|
||||
Peers in the peering subsystem is maintained to be connected, reconnected
|
||||
on disconnect with a back-off.
|
||||
The changes are not saved to the config.
|
||||
`,
|
||||
},
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
"add": swarmPeeringAddCmd,
|
||||
"ls": swarmPeeringLsCmd,
|
||||
"rm": swarmPeeringRmCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var swarmPeeringAddCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Add peers into the peering subsystem.",
|
||||
ShortDescription: `
|
||||
'ipfs swarm peering add' will add the new address to the peering subsystem as one that should always be connected to.
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("address", true, true, "address of peer to add into the peering subsystem"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
addrs := make([]ma.Multiaddr, len(req.Arguments))
|
||||
|
||||
for i, arg := range req.Arguments {
|
||||
addr, err := ma.NewMultiaddr(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
addrs[i] = addr
|
||||
}
|
||||
|
||||
addInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
node, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, addrinfo := range addInfos {
|
||||
node.Peering.AddPeer(addrinfo)
|
||||
err = res.Emit(peeringResult{addrinfo.ID, "success"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, pr *peeringResult) error {
|
||||
fmt.Fprintf(w, "add %s %s\n", pr.ID.String(), pr.Status)
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
Type: peeringResult{},
|
||||
}
|
||||
|
||||
var swarmPeeringLsCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "List peers registered in the peering subsystem.",
|
||||
ShortDescription: `
|
||||
'ipfs swarm peering ls' lists the peers that are registered in the peering subsystem and to which the daemon is always connected.
|
||||
`,
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
node, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peers := node.Peering.ListPeers()
|
||||
return cmds.EmitOnce(res, addrInfos{Peers: peers})
|
||||
},
|
||||
Type: addrInfos{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, ai *addrInfos) error {
|
||||
for _, info := range ai.Peers {
|
||||
fmt.Fprintf(w, "%s\n", info.ID)
|
||||
for _, addr := range info.Addrs {
|
||||
fmt.Fprintf(w, "\t%s\n", addr)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
type addrInfos struct {
|
||||
Peers []peer.AddrInfo
|
||||
}
|
||||
|
||||
var swarmPeeringRmCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Remove a peer from the peering subsystem.",
|
||||
ShortDescription: `
|
||||
'ipfs swarm peering rm' will remove the given ID from the peering subsystem and remove it from the always-on connection.
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("ID", true, true, "ID of peer to remove from the peering subsystem"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
node, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, arg := range req.Arguments {
|
||||
id, err := peer.Decode(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
node.Peering.RemovePeer(id)
|
||||
if err = res.Emit(peeringResult{id, "success"}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Type: peeringResult{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, pr *peeringResult) error {
|
||||
fmt.Fprintf(w, "add %s %s\n", pr.ID.String(), pr.Status)
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
var swarmPeersCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "List peers with open connections.",
|
||||
|
||||
@ -6,6 +6,7 @@ import (
|
||||
"runtime"
|
||||
|
||||
version "github.com/ipfs/go-ipfs"
|
||||
"github.com/ipfs/go-ipfs/core"
|
||||
cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv"
|
||||
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
@ -21,42 +22,51 @@ Prints out information about your computer to aid in easier debugging.
|
||||
`,
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
info := make(map[string]interface{})
|
||||
err := runtimeInfo(info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = envVarInfo(info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = diskSpaceInfo(info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = memInfo(info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nd, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = netInfo(nd.IsOnline, info)
|
||||
info, err := getInfo(nd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info["ipfs_version"] = version.CurrentVersionNumber
|
||||
info["ipfs_commit"] = version.CurrentCommit
|
||||
return cmds.EmitOnce(res, info)
|
||||
},
|
||||
}
|
||||
|
||||
func getInfo(nd *core.IpfsNode) (map[string]interface{}, error) {
|
||||
info := make(map[string]interface{})
|
||||
err := runtimeInfo(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = envVarInfo(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = diskSpaceInfo(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = memInfo(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = netInfo(nd.IsOnline, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info["ipfs_version"] = version.CurrentVersionNumber
|
||||
info["ipfs_commit"] = version.CurrentCommit
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func runtimeInfo(out map[string]interface{}) error {
|
||||
rt := make(map[string]interface{})
|
||||
rt["os"] = runtime.GOOS
|
||||
|
||||
@ -28,6 +28,16 @@ const (
|
||||
versionAllOptionName = "all"
|
||||
)
|
||||
|
||||
func getVersionInfo() *VersionOutput {
|
||||
return &VersionOutput{
|
||||
Version: version.CurrentVersionNumber,
|
||||
Commit: version.CurrentCommit,
|
||||
Repo: fmt.Sprint(fsrepo.RepoVersion),
|
||||
System: runtime.GOARCH + "/" + runtime.GOOS, //TODO: Precise version here
|
||||
Golang: runtime.Version(),
|
||||
}
|
||||
}
|
||||
|
||||
var VersionCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Show IPFS version information.",
|
||||
@ -46,13 +56,7 @@ var VersionCmd = &cmds.Command{
|
||||
// must be permitted to run before init
|
||||
Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)),
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
return cmds.EmitOnce(res, &VersionOutput{
|
||||
Version: version.CurrentVersionNumber,
|
||||
Commit: version.CurrentCommit,
|
||||
Repo: fmt.Sprint(fsrepo.RepoVersion),
|
||||
System: runtime.GOARCH + "/" + runtime.GOOS, //TODO: Precise version here
|
||||
Golang: runtime.Version(),
|
||||
})
|
||||
return cmds.EmitOnce(res, getVersionInfo())
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, version *VersionOutput) error {
|
||||
|
||||
31
core/core.go
31
core/core.go
@ -17,6 +17,7 @@ import (
|
||||
"github.com/ipfs/go-ipfs-pinner"
|
||||
|
||||
bserv "github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-fetcher"
|
||||
"github.com/ipfs/go-graphsync"
|
||||
bstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
exchange "github.com/ipfs/go-ipfs-exchange-interface"
|
||||
@ -24,7 +25,6 @@ import (
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
logging "github.com/ipfs/go-log"
|
||||
mfs "github.com/ipfs/go-mfs"
|
||||
resolver "github.com/ipfs/go-path/resolver"
|
||||
goprocess "github.com/jbenet/goprocess"
|
||||
connmgr "github.com/libp2p/go-libp2p-core/connmgr"
|
||||
ic "github.com/libp2p/go-libp2p-core/crypto"
|
||||
@ -37,7 +37,7 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
psrouter "github.com/libp2p/go-libp2p-pubsub-router"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/libp2p/go-libp2p/p2p/discovery"
|
||||
"github.com/libp2p/go-libp2p/p2p/discovery/mdns"
|
||||
p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
madns "github.com/multiformats/go-multiaddr-dns"
|
||||
@ -70,22 +70,23 @@ type IpfsNode struct {
|
||||
PNetFingerprint libp2p.PNetFingerprint `optional:"true"` // fingerprint of private network
|
||||
|
||||
// Services
|
||||
Peerstore pstore.Peerstore `optional:"true"` // storage for other Peer instances
|
||||
Blockstore bstore.GCBlockstore // the block store (lower level)
|
||||
Filestore *filestore.Filestore `optional:"true"` // the filestore blockstore
|
||||
BaseBlocks node.BaseBlocks // the raw blockstore, no filestore wrapping
|
||||
GCLocker bstore.GCLocker // the locker used to protect the blockstore during gc
|
||||
Blocks bserv.BlockService // the block service, get/add blocks.
|
||||
DAG ipld.DAGService // the merkle dag service, get/add objects.
|
||||
Resolver *resolver.Resolver // the path resolution system
|
||||
Reporter *metrics.BandwidthCounter `optional:"true"`
|
||||
Discovery discovery.Service `optional:"true"`
|
||||
FilesRoot *mfs.Root
|
||||
RecordValidator record.Validator
|
||||
Peerstore pstore.Peerstore `optional:"true"` // storage for other Peer instances
|
||||
Blockstore bstore.GCBlockstore // the block store (lower level)
|
||||
Filestore *filestore.Filestore `optional:"true"` // the filestore blockstore
|
||||
BaseBlocks node.BaseBlocks // the raw blockstore, no filestore wrapping
|
||||
GCLocker bstore.GCLocker // the locker used to protect the blockstore during gc
|
||||
Blocks bserv.BlockService // the block service, get/add blocks.
|
||||
DAG ipld.DAGService // the merkle dag service, get/add objects.
|
||||
IPLDFetcherFactory fetcher.Factory `name:"ipldFetcher"` // fetcher that paths over the IPLD data model
|
||||
UnixFSFetcherFactory fetcher.Factory `name:"unixfsFetcher"` // fetcher that interprets UnixFS data
|
||||
Reporter *metrics.BandwidthCounter `optional:"true"`
|
||||
Discovery mdns.Service `optional:"true"`
|
||||
FilesRoot *mfs.Root
|
||||
RecordValidator record.Validator
|
||||
|
||||
// Online
|
||||
PeerHost p2phost.Host `optional:"true"` // the network host (server+client)
|
||||
Peering peering.PeeringService `optional:"true"`
|
||||
Peering *peering.PeeringService `optional:"true"`
|
||||
Filters *ma.Filters `optional:"true"`
|
||||
Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper
|
||||
Routing routing.Routing `optional:"true"` // the routing system. recommend ipfs-dht
|
||||
|
||||
@ -19,11 +19,12 @@ import (
|
||||
"fmt"
|
||||
|
||||
bserv "github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-ipfs-blockstore"
|
||||
"github.com/ipfs/go-ipfs-exchange-interface"
|
||||
"github.com/ipfs/go-fetcher"
|
||||
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
exchange "github.com/ipfs/go-ipfs-exchange-interface"
|
||||
offlinexch "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
"github.com/ipfs/go-ipfs-pinner"
|
||||
"github.com/ipfs/go-ipfs-provider"
|
||||
pin "github.com/ipfs/go-ipfs-pinner"
|
||||
provider "github.com/ipfs/go-ipfs-provider"
|
||||
offlineroute "github.com/ipfs/go-ipfs-routing/offline"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
dag "github.com/ipfs/go-merkledag"
|
||||
@ -55,13 +56,14 @@ type CoreAPI struct {
|
||||
baseBlocks blockstore.Blockstore
|
||||
pinning pin.Pinner
|
||||
|
||||
blocks bserv.BlockService
|
||||
dag ipld.DAGService
|
||||
|
||||
peerstore pstore.Peerstore
|
||||
peerHost p2phost.Host
|
||||
recordValidator record.Validator
|
||||
exchange exchange.Interface
|
||||
blocks bserv.BlockService
|
||||
dag ipld.DAGService
|
||||
ipldFetcherFactory fetcher.Factory
|
||||
unixFSFetcherFactory fetcher.Factory
|
||||
peerstore pstore.Peerstore
|
||||
peerHost p2phost.Host
|
||||
recordValidator record.Validator
|
||||
exchange exchange.Interface
|
||||
|
||||
namesys namesys.NameSystem
|
||||
routing routing.Routing
|
||||
@ -167,8 +169,10 @@ func (api *CoreAPI) WithOptions(opts ...options.ApiOption) (coreiface.CoreAPI, e
|
||||
baseBlocks: n.BaseBlocks,
|
||||
pinning: n.Pinning,
|
||||
|
||||
blocks: n.Blocks,
|
||||
dag: n.DAG,
|
||||
blocks: n.Blocks,
|
||||
dag: n.DAG,
|
||||
ipldFetcherFactory: n.IPLDFetcherFactory,
|
||||
unixFSFetcherFactory: n.UnixFSFetcherFactory,
|
||||
|
||||
peerstore: n.Peerstore,
|
||||
peerHost: n.PeerHost,
|
||||
|
||||
@ -8,10 +8,10 @@ import (
|
||||
"github.com/ipfs/go-namesys/resolve"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-fetcher"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
ipfspath "github.com/ipfs/go-path"
|
||||
"github.com/ipfs/go-path/resolver"
|
||||
uio "github.com/ipfs/go-unixfs/io"
|
||||
ipfspathresolver "github.com/ipfs/go-path/resolver"
|
||||
coreiface "github.com/ipfs/interface-go-ipfs-core"
|
||||
path "github.com/ipfs/interface-go-ipfs-core/path"
|
||||
)
|
||||
@ -49,23 +49,19 @@ func (api *CoreAPI) ResolvePath(ctx context.Context, p path.Path) (path.Resolved
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resolveOnce resolver.ResolveOnce
|
||||
|
||||
switch ipath.Segments()[0] {
|
||||
case "ipfs":
|
||||
resolveOnce = uio.ResolveUnixfsOnce
|
||||
case "ipld":
|
||||
resolveOnce = resolver.ResolveSingle
|
||||
default:
|
||||
if ipath.Segments()[0] != "ipfs" && ipath.Segments()[0] != "ipld" {
|
||||
return nil, fmt.Errorf("unsupported path namespace: %s", p.Namespace())
|
||||
}
|
||||
|
||||
r := &resolver.Resolver{
|
||||
DAG: api.dag,
|
||||
ResolveOnce: resolveOnce,
|
||||
var dataFetcher fetcher.Factory
|
||||
if ipath.Segments()[0] == "ipld" {
|
||||
dataFetcher = api.ipldFetcherFactory
|
||||
} else {
|
||||
dataFetcher = api.unixFSFetcherFactory
|
||||
}
|
||||
resolver := ipfspathresolver.NewBasicResolver(dataFetcher)
|
||||
|
||||
node, rest, err := r.ResolveToLastNode(ctx, ipath)
|
||||
node, rest, err := resolver.ResolveToLastNode(ctx, ipath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -116,18 +116,16 @@ func (api *SwarmAPI) Peers(context.Context) ([]coreiface.ConnectionInfo, error)
|
||||
|
||||
conns := api.peerHost.Network().Conns()
|
||||
|
||||
var out []coreiface.ConnectionInfo
|
||||
out := make([]coreiface.ConnectionInfo, 0, len(conns))
|
||||
for _, c := range conns {
|
||||
pid := c.RemotePeer()
|
||||
addr := c.RemoteMultiaddr()
|
||||
|
||||
ci := &connInfo{
|
||||
peerstore: api.peerstore,
|
||||
conn: c,
|
||||
dir: c.Stat().Direction,
|
||||
|
||||
addr: addr,
|
||||
peer: pid,
|
||||
addr: c.RemoteMultiaddr(),
|
||||
peer: c.RemotePeer(),
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@ -22,7 +22,7 @@ import (
|
||||
"github.com/ipfs/go-ipfs-config"
|
||||
coreiface "github.com/ipfs/interface-go-ipfs-core"
|
||||
"github.com/ipfs/interface-go-ipfs-core/tests"
|
||||
ci "github.com/libp2p/go-libp2p-core/crypto"
|
||||
"github.com/libp2p/go-libp2p-core/crypto"
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/net/mock"
|
||||
)
|
||||
@ -40,7 +40,7 @@ func (NodeProvider) MakeAPISwarm(ctx context.Context, fullIdentity bool, n int)
|
||||
for i := 0; i < n; i++ {
|
||||
var ident config.Identity
|
||||
if fullIdentity {
|
||||
sk, pk, err := ci.GenerateKeyPair(ci.RSA, 2048)
|
||||
sk, pk, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -50,7 +50,7 @@ func (NodeProvider) MakeAPISwarm(ctx context.Context, fullIdentity bool, n int)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kbytes, err := sk.Bytes()
|
||||
kbytes, err := crypto.MarshalPrivateKey(sk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
79
core/coreapi/test/path_test.go
Normal file
79
core/coreapi/test/path_test.go
Normal file
@ -0,0 +1,79 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
uio "github.com/ipfs/go-unixfs/io"
|
||||
"github.com/ipfs/interface-go-ipfs-core/options"
|
||||
"github.com/ipfs/interface-go-ipfs-core/path"
|
||||
"github.com/ipld/go-ipld-prime"
|
||||
)
|
||||
|
||||
|
||||
func TestPathUnixFSHAMTPartial(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Create a node
|
||||
apis, err := NodeProvider{}.MakeAPISwarm(ctx, true, 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
a := apis[0]
|
||||
|
||||
// Setting this after instantiating the swarm so that it's not clobbered by loading the go-ipfs config
|
||||
prevVal := uio.UseHAMTSharding
|
||||
uio.UseHAMTSharding = true
|
||||
defer func() {
|
||||
uio.UseHAMTSharding = prevVal
|
||||
}()
|
||||
|
||||
// Create and add a sharded directory
|
||||
dir := make(map[string]files.Node)
|
||||
// Make sure we have at least two levels of sharding
|
||||
for i := 0; i < uio.DefaultShardWidth + 1; i++ {
|
||||
dir[strconv.Itoa(i)] = files.NewBytesFile([]byte(strconv.Itoa(i)))
|
||||
}
|
||||
|
||||
r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Get the root of the directory
|
||||
nd, err := a.Dag().Get(ctx, r.Cid())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Make sure the root is a DagPB node (this API might change in the future to account for ADLs)
|
||||
_ = nd.(ipld.Node)
|
||||
pbNode := nd.(*merkledag.ProtoNode)
|
||||
|
||||
// Remove one of the sharded directory blocks
|
||||
if err := a.Block().Rm(ctx, path.IpfsPath(pbNode.Links()[0].Cid)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Try and resolve each of the entries in the sharded directory which will result in pathing over the missing block
|
||||
//
|
||||
// Note: we could just check a particular path here, but it would require either greater use of the HAMT internals
|
||||
// or some hard coded values in the test both of which would be a pain to follow.
|
||||
for k := range dir {
|
||||
// The node will go out to the (non-existent) network looking for the missing block. Make sure we're erroring
|
||||
// because we exceeded the timeout on our query
|
||||
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, time.Second * 1)
|
||||
_, err := a.ResolveNode(timeoutCtx, path.Join(r, k))
|
||||
if err != nil {
|
||||
if timeoutCtx.Err() == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
timeoutCancel()
|
||||
}
|
||||
}
|
||||
@ -104,7 +104,7 @@ func VersionOption() ServeOption {
|
||||
return func(_ *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
|
||||
mux.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, "Commit: %s\n", version.CurrentCommit)
|
||||
fmt.Fprintf(w, "Client Version: %s\n", version.UserAgent)
|
||||
fmt.Fprintf(w, "Client Version: %s\n", version.GetUserAgentVersion())
|
||||
fmt.Fprintf(w, "Protocol Version: %s\n", id.LibP2PVersion)
|
||||
})
|
||||
return mux, nil
|
||||
|
||||
@ -28,6 +28,7 @@ import (
|
||||
coreiface "github.com/ipfs/interface-go-ipfs-core"
|
||||
ipath "github.com/ipfs/interface-go-ipfs-core/path"
|
||||
routing "github.com/libp2p/go-libp2p-core/routing"
|
||||
prometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -62,6 +63,8 @@ type redirectTemplateData struct {
|
||||
type gatewayHandler struct {
|
||||
config GatewayConfig
|
||||
api coreiface.CoreAPI
|
||||
|
||||
unixfsGetMetric *prometheus.SummaryVec
|
||||
}
|
||||
|
||||
// StatusResponseWriter enables us to override HTTP Status Code passed to
|
||||
@ -84,9 +87,27 @@ func (sw *statusResponseWriter) WriteHeader(code int) {
|
||||
}
|
||||
|
||||
func newGatewayHandler(c GatewayConfig, api coreiface.CoreAPI) *gatewayHandler {
|
||||
unixfsGetMetric := prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Namespace: "ipfs",
|
||||
Subsystem: "http",
|
||||
Name: "unixfs_get_latency_seconds",
|
||||
Help: "The time till the first block is received when 'getting' a file from the gateway.",
|
||||
},
|
||||
[]string{"gateway"},
|
||||
)
|
||||
if err := prometheus.Register(unixfsGetMetric); err != nil {
|
||||
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
|
||||
unixfsGetMetric = are.ExistingCollector.(*prometheus.SummaryVec)
|
||||
} else {
|
||||
log.Errorf("failed to register unixfsGetMetric: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
i := &gatewayHandler{
|
||||
config: c,
|
||||
api: api,
|
||||
config: c,
|
||||
api: api,
|
||||
unixfsGetMetric: unixfsGetMetric,
|
||||
}
|
||||
return i
|
||||
}
|
||||
@ -271,7 +292,7 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
|
||||
unixfsGetMetric.WithLabelValues(parsedPath.Namespace()).Observe(time.Since(begin).Seconds())
|
||||
i.unixfsGetMetric.WithLabelValues(parsedPath.Namespace()).Observe(time.Since(begin).Seconds())
|
||||
|
||||
defer dr.Close()
|
||||
|
||||
@ -391,11 +412,12 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
|
||||
size = humanize.Bytes(uint64(s))
|
||||
}
|
||||
|
||||
hash := ""
|
||||
if r, err := i.api.ResolvePath(r.Context(), ipath.Join(resolvedPath, dirit.Name())); err == nil {
|
||||
// Path may not be resolved. Continue anyways.
|
||||
hash = r.Cid().String()
|
||||
resolved, err := i.api.ResolvePath(r.Context(), ipath.Join(resolvedPath, dirit.Name()))
|
||||
if err != nil {
|
||||
internalWebError(w, err)
|
||||
return
|
||||
}
|
||||
hash := resolved.Cid().String()
|
||||
|
||||
// See comment above where originalUrlPath is declared.
|
||||
di := directoryItem{
|
||||
|
||||
@ -75,6 +75,9 @@ func breadcrumbs(urlPath string, dnslinkOrigin bool) []breadcrumb {
|
||||
}
|
||||
|
||||
func shortHash(hash string) string {
|
||||
if len(hash) <= 8 {
|
||||
return hash
|
||||
}
|
||||
return (hash[0:4] + "\u2026" + hash[len(hash)-4:])
|
||||
}
|
||||
|
||||
|
||||
@ -732,7 +732,7 @@ func TestVersion(t *testing.T) {
|
||||
t.Fatalf("response doesn't contain commit:\n%s", s)
|
||||
}
|
||||
|
||||
if !strings.Contains(s, "Client Version: "+version.UserAgent) {
|
||||
if !strings.Contains(s, "Client Version: "+version.GetUserAgentVersion()) {
|
||||
t.Fatalf("response doesn't contain client version:\n%s", s)
|
||||
}
|
||||
|
||||
|
||||
@ -14,7 +14,7 @@ import (
|
||||
promhttp "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
// This adds the scraping endpoint which Prometheus uses to fetch metrics.
|
||||
// MetricsScrapingOption adds the scraping endpoint which Prometheus uses to fetch metrics.
|
||||
func MetricsScrapingOption(path string) ServeOption {
|
||||
return func(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
|
||||
mux.Handle(path, promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
|
||||
@ -51,7 +51,7 @@ func MetricsOpenCensusCollectionOption() ServeOption {
|
||||
}
|
||||
}
|
||||
|
||||
// This adds collection of net/http-related metrics
|
||||
// MetricsCollectionOption adds collection of net/http-related metrics.
|
||||
func MetricsCollectionOption(handlerName string) ServeOption {
|
||||
return func(_ *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
|
||||
// Adapted from github.com/prometheus/client_golang/prometheus/http.go
|
||||
@ -130,14 +130,10 @@ func MetricsCollectionOption(handlerName string) ServeOption {
|
||||
var (
|
||||
peersTotalMetric = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("ipfs", "p2p", "peers_total"),
|
||||
"Number of connected peers", []string{"transport"}, nil)
|
||||
|
||||
unixfsGetMetric = prometheus.NewSummaryVec(prometheus.SummaryOpts{
|
||||
Namespace: "ipfs",
|
||||
Subsystem: "http",
|
||||
Name: "unixfs_get_latency_seconds",
|
||||
Help: "The time till the first block is received when 'getting' a file from the gateway.",
|
||||
}, []string{"namespace"})
|
||||
"Number of connected peers",
|
||||
[]string{"transport"},
|
||||
nil,
|
||||
)
|
||||
)
|
||||
|
||||
type IpfsNodeCollector struct {
|
||||
|
||||
@ -5,7 +5,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
"github.com/ipfs/go-ipfs/core"
|
||||
|
||||
inet "github.com/libp2p/go-libp2p-core/network"
|
||||
swarmt "github.com/libp2p/go-libp2p-swarm/testing"
|
||||
@ -20,7 +20,11 @@ func TestPeersTotal(t *testing.T) {
|
||||
|
||||
hosts := make([]*bhost.BasicHost, 4)
|
||||
for i := 0; i < 4; i++ {
|
||||
hosts[i] = bhost.New(swarmt.GenSwarm(t, ctx))
|
||||
var err error
|
||||
hosts[i], err = bhost.NewHost(ctx, swarmt.GenSwarm(t, ctx), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
dial := func(a, b inet.Network) {
|
||||
|
||||
@ -1,11 +1,12 @@
|
||||
package corehttp
|
||||
|
||||
// TODO: move to IPNS
|
||||
const WebUIPath = "/ipfs/bafybeiflkjt66aetfgcrgvv75izymd5kc47g6luepqmfq6zsf5w6ueth6y" // v2.12.4
|
||||
const WebUIPath = "/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva" // v2.13.0
|
||||
|
||||
// this is a list of all past webUI paths.
|
||||
var WebUIPaths = []string{
|
||||
WebUIPath,
|
||||
"/ipfs/bafybeiflkjt66aetfgcrgvv75izymd5kc47g6luepqmfq6zsf5w6ueth6y",
|
||||
"/ipfs/bafybeid26vjplsejg7t3nrh7mxmiaaxriebbm4xxrxxdunlk7o337m5sqq",
|
||||
"/ipfs/bafybeif4zkmu7qdhkpf3pnhwxipylqleof7rl6ojbe7mq3fzogz6m4xk3i",
|
||||
"/ipfs/bafybeianwe4vy7sprht5sm3hshvxjeqhwcmvbzq73u55sdhqngmohkjgs4",
|
||||
|
||||
52
core/node/bitswap.go
Normal file
52
core/node/bitswap.go
Normal file
@ -0,0 +1,52 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ipfs/go-bitswap"
|
||||
"github.com/ipfs/go-bitswap/network"
|
||||
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
config "github.com/ipfs/go-ipfs-config"
|
||||
exchange "github.com/ipfs/go-ipfs-exchange-interface"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
"github.com/libp2p/go-libp2p-core/routing"
|
||||
"go.uber.org/fx"
|
||||
|
||||
"github.com/ipfs/go-ipfs/core/node/helpers"
|
||||
)
|
||||
|
||||
const (
|
||||
// Docs: https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#internalbitswap
|
||||
DefaultEngineBlockstoreWorkerCount = 128
|
||||
DefaultTaskWorkerCount = 8
|
||||
DefaultEngineTaskWorkerCount = 8
|
||||
DefaultMaxOutstandingBytesPerPeer = 1 << 20
|
||||
)
|
||||
|
||||
// OnlineExchange creates new LibP2P backed block exchange (BitSwap)
|
||||
func OnlineExchange(cfg *config.Config, provide bool) interface{} {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs blockstore.GCBlockstore) exchange.Interface {
|
||||
bitswapNetwork := network.NewFromIpfsHost(host, rt)
|
||||
|
||||
var internalBsCfg config.InternalBitswap
|
||||
if cfg.Internal.Bitswap != nil {
|
||||
internalBsCfg = *cfg.Internal.Bitswap
|
||||
}
|
||||
|
||||
opts := []bitswap.Option{
|
||||
bitswap.ProvideEnabled(provide),
|
||||
bitswap.EngineBlockstoreWorkerCount(int(internalBsCfg.EngineBlockstoreWorkerCount.WithDefault(DefaultEngineBlockstoreWorkerCount))),
|
||||
bitswap.TaskWorkerCount(int(internalBsCfg.TaskWorkerCount.WithDefault(DefaultTaskWorkerCount))),
|
||||
bitswap.EngineTaskWorkerCount(int(internalBsCfg.EngineTaskWorkerCount.WithDefault(DefaultEngineTaskWorkerCount))),
|
||||
bitswap.MaxOutstandingBytesPerPeer(int(internalBsCfg.MaxOutstandingBytesPerPeer.WithDefault(DefaultMaxOutstandingBytesPerPeer))),
|
||||
}
|
||||
exch := bitswap.New(helpers.LifecycleCtx(mctx, lc), bitswapNetwork, bs, opts...)
|
||||
lc.Append(fx.Hook{
|
||||
OnStop: func(ctx context.Context) error {
|
||||
return exch.Close()
|
||||
},
|
||||
})
|
||||
return exch
|
||||
|
||||
}
|
||||
}
|
||||
@ -15,7 +15,7 @@ import (
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsync "github.com/ipfs/go-datastore/sync"
|
||||
cfg "github.com/ipfs/go-ipfs-config"
|
||||
ci "github.com/libp2p/go-libp2p-core/crypto"
|
||||
"github.com/libp2p/go-libp2p-core/crypto"
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
)
|
||||
|
||||
@ -124,7 +124,7 @@ func (cfg *BuildCfg) options(ctx context.Context) (fx.Option, *cfg.Config) {
|
||||
|
||||
func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
|
||||
c := cfg.Config{}
|
||||
priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 2048, rand.Reader)
|
||||
priv, pub, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -134,7 +134,7 @@ func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
privkeyb, err := priv.Bytes()
|
||||
privkeyb, err := crypto.MarshalPrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -3,24 +3,26 @@ package node
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-bitswap"
|
||||
"github.com/ipfs/go-bitswap/network"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-fetcher"
|
||||
bsfetcher "github.com/ipfs/go-fetcher/impl/blockservice"
|
||||
"github.com/ipfs/go-filestore"
|
||||
"github.com/ipfs/go-ipfs-blockstore"
|
||||
"github.com/ipfs/go-ipfs-exchange-interface"
|
||||
"github.com/ipfs/go-ipfs-pinner"
|
||||
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
exchange "github.com/ipfs/go-ipfs-exchange-interface"
|
||||
pin "github.com/ipfs/go-ipfs-pinner"
|
||||
"github.com/ipfs/go-ipfs-pinner/dspinner"
|
||||
"github.com/ipfs/go-ipld-format"
|
||||
format "github.com/ipfs/go-ipld-format"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
"github.com/ipfs/go-mfs"
|
||||
"github.com/ipfs/go-unixfs"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
"github.com/libp2p/go-libp2p-core/routing"
|
||||
"github.com/ipfs/go-unixfsnode"
|
||||
dagpb "github.com/ipld/go-codec-dagpb"
|
||||
"github.com/ipld/go-ipld-prime"
|
||||
basicnode "github.com/ipld/go-ipld-prime/node/basic"
|
||||
"github.com/ipld/go-ipld-prime/schema"
|
||||
"go.uber.org/fx"
|
||||
|
||||
"github.com/ipfs/go-ipfs/core/node/helpers"
|
||||
@ -52,8 +54,7 @@ func Pinning(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo)
|
||||
}
|
||||
syncDs := &syncDagService{ds, syncFn}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 2*time.Minute)
|
||||
defer cancel()
|
||||
ctx := context.TODO()
|
||||
|
||||
pinning, err := dspinner.New(ctx, rootDS, syncDs)
|
||||
if err != nil {
|
||||
@ -82,26 +83,31 @@ func (s *syncDagService) Session(ctx context.Context) format.NodeGetter {
|
||||
return merkledag.NewSession(ctx, s.DAGService)
|
||||
}
|
||||
|
||||
type fetchersOut struct {
|
||||
fx.Out
|
||||
IPLDFetcher fetcher.Factory `name:"ipldFetcher"`
|
||||
UnixfsFetcher fetcher.Factory `name:"unixfsFetcher"`
|
||||
}
|
||||
|
||||
// FetcherConfig returns a fetcher config that can build new fetcher instances
|
||||
func FetcherConfig(bs blockservice.BlockService) fetchersOut {
|
||||
ipldFetcher := bsfetcher.NewFetcherConfig(bs)
|
||||
ipldFetcher.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) {
|
||||
if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok {
|
||||
return tlnkNd.LinkTargetNodePrototype(), nil
|
||||
}
|
||||
return basicnode.Prototype.Any, nil
|
||||
})
|
||||
|
||||
unixFSFetcher := ipldFetcher.WithReifier(unixfsnode.Reify)
|
||||
return fetchersOut{IPLDFetcher: ipldFetcher, UnixfsFetcher: unixFSFetcher}
|
||||
}
|
||||
|
||||
// Dag creates new DAGService
|
||||
func Dag(bs blockservice.BlockService) format.DAGService {
|
||||
return merkledag.NewDAGService(bs)
|
||||
}
|
||||
|
||||
// OnlineExchange creates new LibP2P backed block exchange (BitSwap)
|
||||
func OnlineExchange(provide bool) interface{} {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs blockstore.GCBlockstore) exchange.Interface {
|
||||
bitswapNetwork := network.NewFromIpfsHost(host, rt)
|
||||
exch := bitswap.New(helpers.LifecycleCtx(mctx, lc), bitswapNetwork, bs, bitswap.ProvideEnabled(provide))
|
||||
lc.Append(fx.Hook{
|
||||
OnStop: func(ctx context.Context) error {
|
||||
return exch.Close()
|
||||
},
|
||||
})
|
||||
return exch
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Files loads persisted MFS root
|
||||
func Files(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, error) {
|
||||
dsk := datastore.NewKey("/local/filesroot")
|
||||
|
||||
@ -18,7 +18,6 @@ import (
|
||||
|
||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
offroute "github.com/ipfs/go-ipfs-routing/offline"
|
||||
"github.com/ipfs/go-path/resolver"
|
||||
uio "github.com/ipfs/go-unixfs/io"
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
@ -263,7 +262,7 @@ func Online(bcfg *BuildCfg, cfg *config.Config) fx.Option {
|
||||
shouldBitswapProvide := !cfg.Experimental.StrategicProviding
|
||||
|
||||
return fx.Options(
|
||||
fx.Provide(OnlineExchange(shouldBitswapProvide)),
|
||||
fx.Provide(OnlineExchange(cfg, shouldBitswapProvide)),
|
||||
maybeProvide(Graphsync, cfg.Experimental.GraphsyncEnabled),
|
||||
fx.Provide(DNSResolver),
|
||||
fx.Provide(Namesys(ipnsCacheSize)),
|
||||
@ -294,7 +293,7 @@ func Offline(cfg *config.Config) fx.Option {
|
||||
var Core = fx.Options(
|
||||
fx.Provide(BlockService),
|
||||
fx.Provide(Dag),
|
||||
fx.Provide(resolver.NewBasicResolver),
|
||||
fx.Provide(FetcherConfig),
|
||||
fx.Provide(Pinning),
|
||||
fx.Provide(Files),
|
||||
)
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/libp2p/go-libp2p"
|
||||
host "github.com/libp2p/go-libp2p-core/host"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
mamask "github.com/whyrusleeping/multiaddr-filter"
|
||||
@ -13,13 +13,13 @@ import (
|
||||
func AddrFilters(filters []string) func() (*ma.Filters, Libp2pOpts, error) {
|
||||
return func() (filter *ma.Filters, opts Libp2pOpts, err error) {
|
||||
filter = ma.NewFilters()
|
||||
opts.Opts = append(opts.Opts, libp2p.Filters(filter)) //nolint
|
||||
opts.Opts = append(opts.Opts, libp2p.ConnectionGater((*filtersConnectionGater)(filter)))
|
||||
for _, s := range filters {
|
||||
f, err := mamask.NewMask(s)
|
||||
if err != nil {
|
||||
return filter, opts, fmt.Errorf("incorrectly formatted address filter in config: %s", s)
|
||||
}
|
||||
opts.Opts = append(opts.Opts, libp2p.FilterAddresses(f)) //nolint
|
||||
filter.AddFilter(*f, ma.ActionDeny)
|
||||
}
|
||||
return filter, opts, nil
|
||||
}
|
||||
|
||||
@ -6,7 +6,9 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/discovery"
|
||||
"github.com/libp2p/go-libp2p/p2p/discovery/mdns"
|
||||
legacymdns "github.com/libp2p/go-libp2p/p2p/discovery/mdns_legacy"
|
||||
|
||||
"go.uber.org/fx"
|
||||
|
||||
"github.com/ipfs/go-ipfs/core/node/helpers"
|
||||
@ -35,18 +37,21 @@ func DiscoveryHandler(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host)
|
||||
}
|
||||
}
|
||||
|
||||
func SetupDiscovery(mdns bool, mdnsInterval int) func(helpers.MetricsCtx, fx.Lifecycle, host.Host, *discoveryHandler) error {
|
||||
func SetupDiscovery(useMdns bool, mdnsInterval int) func(helpers.MetricsCtx, fx.Lifecycle, host.Host, *discoveryHandler) error {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, handler *discoveryHandler) error {
|
||||
if mdns {
|
||||
if useMdns {
|
||||
service := mdns.NewMdnsService(host, mdns.ServiceName)
|
||||
service.RegisterNotifee(handler)
|
||||
|
||||
if mdnsInterval == 0 {
|
||||
mdnsInterval = 5
|
||||
}
|
||||
service, err := discovery.NewMdnsService(helpers.LifecycleCtx(mctx, lc), host, time.Duration(mdnsInterval)*time.Second, discovery.ServiceTag)
|
||||
legacyService, err := legacymdns.NewMdnsService(mctx, host, time.Duration(mdnsInterval)*time.Second, legacymdns.ServiceTag)
|
||||
if err != nil {
|
||||
log.Error("mdns error: ", err)
|
||||
return nil
|
||||
}
|
||||
service.RegisterNotifee(handler)
|
||||
legacyService.RegisterNotifee(handler)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
36
core/node/libp2p/filters.go
Normal file
36
core/node/libp2p/filters.go
Normal file
@ -0,0 +1,36 @@
|
||||
package libp2p
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p-core/connmgr"
|
||||
"github.com/libp2p/go-libp2p-core/control"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// filtersConnectionGater is an adapter that turns multiaddr.Filter into a
|
||||
// connmgr.ConnectionGater.
|
||||
type filtersConnectionGater ma.Filters
|
||||
|
||||
var _ connmgr.ConnectionGater = (*filtersConnectionGater)(nil)
|
||||
|
||||
func (f *filtersConnectionGater) InterceptAddrDial(_ peer.ID, addr ma.Multiaddr) (allow bool) {
|
||||
return !(*ma.Filters)(f).AddrBlocked(addr)
|
||||
}
|
||||
|
||||
func (f *filtersConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *filtersConnectionGater) InterceptAccept(connAddr network.ConnMultiaddrs) (allow bool) {
|
||||
return !(*ma.Filters)(f).AddrBlocked(connAddr.RemoteMultiaddr())
|
||||
}
|
||||
|
||||
func (f *filtersConnectionGater) InterceptSecured(_ network.Direction, _ peer.ID, connAddr network.ConnMultiaddrs) (allow bool) {
|
||||
return !(*ma.Filters)(f).AddrBlocked(connAddr.RemoteMultiaddr())
|
||||
}
|
||||
|
||||
func (f *filtersConnectionGater) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
return true, 0
|
||||
}
|
||||
@ -25,8 +25,7 @@ type Libp2pOpts struct {
|
||||
}
|
||||
|
||||
// Misc options
|
||||
|
||||
var UserAgent = simpleOpt(libp2p.UserAgent(version.UserAgent))
|
||||
var UserAgent = simpleOpt(libp2p.UserAgent(version.GetUserAgentVersion()))
|
||||
|
||||
func ConnectionManager(low, high int, grace time.Duration) func() (opts Libp2pOpts, err error) {
|
||||
return func() (opts Libp2pOpts, err error) {
|
||||
|
||||
@ -9,18 +9,10 @@ import (
|
||||
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
|
||||
tcp "github.com/libp2p/go-tcp-transport"
|
||||
websocket "github.com/libp2p/go-ws-transport"
|
||||
quic "github.com/lucas-clemente/quic-go"
|
||||
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
// See https://github.com/ipfs/go-ipfs/issues/7526 and
|
||||
// https://github.com/lucas-clemente/quic-go/releases/tag/v0.17.3.
|
||||
// TODO: remove this once the network has upgraded to > v0.6.0.
|
||||
func init() {
|
||||
quic.RetireBugBackwardsCompatibilityMode = true
|
||||
}
|
||||
|
||||
func Transports(tptConfig config.Transports) interface{} {
|
||||
return func(pnet struct {
|
||||
fx.In
|
||||
|
||||
@ -5,12 +5,12 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-fetcher"
|
||||
"github.com/ipfs/go-ipfs-pinner"
|
||||
"github.com/ipfs/go-ipfs-provider"
|
||||
"github.com/ipfs/go-ipfs-provider/batched"
|
||||
q "github.com/ipfs/go-ipfs-provider/queue"
|
||||
"github.com/ipfs/go-ipfs-provider/simple"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/libp2p/go-libp2p-core/routing"
|
||||
"github.com/multiformats/go-multihash"
|
||||
"go.uber.org/fx"
|
||||
@ -172,7 +172,12 @@ func SimpleProviders(reprovideStrategy string, reprovideInterval string) fx.Opti
|
||||
}
|
||||
|
||||
func pinnedProviderStrategy(onlyRoots bool) interface{} {
|
||||
return func(pinner pin.Pinner, dag ipld.DAGService) simple.KeyChanFunc {
|
||||
return simple.NewPinnedProvider(onlyRoots, pinner, dag)
|
||||
type input struct {
|
||||
fx.In
|
||||
Pinner pin.Pinner
|
||||
IPLDFetcher fetcher.Factory `name:"ipldFetcher"`
|
||||
}
|
||||
return func(in input) simple.KeyChanFunc {
|
||||
return simple.NewPinnedProvider(onlyRoots, in.Pinner, in.IPLDFetcher)
|
||||
}
|
||||
}
|
||||
|
||||
27
docs/PATCH_RELEASE_TEMPLATE.md
Normal file
27
docs/PATCH_RELEASE_TEMPLATE.md
Normal file
@ -0,0 +1,27 @@
|
||||
# Patch Release Checklist
|
||||
|
||||
This process handles patch releases from version `vX.Y.Z` to `vX.Y.Z+1` assuming that `vX.Y.Z` is the latest released version of go-ipfs.
|
||||
|
||||
- [ ] Fork a new branch (`release-vX.Y.Z`) from `release` and cherry-pick the relevant commits from master (or custom fixes) onto this branch
|
||||
- [ ] Make a minimal changelog update tracking the relevant fixes to CHANGELOG.
|
||||
- [ ] version string in `version.go` has been updated (in the `release-vX.Y.Z+1` branch).
|
||||
- [ ] Make a PR merging `release-vX.Y.Z+1` into the release branch
|
||||
- [ ] tag the merge commit in the `release` branch with `vX.Y.Z+1`
|
||||
- [ ] upload to dist.ipfs.io
|
||||
1. Build: https://github.com/ipfs/distributions#usage.
|
||||
2. Pin the resulting release.
|
||||
3. Make a PR against ipfs/distributions with the updated versions, including the new hash in the PR comment.
|
||||
4. Ask the infra team to update the DNSLink record for dist.ipfs.io to point to the new distribution.
|
||||
- [ ] cut a release on [github](https://github.com/ipfs/go-ipfs/releases) and upload the result of the ipfs/distributions build in the previous step.
|
||||
- Announce the Release:
|
||||
- [ ] On IRC/Matrix (both #ipfs and #ipfs-dev)
|
||||
- [ ] On discuss.ipfs.io
|
||||
- [ ] Release published
|
||||
- [ ] to [dist.ipfs.io](https://dist.ipfs.io)
|
||||
- [ ] to [npm-go-ipfs](https://github.com/ipfs/npm-go-ipfs)
|
||||
- [ ] to [chocolatey](https://chocolatey.org/packages/ipfs)
|
||||
- [ ] to [snap](https://snapcraft.io/ipfs)
|
||||
- [ ] to [github](https://github.com/ipfs/go-ipfs/releases)
|
||||
- [ ] to [arch](https://www.archlinux.org/packages/community/x86_64/go-ipfs/) (flag it out of date)
|
||||
- [ ] Cut a new ipfs-desktop release
|
||||
- [ ] Merge the `release` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master).
|
||||
@ -62,7 +62,10 @@ Checklist:
|
||||
- [ ] Deploy new version to a subset of Preload nodes
|
||||
- [ ] Collect metrics every day. Work with the Infrastructure team to learn of any hiccup
|
||||
- [ ] IPFS Application Testing - Run the tests of the following applications:
|
||||
- [ ] [IPFS Desktop](https://github.com/ipfs-shipyard/ipfs-desktop) - @lidel
|
||||
- [ ] [IPFS Desktop](https://github.com/ipfs-shipyard/ipfs-desktop)
|
||||
- [ ] Ensure the RC is published to [the NPM package](https://www.npmjs.com/package/go-ipfs?activeTab=versions) ([happens automatically, just wait for CI](https://github.com/ipfs/npm-go-ipfs/actions))
|
||||
- [ ] Upgrade to the RC in [ipfs-desktop](https://github.com/ipfs-shipyard/ipfs-desktop) and push to a branch ([example](https://github.com/ipfs/ipfs-desktop/pull/1826/commits/b0a23db31ce942b46d95965ee6fe770fb24d6bde)), and open a draft PR to track through the final release ([example](https://github.com/ipfs/ipfs-desktop/pull/1826))
|
||||
- [ ] Ensure CI tests pass, repeat for new RCs
|
||||
- [ ] [IPFS Companion](https://github.com/ipfs-shipyard/ipfs-companion) - @lidel
|
||||
- [ ] [NPM on IPFS](https://github.com/ipfs-shipyard/npm-on-ipfs) - @achingbrain
|
||||
- [ ] **Stage 2 - Community Dev Testing**
|
||||
@ -106,6 +109,7 @@ Checklist:
|
||||
- [ ] Merge the `release` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master).
|
||||
- [ ] Create an issue using this release issue template for the _next_ release.
|
||||
- [ ] Make sure any last-minute changelog updates from the blog post make it back into the CHANGELOG.
|
||||
- [ ] Mark PR draft created for IPFS Desktop as ready for review.
|
||||
|
||||
## ❤️ Contributors
|
||||
|
||||
|
||||
@ -1,29 +1,13 @@
|
||||
Command Completion
|
||||
==================
|
||||
# Command Completion
|
||||
|
||||
Shell command completion is provided by the script at
|
||||
[/misc/completion/ipfs-completion.bash](../misc/completion/ipfs-completion.bash).
|
||||
Shell command completions can be generated by running one of the `ipfs commands completions`
|
||||
sub-commands.
|
||||
|
||||
The simplest way to "eval" the completions logic:
|
||||
|
||||
Installation
|
||||
------------
|
||||
The simplest way to see it working is to run
|
||||
`source misc/completion/ipfs-completion.bash` straight from your shell. This
|
||||
is only temporary and to fully enable it, you'll have to follow one of the steps
|
||||
below.
|
||||
|
||||
### Bash on Linux
|
||||
For bash, completion can be enabled in a couple of ways. One is to copy the
|
||||
completion script to the directory `~/.ipfs/` and then in the file
|
||||
`~/.bash_completion` add
|
||||
```bash
|
||||
source ~/.ipfs/ipfs-completion.bash
|
||||
> eval "$(ipfs commands completion bash)"
|
||||
```
|
||||
It will automatically be loaded the next time bash is loaded.
|
||||
To enable ipfs command completion globally on your system you may also
|
||||
copy the completion script to `/etc/bash_completion.d/`.
|
||||
|
||||
|
||||
Additional References
|
||||
---------------------
|
||||
* https://www.debian-administration.org/article/316/An_introduction_to_bash_completion_part_1
|
||||
To install the completions permanently, they can be moved to
|
||||
`/etc/bash_completion.d` or sourced from your `~/.bashrc` file.
|
||||
|
||||
343
docs/config.md
343
docs/config.md
@ -5,10 +5,134 @@ is read once at node instantiation, either for an offline command, or when
|
||||
starting the daemon. Commands that execute on a running daemon do not read the
|
||||
config file at runtime.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [The go-ipfs config file](#the-go-ipfs-config-file)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Profiles](#profiles)
|
||||
- [Types](#types)
|
||||
- [`flag`](#flag)
|
||||
- [`priority`](#priority)
|
||||
- [`strings`](#strings)
|
||||
- [`duration`](#duration)
|
||||
- [`optionalInteger`](#optionalinteger)
|
||||
- [`Addresses`](#addresses)
|
||||
- [`Addresses.API`](#addressesapi)
|
||||
- [`Addresses.Gateway`](#addressesgateway)
|
||||
- [`Addresses.Swarm`](#addressesswarm)
|
||||
- [`Addresses.Announce`](#addressesannounce)
|
||||
- [`Addresses.NoAnnounce`](#addressesnoannounce)
|
||||
- [`API`](#api)
|
||||
- [`API.HTTPHeaders`](#apihttpheaders)
|
||||
- [`AutoNAT`](#autonat)
|
||||
- [`AutoNAT.ServiceMode`](#autonatservicemode)
|
||||
- [`AutoNAT.Throttle`](#autonatthrottle)
|
||||
- [`AutoNAT.Throttle.GlobalLimit`](#autonatthrottlegloballimit)
|
||||
- [`AutoNAT.Throttle.PeerLimit`](#autonatthrottlepeerlimit)
|
||||
- [`AutoNAT.Throttle.Interval`](#autonatthrottleinterval)
|
||||
- [`Bootstrap`](#bootstrap)
|
||||
- [`Datastore`](#datastore)
|
||||
- [`Datastore.StorageMax`](#datastorestoragemax)
|
||||
- [`Datastore.StorageGCWatermark`](#datastorestoragegcwatermark)
|
||||
- [`Datastore.GCPeriod`](#datastoregcperiod)
|
||||
- [`Datastore.HashOnRead`](#datastorehashonread)
|
||||
- [`Datastore.BloomFilterSize`](#datastorebloomfiltersize)
|
||||
- [`Datastore.Spec`](#datastorespec)
|
||||
- [`Discovery`](#discovery)
|
||||
- [`Discovery.MDNS`](#discoverymdns)
|
||||
- [`Discovery.MDNS.Enabled`](#discoverymdnsenabled)
|
||||
- [`Discovery.MDNS.Interval`](#discoverymdnsinterval)
|
||||
- [`Gateway`](#gateway)
|
||||
- [`Gateway.NoFetch`](#gatewaynofetch)
|
||||
- [`Gateway.NoDNSLink`](#gatewaynodnslink)
|
||||
- [`Gateway.HTTPHeaders`](#gatewayhttpheaders)
|
||||
- [`Gateway.RootRedirect`](#gatewayrootredirect)
|
||||
- [`Gateway.Writable`](#gatewaywritable)
|
||||
- [`Gateway.PathPrefixes`](#gatewaypathprefixes)
|
||||
- [`Gateway.PublicGateways`](#gatewaypublicgateways)
|
||||
- [`Gateway.PublicGateways: Paths`](#gatewaypublicgateways-paths)
|
||||
- [`Gateway.PublicGateways: UseSubdomains`](#gatewaypublicgateways-usesubdomains)
|
||||
- [`Gateway.PublicGateways: NoDNSLink`](#gatewaypublicgateways-nodnslink)
|
||||
- [Implicit defaults of `Gateway.PublicGateways`](#implicit-defaults-of-gatewaypublicgateways)
|
||||
- [`Gateway` recipes](#gateway-recipes)
|
||||
- [`Identity`](#identity)
|
||||
- [`Identity.PeerID`](#identitypeerid)
|
||||
- [`Identity.PrivKey`](#identityprivkey)
|
||||
- [`Internal`](#internal)
|
||||
- [`Internal.Bitswap`](#internalbitswap)
|
||||
- [`Internal.Bitswap.TaskWorkerCount`](#internalbitswaptaskworkercount)
|
||||
- [`Internal.Bitswap.EngineBlockstoreWorkerCount`](#internalbitswapengineblockstoreworkercount)
|
||||
- [`Internal.Bitswap.EngineTaskWorkerCount`](#internalbitswapenginetaskworkercount)
|
||||
- [`Internal.Bitswap.MaxOutstandingBytesPerPeer`](#internalbitswapmaxoutstandingbytesperpeer)
|
||||
- [`Ipns`](#ipns)
|
||||
- [`Ipns.RepublishPeriod`](#ipnsrepublishperiod)
|
||||
- [`Ipns.RecordLifetime`](#ipnsrecordlifetime)
|
||||
- [`Ipns.ResolveCacheSize`](#ipnsresolvecachesize)
|
||||
- [`Migration`](#migration)
|
||||
- [`Migration.DownloadSources`](#migrationdownloadsources)
|
||||
- [`Migration.Keep`](#migrationkeep)
|
||||
- [`Mounts`](#mounts)
|
||||
- [`Mounts.IPFS`](#mountsipfs)
|
||||
- [`Mounts.IPNS`](#mountsipns)
|
||||
- [`Mounts.FuseAllowOther`](#mountsfuseallowother)
|
||||
- [`Pinning`](#pinning)
|
||||
- [`Pinning.RemoteServices`](#pinningremoteservices)
|
||||
- [`Pinning.RemoteServices: API`](#pinningremoteservices-api)
|
||||
- [`Pinning.RemoteServices: API.Endpoint`](#pinningremoteservices-apiendpoint)
|
||||
- [`Pinning.RemoteServices: API.Key`](#pinningremoteservices-apikey)
|
||||
- [`Pinning.RemoteServices: Policies`](#pinningremoteservices-policies)
|
||||
- [`Pinning.RemoteServices: Policies.MFS`](#pinningremoteservices-policiesmfs)
|
||||
- [`Pinning.RemoteServices: Policies.MFS.Enabled`](#pinningremoteservices-policiesmfsenabled)
|
||||
- [`Pinning.RemoteServices: Policies.MFS.PinName`](#pinningremoteservices-policiesmfspinname)
|
||||
- [`Pinning.RemoteServices: Policies.MFS.RepinInterval`](#pinningremoteservices-policiesmfsrepininterval)
|
||||
- [`Pubsub`](#pubsub)
|
||||
- [`Pubsub.Router`](#pubsubrouter)
|
||||
- [`Pubsub.DisableSigning`](#pubsubdisablesigning)
|
||||
- [`Peering`](#peering)
|
||||
- [`Peering.Peers`](#peeringpeers)
|
||||
- [`Reprovider`](#reprovider)
|
||||
- [`Reprovider.Interval`](#reproviderinterval)
|
||||
- [`Reprovider.Strategy`](#reproviderstrategy)
|
||||
- [`Routing`](#routing)
|
||||
- [`Routing.Type`](#routingtype)
|
||||
- [`Swarm`](#swarm)
|
||||
- [`Swarm.AddrFilters`](#swarmaddrfilters)
|
||||
- [`Swarm.DisableBandwidthMetrics`](#swarmdisablebandwidthmetrics)
|
||||
- [`Swarm.DisableNatPortMap`](#swarmdisablenatportmap)
|
||||
- [`Swarm.DisableRelay`](#swarmdisablerelay)
|
||||
- [`Swarm.EnableRelayHop`](#swarmenablerelayhop)
|
||||
- [`Swarm.EnableAutoRelay`](#swarmenableautorelay)
|
||||
- [Mode 1: `EnableRelayHop` is `false`](#mode-1-enablerelayhop-is-false)
|
||||
- [Mode 2: `EnableRelayHop` is `true`](#mode-2-enablerelayhop-is-true)
|
||||
- [`Swarm.EnableAutoNATService`](#swarmenableautonatservice)
|
||||
- [`Swarm.ConnMgr`](#swarmconnmgr)
|
||||
- [`Swarm.ConnMgr.Type`](#swarmconnmgrtype)
|
||||
- [Basic Connection Manager](#basic-connection-manager)
|
||||
- [`Swarm.ConnMgr.LowWater`](#swarmconnmgrlowwater)
|
||||
- [`Swarm.ConnMgr.HighWater`](#swarmconnmgrhighwater)
|
||||
- [`Swarm.ConnMgr.GracePeriod`](#swarmconnmgrgraceperiod)
|
||||
- [`Swarm.Transports`](#swarmtransports)
|
||||
- [`Swarm.Transports.Network`](#swarmtransportsnetwork)
|
||||
- [`Swarm.Transports.Network.TCP`](#swarmtransportsnetworktcp)
|
||||
- [`Swarm.Transports.Network.Websocket`](#swarmtransportsnetworkwebsocket)
|
||||
- [`Swarm.Transports.Network.QUIC`](#swarmtransportsnetworkquic)
|
||||
- [`Swarm.Transports.Network.Relay`](#swarmtransportsnetworkrelay)
|
||||
- [`Swarm.Transports.Security`](#swarmtransportssecurity)
|
||||
- [`Swarm.Transports.Security.TLS`](#swarmtransportssecuritytls)
|
||||
- [`Swarm.Transports.Security.SECIO`](#swarmtransportssecuritysecio)
|
||||
- [`Swarm.Transports.Security.Noise`](#swarmtransportssecuritynoise)
|
||||
- [`Swarm.Transports.Multiplexers`](#swarmtransportsmultiplexers)
|
||||
- [`Swarm.Transports.Multiplexers.Yamux`](#swarmtransportsmultiplexersyamux)
|
||||
- [`Swarm.Transports.Multiplexers.Mplex`](#swarmtransportsmultiplexersmplex)
|
||||
- [`DNS`](#dns)
|
||||
- [`DNS.Resolvers`](#dnsresolvers)
|
||||
|
||||
|
||||
|
||||
## Profiles
|
||||
|
||||
Configuration profiles allow you to tweak the configuration quickly. Profiles can be
|
||||
applied with `--profile` flag to `ipfs init` or with the `ipfs config profile
|
||||
applied with the `--profile` flag to `ipfs init` or with the `ipfs config profile
|
||||
apply` command. When a profile is applied a backup of the configuration file
|
||||
will be created in `$IPFS_PATH`.
|
||||
|
||||
@ -22,7 +146,7 @@ documented in `ipfs config profile --help`.
|
||||
|
||||
- `randomports`
|
||||
|
||||
Use a random port number for swarm.
|
||||
Use a random port number for the incoming swarm connections.
|
||||
|
||||
- `default-datastore`
|
||||
|
||||
@ -34,8 +158,8 @@ documented in `ipfs config profile --help`.
|
||||
|
||||
- `local-discovery`
|
||||
|
||||
Sets default values to fields affected by the server
|
||||
profile, enables discovery in local networks.
|
||||
Enables local discovery (enabled by default). Useful to re-enable local discovery after it's
|
||||
disabled by another profile (e.g., the server profile).
|
||||
|
||||
- `test`
|
||||
|
||||
@ -56,7 +180,7 @@ documented in `ipfs config profile --help`.
|
||||
|
||||
- You need a very simple and very reliable datastore and you trust your
|
||||
filesystem. This datastore stores each block as a separate file in the
|
||||
underlying filesystem so it's unlikely to loose data unless there's an issue
|
||||
underlying filesystem so it's unlikely to lose data unless there's an issue
|
||||
with the underlying file system.
|
||||
- You need to run garbage collection on a small (<= 10GiB) datastore. The
|
||||
default datastore, badger, can leave several gigabytes of data behind when
|
||||
@ -129,100 +253,13 @@ of strings, or null:
|
||||
Duration is a type for describing lengths of time, using the same format go
|
||||
does (e.g, `"1d2h4m40.01s"`).
|
||||
|
||||
## Table of Contents
|
||||
### `optionalInteger`
|
||||
|
||||
- [`Addresses`](#addresses)
|
||||
- [`Addresses.API`](#addressesapi)
|
||||
- [`Addresses.Gateway`](#addressesgateway)
|
||||
- [`Addresses.Swarm`](#addressesswarm)
|
||||
- [`Addresses.Announce`](#addressesannounce)
|
||||
- [`Addresses.NoAnnounce`](#addressesnoannounce)
|
||||
- [`API`](#api)
|
||||
- [`API.HTTPHeaders`](#apihttpheaders)
|
||||
- [`AutoNAT`](#autonat)
|
||||
- [`AutoNAT.ServiceMode`](#autonatservicemode)
|
||||
- [`AutoNAT.Throttle`](#autonatthrottle)
|
||||
- [`AutoNAT.Throttle.GlobalLimit`](#autonatthrottlegloballimit)
|
||||
- [`AutoNAT.Throttle.PeerLimit`](#autonatthrottlepeerlimit)
|
||||
- [`AutoNAT.Throttle.Interval`](#autonatthrottleinterval)
|
||||
- [`Bootstrap`](#bootstrap)
|
||||
- [`Datastore`](#datastore)
|
||||
- [`Datastore.StorageMax`](#datastorestoragemax)
|
||||
- [`Datastore.StorageGCWatermark`](#datastorestoragegcwatermark)
|
||||
- [`Datastore.GCPeriod`](#datastoregcperiod)
|
||||
- [`Datastore.HashOnRead`](#datastorehashonread)
|
||||
- [`Datastore.BloomFilterSize`](#datastorebloomfiltersize)
|
||||
- [`Datastore.Spec`](#datastorespec)
|
||||
- [`Discovery`](#discovery)
|
||||
- [`Discovery.MDNS`](#discoverymdns)
|
||||
- [`Discovery.MDNS.Enabled`](#discoverymdnsenabled)
|
||||
- [`Discovery.MDNS.Interval`](#discoverymdnsinterval)
|
||||
- [`Gateway`](#gateway)
|
||||
- [`Gateway.NoFetch`](#gatewaynofetch)
|
||||
- [`Gateway.NoDNSLink`](#gatewaynodnslink)
|
||||
- [`Gateway.HTTPHeaders`](#gatewayhttpheaders)
|
||||
- [`Gateway.RootRedirect`](#gatewayrootredirect)
|
||||
- [`Gateway.Writable`](#gatewaywritable)
|
||||
- [`Gateway.PathPrefixes`](#gatewaypathprefixes)
|
||||
- [`Gateway.PublicGateways`](#gatewaypublicgateways)
|
||||
- [`Identity`](#identity)
|
||||
- [`Identity.PeerID`](#identitypeerid)
|
||||
- [`Identity.PrivKey`](#identityprivkey)
|
||||
- [`Ipns`](#ipns)
|
||||
- [`Ipns.RepublishPeriod`](#ipnsrepublishperiod)
|
||||
- [`Ipns.RecordLifetime`](#ipnsrecordlifetime)
|
||||
- [`Ipns.ResolveCacheSize`](#ipnsresolvecachesize)
|
||||
- [`Migration`](#migration)
|
||||
- [`Migration.DownloadSources`](#migrationdownloadsources)
|
||||
- [`Migration.Keep`](#migrationkeep)
|
||||
- [`Mounts`](#mounts)
|
||||
- [`Mounts.IPFS`](#mountsipfs)
|
||||
- [`Mounts.IPNS`](#mountsipns)
|
||||
- [`Mounts.FuseAllowOther`](#mountsfuseallowother)
|
||||
- [`Pinning`](#pinning)
|
||||
- [`Pinning.RemoteServices`](#pinningremoteservices)
|
||||
- [`Pinning.RemoteServices.API`](#pinningremoteservices-api)
|
||||
- [`Pinning.RemoteServices.API.Endpoint`](#pinningremoteservices-apiendpoint)
|
||||
- [`Pinning.RemoteServices.API.Key`](#pinningremoteservices-apikey)
|
||||
- [`Pinning.RemoteServices.Policies`](#pinningremoteservices-policies)
|
||||
- [`Pinning.RemoteServices.Policies.MFS`](#pinningremoteservices-policiesmfs)
|
||||
- [`Pubsub`](#pubsub)
|
||||
- [`Pubsub.Router`](#pubsubrouter)
|
||||
- [`Pubsub.DisableSigning`](#pubsubdisablesigning)
|
||||
- [`Peering`](#peering)
|
||||
- [`Peering.Peers`](#peeringpeers)
|
||||
- [`Reprovider`](#reprovider)
|
||||
- [`Reprovider.Interval`](#reproviderinterval)
|
||||
- [`Reprovider.Strategy`](#reproviderstrategy)
|
||||
- [`Routing`](#routing)
|
||||
- [`Routing.Type`](#routingtype)
|
||||
- [`Swarm`](#swarm)
|
||||
- [`Swarm.AddrFilters`](#swarmaddrfilters)
|
||||
- [`Swarm.DisableBandwidthMetrics`](#swarmdisablebandwidthmetrics)
|
||||
- [`Swarm.DisableNatPortMap`](#swarmdisablenatportmap)
|
||||
- [`Swarm.DisableRelay`](#swarmdisablerelay)
|
||||
- [`Swarm.EnableRelayHop`](#swarmenablerelayhop)
|
||||
- [`Swarm.EnableAutoRelay`](#swarmenableautorelay)
|
||||
- [`Swarm.ConnMgr`](#swarmconnmgr)
|
||||
- [`Swarm.ConnMgr.Type`](#swarmconnmgrtype)
|
||||
- [`Swarm.ConnMgr.LowWater`](#swarmconnmgrlowwater)
|
||||
- [`Swarm.ConnMgr.HighWater`](#swarmconnmgrhighwater)
|
||||
- [`Swarm.ConnMgr.GracePeriod`](#swarmconnmgrgraceperiod)
|
||||
- [`Swarm.Transports`](#swarmtransports)
|
||||
- [`Swarm.Transports.Security`](#swarmtransportssecurity)
|
||||
- [`Swarm.Transports.Security.TLS`](#swarmtransportssecuritytls)
|
||||
- [`Swarm.Transports.Security.SECIO`](#swarmtransportssecuritysecio)
|
||||
- [`Swarm.Transports.Security.Noise`](#swarmtransportssecuritynoise)
|
||||
- [`Swarm.Transports.Multiplexers`](#swarmtransportsmultiplexers)
|
||||
- [`Swarm.Transports.Multiplexers.Yamux`](#swarmtransportsmultiplexersyamux)
|
||||
- [`Swarm.Transports.Multiplexers.Mplex`](#swarmtransportsmultiplexersmplex)
|
||||
- [`Swarm.Transports.Network`](#swarmtransportsnetwork)
|
||||
- [`Swarm.Transports.Network.TCP`](#swarmtransportsnetworktcp)
|
||||
- [`Swarm.Transports.Network.QUIC`](#swarmtransportsnetworkquic)
|
||||
- [`Swarm.Transports.Network.Websocket`](#swarmtransportsnetworkwebsocket)
|
||||
- [`Swarm.Transports.Network.Relay`](#swarmtransportsnetworkrelay)
|
||||
- [`DNS`](#dns)
|
||||
- [`DNS.Resolvers`](#dnsresolvers)
|
||||
Optional Integers allow specifying some numerical value which has
|
||||
an implicit default when `null` or missing from the config file:
|
||||
|
||||
- `null`/missing (apply the default value defined in go-ipfs sources)
|
||||
- an integer between `-2^63` and `2^63-1` (i.e. `-9223372036854775808` to `9223372036854775807`)
|
||||
|
||||
## `Addresses`
|
||||
|
||||
@ -258,7 +295,7 @@ Type: `strings` (multiaddrs)
|
||||
|
||||
### `Addresses.Swarm`
|
||||
|
||||
Array of multiaddrs describing which addresses to listen on for p2p swarm
|
||||
An array of multiaddrs describing which addresses to listen on for p2p swarm
|
||||
connections.
|
||||
|
||||
Supported Transports:
|
||||
@ -289,7 +326,7 @@ Default: `[]`
|
||||
Type: `array[string]` (multiaddrs)
|
||||
|
||||
### `Addresses.NoAnnounce`
|
||||
Array of swarm addresses not to announce to the network.
|
||||
An array of swarm addresses not to announce to the network.
|
||||
|
||||
Default: `[]`
|
||||
|
||||
@ -363,8 +400,7 @@ Type: `duration` (when `0`/unset, the default value is used)
|
||||
|
||||
## `Bootstrap`
|
||||
|
||||
Bootstrap is an array of multiaddrs of trusted nodes to connect to in order to
|
||||
initiate a connection to the network.
|
||||
Bootstrap is an array of multiaddrs of trusted nodes that your node connects to, to fetch other nodes of the network on startup.
|
||||
|
||||
Default: The ipfs.io bootstrap nodes
|
||||
|
||||
@ -405,7 +441,7 @@ Type: `duration` (an empty string means the default value)
|
||||
|
||||
### `Datastore.HashOnRead`
|
||||
|
||||
A boolean value. If set to true, all block reads from disk will be hashed and
|
||||
A boolean value. If set to true, all block reads from the disk will be hashed and
|
||||
verified. This will cause increased CPU utilization.
|
||||
|
||||
Default: `false`
|
||||
@ -416,13 +452,13 @@ Type: `bool`
|
||||
|
||||
A number representing the size in bytes of the blockstore's [bloom
|
||||
filter](https://en.wikipedia.org/wiki/Bloom_filter). A value of zero represents
|
||||
the feature being disabled.
|
||||
the feature is disabled.
|
||||
|
||||
This site generates useful graphs for various bloom filter values:
|
||||
<https://hur.st/bloomfilter/?n=1e6&p=0.01&m=&k=7> You may use it to find a
|
||||
preferred optimal value, where `m` is `BloomFilterSize` in bits. Remember to
|
||||
convert the value `m` from bits, into bytes for use as `BloomFilterSize` in the
|
||||
config file. For example, for 1,000,000 blocks, expecting a 1% false positive
|
||||
config file. For example, for 1,000,000 blocks, expecting a 1% false-positive
|
||||
rate, you'd end up with a filter size of 9592955 bits, so for `BloomFilterSize`
|
||||
we'd want to use 1199120 bytes. As of writing, [7 hash
|
||||
functions](https://github.com/ipfs/go-ipfs-blockstore/blob/547442836ade055cc114b562a3cc193d4e57c884/caching.go#L22)
|
||||
@ -496,7 +532,7 @@ Type: `bool`
|
||||
|
||||
#### `Discovery.MDNS.Interval`
|
||||
|
||||
A number of seconds to wait between discovery checks.
|
||||
The number of seconds between discovery checks.
|
||||
|
||||
Default: `5`
|
||||
|
||||
@ -518,8 +554,8 @@ Type: `bool`
|
||||
### `Gateway.NoDNSLink`
|
||||
|
||||
A boolean to configure whether DNSLink lookup for value in `Host` HTTP header
|
||||
should be performed. If DNSLink is present, content path stored in the DNS TXT
|
||||
record becomes the `/` and respective payload is returned to the client.
|
||||
should be performed. If DNSLink is present, the content path stored in the DNS TXT
|
||||
record becomes the `/` and the respective payload is returned to the client.
|
||||
|
||||
Default: `false`
|
||||
|
||||
@ -567,7 +603,7 @@ Type: `bool`
|
||||
**DEPRECATED:** see [go-ipfs#7702](https://github.com/ipfs/go-ipfs/issues/7702)
|
||||
|
||||
<!--
|
||||
Array of acceptable url paths that a client can specify in X-Ipfs-Path-Prefix
|
||||
An array of acceptable url paths that a client can specify in X-Ipfs-Path-Prefix
|
||||
header.
|
||||
|
||||
The X-Ipfs-Path-Prefix header is used to specify a base path to prepend to links
|
||||
@ -611,7 +647,7 @@ Examples:
|
||||
|
||||
#### `Gateway.PublicGateways: Paths`
|
||||
|
||||
Array of paths that should be exposed on the hostname.
|
||||
An array of paths that should be exposed on the hostname.
|
||||
|
||||
Example:
|
||||
```json
|
||||
@ -779,10 +815,85 @@ Type: `string` (peer ID)
|
||||
|
||||
### `Identity.PrivKey`
|
||||
|
||||
The base64 encoded protobuf describing (and containing) the nodes private key.
|
||||
The base64 encoded protobuf describing (and containing) the node's private key.
|
||||
|
||||
Type: `string` (base64 encoded)
|
||||
|
||||
## `Internal`
|
||||
|
||||
This section includes internal knobs for various subsystems to allow advanced users with big or private infrastructures to fine-tune some behaviors without the need to recompile go-ipfs.
|
||||
|
||||
**Be aware that making an informed change here requires in-depth knowledge and most users should leave these untouched. All knobs listed here are subject to breaking changes between versions.**
|
||||
|
||||
### `Internal.Bitswap`
|
||||
|
||||
`Internal.Bitswap` contains knobs for tuning bitswap resource utilization.
|
||||
The knobs (below) document how their values should relate to each other.
|
||||
Whether their values should be raised or lowered should be determined
|
||||
based on the metrics `ipfs_bitswap_active_tasks`, `ipfs_bitswap_pending_tasks`,
|
||||
`ipfs_bitswap_pending_block_tasks` and `ipfs_bitswap_active_block_tasks`
|
||||
reported by bitswap.
|
||||
|
||||
These metrics can be accessed via the prometheus endpoint at `{Addresses.API}/debug/metrics/prometheus` (default: `http://127.0.0.1:5001/debug/metrics/prometheus`)
|
||||
|
||||
The value of `ipfs_bitswap_active_tasks` is capped by `EngineTaskWorkerCount`.
|
||||
|
||||
The value of `ipfs_bitswap_pending_tasks` is generally capped by the knobs below,
|
||||
however its exact maximum value is hard to predict as it depends on task sizes
|
||||
as well as number of requesting peers. However, as a rule of thumb,
|
||||
during healthy operation this value should oscillate around a "typical" low value
|
||||
(without hitting a plateau continuously).
|
||||
|
||||
If `ipfs_bitswap_pending_tasks` is growing while `ipfs_bitswap_active_tasks` is at its maximum then
|
||||
the node has reached its resource limits and new requests are unable to be processed as quickly as they are coming in.
|
||||
Raising resource limits (using the knobs below) could help, assuming the hardware can support the new limits.
|
||||
|
||||
The value of `ipfs_bitswap_active_block_tasks` is capped by `EngineBlockstoreWorkerCount`.
|
||||
|
||||
The value of `ipfs_bitswap_pending_block_tasks` is indirectly capped by `ipfs_bitswap_active_tasks`, but can be hard to
|
||||
predict as it depends on the number of blocks involved in a peer task which can vary.
|
||||
|
||||
If the value of `ipfs_bitswap_pending_block_tasks` is observed to grow,
|
||||
while `ipfs_bitswap_active_block_tasks` is at its maximum, there is indication that the number of
|
||||
available block tasks is creating a bottleneck (either due to high-latency block operations,
|
||||
or due to high number of block operations per bitswap peer task).
|
||||
In such cases, try increasing the `EngineBlockstoreWorkerCount`.
|
||||
If this adjustment still does not increase the throughput of the node, there might
|
||||
be hardware limitations like I/O or CPU.
|
||||
|
||||
#### `Internal.Bitswap.TaskWorkerCount`
|
||||
|
||||
Number of threads (goroutines) sending outgoing messages.
|
||||
Throttles the number of concurrent send operations.
|
||||
|
||||
Type: `optionalInteger` (thread count, `null` means default which is 8)
|
||||
|
||||
#### `Internal.Bitswap.EngineBlockstoreWorkerCount`
|
||||
|
||||
Number of threads for blockstore operations.
|
||||
Used to throttle the number of concurrent requests to the block store.
|
||||
The optimal value can be informed by the metrics `ipfs_bitswap_pending_block_tasks` and `ipfs_bitswap_active_block_tasks`.
|
||||
This would be a number that depends on your hardware (I/O and CPU).
|
||||
|
||||
Type: `optionalInteger` (thread count, `null` means default which is 128)
|
||||
|
||||
#### `Internal.Bitswap.EngineTaskWorkerCount`
|
||||
|
||||
Number of worker threads used for preparing and packaging responses before they are sent out.
|
||||
This number should generally be equal to `TaskWorkerCount`.
|
||||
|
||||
Type: `optionalInteger` (thread count, `null` means default which is 8)
|
||||
|
||||
#### `Internal.Bitswap.MaxOutstandingBytesPerPeer`
|
||||
|
||||
Maximum number of bytes (across all tasks) pending to be processed and sent to any individual peer.
|
||||
This number controls fairness and can vary from 250Kb (very fair) to 10Mb (less fair, with more work
|
||||
dedicated to peers who ask for more). Values below 250Kb could cause thrashing.
|
||||
Values above 10Mb open the potential for aggressively-wanting peers to consume all resources and
|
||||
deteriorate the quality provided to less aggressively-wanting peers.
|
||||
|
||||
Type: `optionalInteger` (byte count, `null` means default which is 1MB)
|
||||
|
||||
## `Ipns`
|
||||
|
||||
### `Ipns.RepublishPeriod`
|
||||
@ -850,19 +961,19 @@ Type: `string` (filesystem path)
|
||||
|
||||
### `Mounts.FuseAllowOther`
|
||||
|
||||
Sets the FUSE allow other option on the mountpoint.
|
||||
Sets the 'FUSE allow other'-option on the mount point.
|
||||
|
||||
## `Pinning`
|
||||
|
||||
Pinning configures the options available for pinning content
|
||||
(i.e. keeping content longer term instead of as temporarily cached storage).
|
||||
(i.e. keeping content longer-term instead of as temporarily cached storage).
|
||||
|
||||
### `Pinning.RemoteServices`
|
||||
|
||||
`RemoteServices` maps a name for a remote pinning service to its configuration.
|
||||
|
||||
A remote pinning service is a remote service that exposes an API for managing
|
||||
that service's interest in longer term data storage.
|
||||
that service's interest in long-term data storage.
|
||||
|
||||
The exposed API conforms to the specification defined at
|
||||
https://ipfs.github.io/pinning-services-api-spec/
|
||||
@ -1004,7 +1115,7 @@ When a node is added to the set of peered nodes, go-ipfs will:
|
||||
Peering can be asymmetric or symmetric:
|
||||
|
||||
* When symmetric, the connection will be protected by both nodes and will likely
|
||||
be vary stable.
|
||||
be very stable.
|
||||
* When asymmetric, only one node (the node that configured peering) will protect
|
||||
the connection and attempt to re-connect to the peered node on disconnect. If
|
||||
the peered node is under heavy load and/or has a low connection limit, the
|
||||
@ -1087,7 +1198,7 @@ When the DHT is enabled, it can operate in two modes: client and server.
|
||||
respond to requests from other peers (both requests to store records and
|
||||
requests to retrieve records).
|
||||
* In client mode, your node will query the DHT as a client but will not respond
|
||||
to requests from other peers. This mode is less resource intensive than server
|
||||
to requests from other peers. This mode is less resource-intensive than server
|
||||
mode.
|
||||
|
||||
When `Routing.Type` is set to `dht`, your node will start as a DHT client, and
|
||||
|
||||
@ -15,8 +15,8 @@ When you see ipfs doing something (using lots of CPU, memory, or otherwise
|
||||
being weird), the first thing you want to do is gather all the relevant
|
||||
profiling information.
|
||||
|
||||
There's a script (`bin/collect-profiles.sh`) that will do this for you and
|
||||
bundle the results up into a tarball, ready to be attached to a bug report.
|
||||
There's a command (`ipfs diag profile`) that will do this for you and
|
||||
bundle the results up into a zip file, ready to be attached to a bug report.
|
||||
|
||||
If you feel intrepid, you can dump this information and investigate it yourself:
|
||||
|
||||
@ -79,6 +79,9 @@ that goroutine in the middle of a short wait for something. If the wait time is
|
||||
over a few minutes, that either means that goroutine doesn't do much, or
|
||||
something is pretty wrong.
|
||||
|
||||
If you're seeing a lot of goroutines, consider using
|
||||
[stackparse](https://github.com/whyrusleeping/stackparse) to filter, sort, and summarize them.
|
||||
|
||||
### Analyzing the CPU Profile
|
||||
|
||||
The go team wrote an [excellent article on profiling go
|
||||
@ -97,4 +100,3 @@ The output is JSON formatted and includes badger store statistics, the command l
|
||||
If you have any questions, or want us to analyze some weird go-ipfs behaviour,
|
||||
just let us know, and be sure to include all the profiling information
|
||||
mentioned at the top.
|
||||
|
||||
|
||||
@ -77,10 +77,11 @@ Defaults: 2048
|
||||
|
||||
## `IPFS_DIST_PATH`
|
||||
|
||||
URL from which go-ipfs fetches repo migrations (when the daemon is launched with
|
||||
the `--migrate` flag).
|
||||
IPFS Content Path from which go-ipfs fetches repo migrations (when the daemon
|
||||
is launched with the `--migrate` flag).
|
||||
|
||||
Default: https://ipfs.io/ipfs/$something (depends on the IPFS version)
|
||||
Default: `/ipfs/<cid>` (the exact path is hardcoded in
|
||||
`migrations.CurrentIpfsDist`, depends on the IPFS version)
|
||||
|
||||
## `IPFS_NS_MAP`
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 70 KiB After Width: | Height: | Size: 70 KiB |
@ -1,15 +1,14 @@
|
||||
module github.com/ipfs/go-ipfs/examples/go-ipfs-as-a-library
|
||||
|
||||
go 1.14
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/ipfs/go-ipfs v0.7.0
|
||||
github.com/ipfs/go-ipfs-config v0.9.0
|
||||
github.com/ipfs/go-ipfs v0.9.1
|
||||
github.com/ipfs/go-ipfs-config v0.16.0
|
||||
github.com/ipfs/go-ipfs-files v0.0.8
|
||||
github.com/ipfs/interface-go-ipfs-core v0.4.0
|
||||
github.com/libp2p/go-libp2p-core v0.6.0
|
||||
github.com/libp2p/go-libp2p-peerstore v0.2.6
|
||||
github.com/multiformats/go-multiaddr v0.2.2
|
||||
github.com/ipfs/interface-go-ipfs-core v0.5.1
|
||||
github.com/libp2p/go-libp2p-core v0.9.0
|
||||
github.com/multiformats/go-multiaddr v0.4.0
|
||||
)
|
||||
|
||||
replace github.com/ipfs/go-ipfs => ./../../..
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@ -12,14 +13,13 @@ import (
|
||||
|
||||
config "github.com/ipfs/go-ipfs-config"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
libp2p "github.com/ipfs/go-ipfs/core/node/libp2p"
|
||||
icore "github.com/ipfs/interface-go-ipfs-core"
|
||||
icorepath "github.com/ipfs/interface-go-ipfs-core/path"
|
||||
peerstore "github.com/libp2p/go-libp2p-peerstore"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
|
||||
"github.com/ipfs/go-ipfs/core"
|
||||
"github.com/ipfs/go-ipfs/core/coreapi"
|
||||
"github.com/ipfs/go-ipfs/core/node/libp2p"
|
||||
"github.com/ipfs/go-ipfs/plugin/loader" // This package is needed so that all the preloaded plugins are loaded automatically
|
||||
"github.com/ipfs/go-ipfs/repo/fsrepo"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
@ -46,7 +46,7 @@ func setupPlugins(externalPluginsPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func createTempRepo(ctx context.Context) (string, error) {
|
||||
func createTempRepo() (string, error) {
|
||||
repoPath, err := ioutil.TempDir("", "ipfs-shell")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get temp dir: %s", err)
|
||||
@ -58,6 +58,24 @@ func createTempRepo(ctx context.Context) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// When creating the repository, you can define custom settings on the repository, such as enabling experimental
|
||||
// features (See experimental-features.md) or customizing the gateway endpoint.
|
||||
// To do such things, you should modify the variable `cfg`. For example:
|
||||
if *flagExp {
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-filestore
|
||||
cfg.Experimental.FilestoreEnabled = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-urlstore
|
||||
cfg.Experimental.UrlstoreEnabled = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#directory-sharding--hamt
|
||||
cfg.Experimental.ShardingEnabled = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-p2p
|
||||
cfg.Experimental.Libp2pStreamMounting = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#p2p-http-proxy
|
||||
cfg.Experimental.P2pHttpProxy = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#strategic-providing
|
||||
cfg.Experimental.StrategicProviding = true
|
||||
}
|
||||
|
||||
// Create the repo with the config
|
||||
err = fsrepo.Init(repoPath, cfg)
|
||||
if err != nil {
|
||||
@ -118,7 +136,7 @@ func spawnEphemeral(ctx context.Context) (icore.CoreAPI, error) {
|
||||
}
|
||||
|
||||
// Create a Temporary Repo
|
||||
repoPath, err := createTempRepo(ctx)
|
||||
repoPath, err := createTempRepo()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp repo: %s", err)
|
||||
}
|
||||
@ -131,19 +149,19 @@ func spawnEphemeral(ctx context.Context) (icore.CoreAPI, error) {
|
||||
|
||||
func connectToPeers(ctx context.Context, ipfs icore.CoreAPI, peers []string) error {
|
||||
var wg sync.WaitGroup
|
||||
peerInfos := make(map[peer.ID]*peerstore.PeerInfo, len(peers))
|
||||
peerInfos := make(map[peer.ID]*peer.AddrInfo, len(peers))
|
||||
for _, addrStr := range peers {
|
||||
addr, err := ma.NewMultiaddr(addrStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pii, err := peerstore.InfoFromP2pAddr(addr)
|
||||
pii, err := peer.AddrInfoFromP2pAddr(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pi, ok := peerInfos[pii.ID]
|
||||
if !ok {
|
||||
pi = &peerstore.PeerInfo{ID: pii.ID}
|
||||
pi = &peer.AddrInfo{ID: pii.ID}
|
||||
peerInfos[pi.ID] = pi
|
||||
}
|
||||
pi.Addrs = append(pi.Addrs, pii.Addrs...)
|
||||
@ -151,7 +169,7 @@ func connectToPeers(ctx context.Context, ipfs icore.CoreAPI, peers []string) err
|
||||
|
||||
wg.Add(len(peerInfos))
|
||||
for _, peerInfo := range peerInfos {
|
||||
go func(peerInfo *peerstore.PeerInfo) {
|
||||
go func(peerInfo *peer.AddrInfo) {
|
||||
defer wg.Done()
|
||||
err := ipfs.Swarm().Connect(ctx, *peerInfo)
|
||||
if err != nil {
|
||||
@ -199,7 +217,11 @@ func getUnixfsNode(path string) (files.Node, error) {
|
||||
|
||||
/// -------
|
||||
|
||||
var flagExp = flag.Bool("experimental", false, "enable experimental features")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
/// --- Part I: Getting a IPFS node running
|
||||
|
||||
fmt.Println("-- Getting an IPFS node running -- ")
|
||||
@ -212,7 +234,7 @@ func main() {
|
||||
fmt.Println("Spawning node on default repo")
|
||||
ipfs, err := spawnDefault(ctx)
|
||||
if err != nil {
|
||||
fmt.Println("No IPFS repo available on the default path")
|
||||
panic(fmt.Errorf("failed to spawnDefault node: %s", err))
|
||||
}
|
||||
*/
|
||||
|
||||
@ -229,7 +251,7 @@ func main() {
|
||||
|
||||
fmt.Println("\n-- Adding and getting back files & directories --")
|
||||
|
||||
inputBasePath := "./example-folder/"
|
||||
inputBasePath := "../example-folder/"
|
||||
inputPathFile := inputBasePath + "ipfs.paper.draft3.pdf"
|
||||
inputPathDirectory := inputBasePath + "test-dir"
|
||||
|
||||
@ -259,7 +281,11 @@ func main() {
|
||||
|
||||
/// --- Part III: Getting the file and directory you added back
|
||||
|
||||
outputBasePath := "./example-folder/"
|
||||
outputBasePath, err := ioutil.TempDir("", "example")
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("could not create output dir (%v)", err))
|
||||
}
|
||||
fmt.Printf("output folder: %s\n", outputBasePath)
|
||||
outputPathFile := outputBasePath + strings.Split(cidFile.String(), "/")[2]
|
||||
outputPathDirectory := outputBasePath + strings.Split(cidDirectory.String(), "/")[2]
|
||||
|
||||
@ -313,7 +339,12 @@ func main() {
|
||||
// "/ip4/127.0.0.1/udp/4010/quic/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L",
|
||||
}
|
||||
|
||||
go connectToPeers(ctx, ipfs, bootstrapNodes)
|
||||
go func() {
|
||||
err := connectToPeers(ctx, ipfs, bootstrapNodes)
|
||||
if err != nil {
|
||||
log.Printf("failed connect to peers: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
exampleCIDStr := "QmUaoioqU7bxezBQZkUcgcSyokatMY71sxsALxQmRRrHrj"
|
||||
|
||||
|
||||
17
docs/examples/go-ipfs-as-a-library/main_test.go
Normal file
17
docs/examples/go-ipfs-as-a-library/main_test.go
Normal file
@ -0,0 +1,17 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExample(t *testing.T) {
|
||||
out, err := exec.Command("go", "run", "main.go").Output()
|
||||
if err != nil {
|
||||
t.Fatalf("running example (%v)", err)
|
||||
}
|
||||
if !strings.Contains(string(out), "All done!") {
|
||||
t.Errorf("example did not run successfully")
|
||||
}
|
||||
}
|
||||
@ -1,72 +0,0 @@
|
||||
# Use go-ipfs as a library and enable experimental features
|
||||
|
||||
Before moving on to this tutorial, you must read first the initial [`go-ipfs` as a library tutorial](../go-ipfs-as-a-library/README.md)
|
||||
as it gives insights on how to create a repository, the daemon and add a file.
|
||||
|
||||
There is only one thing that differs from this example and the first tutorial, which is the function [`createTempRepo`](../go-ipfs-as-a-library/main.go#L49):
|
||||
|
||||
```go
|
||||
func createTempRepo(ctx context.Context) (string, error) {
|
||||
repoPath, err := ioutil.TempDir("", "ipfs-shell")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get temp dir: %s", err)
|
||||
}
|
||||
|
||||
// Create a config with default options and a 2048 bit key
|
||||
cfg, err := config.Init(ioutil.Discard, 2048)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Create the repo with the config
|
||||
err = fsrepo.Init(repoPath, cfg)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to init ephemeral node: %s", err)
|
||||
}
|
||||
|
||||
return repoPath, nil
|
||||
}
|
||||
```
|
||||
|
||||
When creating the repository, you can define custom settings on the repository, such as enabling [experimental
|
||||
features](../../experimental-features.md) or customizing the gateway endpoint.
|
||||
|
||||
To do such things, you should modify the variable `cfg`. For example, to enable the sharding experiment, you would modify the function to:
|
||||
|
||||
```go
|
||||
func createTempRepo(ctx context.Context) (string, error) {
|
||||
repoPath, err := ioutil.TempDir("", "ipfs-shell")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get temp dir: %s", err)
|
||||
}
|
||||
|
||||
// Create a config with default options and a 2048 bit key
|
||||
cfg, err := config.Init(ioutil.Discard, 2048)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-filestore
|
||||
cfg.Experimental.FilestoreEnabled = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-urlstore
|
||||
cfg.Experimental.UrlstoreEnabled = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#directory-sharding--hamt
|
||||
cfg.Experimental.ShardingEnabled = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-p2p
|
||||
cfg.Experimental.Libp2pStreamMounting = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#p2p-http-proxy
|
||||
cfg.Experimental.P2pHttpProxy = true
|
||||
// https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#strategic-providing
|
||||
cfg.Experimental.StrategicProviding = true
|
||||
|
||||
// Create the repo with the config
|
||||
err = fsrepo.Init(repoPath, cfg)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to init ephemeral node: %s", err)
|
||||
}
|
||||
|
||||
return repoPath, nil
|
||||
}
|
||||
```
|
||||
|
||||
There are many other options that you can find through the [documentation](https://godoc.org/github.com/ipfs/go-ipfs-config#Config).
|
||||
@ -140,7 +140,7 @@ To build out-of-tree plugins, use the plugin's Makefile if provided. Otherwise,
|
||||
you can manually build the plugin by running:
|
||||
|
||||
```bash
|
||||
myplugin$ go build -buildmode=plugin -i -o myplugin.so myplugin.go
|
||||
myplugin$ go build -buildmode=plugin -o myplugin.so myplugin.go
|
||||
```
|
||||
|
||||
Finally, as with in-tree plugins:
|
||||
|
||||
@ -33,12 +33,16 @@ func NewMount(p goprocess.Process, fsys fs.FS, mountpoint string, allow_other bo
|
||||
var conn *fuse.Conn
|
||||
var err error
|
||||
|
||||
if allow_other {
|
||||
conn, err = fuse.Mount(mountpoint, fuse.AllowOther())
|
||||
} else {
|
||||
conn, err = fuse.Mount(mountpoint)
|
||||
var mountOpts = []fuse.MountOption{
|
||||
fuse.MaxReadahead(64 * 1024 * 1024),
|
||||
fuse.AsyncRead(),
|
||||
}
|
||||
|
||||
if allow_other {
|
||||
mountOpts = append(mountOpts, fuse.AllowOther())
|
||||
}
|
||||
conn, err = fuse.Mount(mountpoint, mountOpts...)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -10,16 +10,19 @@ import (
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
mdag "github.com/ipfs/go-merkledag"
|
||||
path "github.com/ipfs/go-path"
|
||||
ft "github.com/ipfs/go-unixfs"
|
||||
uio "github.com/ipfs/go-unixfs/io"
|
||||
|
||||
fuse "bazil.org/fuse"
|
||||
fs "bazil.org/fuse/fs"
|
||||
"github.com/ipfs/go-cid"
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
logging "github.com/ipfs/go-log"
|
||||
mdag "github.com/ipfs/go-merkledag"
|
||||
path "github.com/ipfs/go-path"
|
||||
"github.com/ipfs/go-path/resolver"
|
||||
ft "github.com/ipfs/go-unixfs"
|
||||
uio "github.com/ipfs/go-unixfs/io"
|
||||
ipldprime "github.com/ipld/go-ipld-prime"
|
||||
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
|
||||
)
|
||||
|
||||
var log = logging.Logger("fuse/ipfs")
|
||||
@ -65,20 +68,47 @@ func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
nd, err := s.Ipfs.Resolver.ResolvePath(ctx, p)
|
||||
nd, ndLnk, err := resolver.NewBasicResolver(s.Ipfs.UnixFSFetcherFactory).ResolvePath(ctx, p)
|
||||
if err != nil {
|
||||
// todo: make this error more versatile.
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
switch nd := nd.(type) {
|
||||
case *mdag.ProtoNode, *mdag.RawNode:
|
||||
return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
|
||||
default:
|
||||
log.Error("fuse node was not a protobuf node")
|
||||
return nil, fuse.ENOTSUP
|
||||
cidLnk, ok := ndLnk.(cidlink.Link)
|
||||
if !ok {
|
||||
log.Debugf("non-cidlink returned from ResolvePath: %v", ndLnk)
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
// convert ipld-prime node to universal node
|
||||
blk, err := s.Ipfs.Blockstore.Get(cidLnk.Cid)
|
||||
if err != nil {
|
||||
log.Debugf("fuse failed to retrieve block: %v: %s", cidLnk, err)
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
var fnd ipld.Node
|
||||
switch cidLnk.Cid.Prefix().Codec {
|
||||
case cid.DagProtobuf:
|
||||
adl, ok := nd.(ipldprime.ADL)
|
||||
if ok {
|
||||
substrate := adl.Substrate()
|
||||
fnd, err = mdag.ProtoNodeConverter(blk, substrate)
|
||||
} else {
|
||||
fnd, err = mdag.ProtoNodeConverter(blk, nd)
|
||||
}
|
||||
case cid.Raw:
|
||||
fnd, err = mdag.RawNodeConverter(blk, nd)
|
||||
default:
|
||||
log.Error("fuse node was not a supported type")
|
||||
return nil, fuse.ENOTSUP
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("could not convert protobuf or raw node")
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
return &Node{Ipfs: s.Ipfs, Nd: fnd}, nil
|
||||
}
|
||||
|
||||
// ReadDirAll reads a particular directory. Disallowed for root.
|
||||
|
||||
80
go.mod
80
go.mod
@ -5,106 +5,112 @@ require (
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.3.0
|
||||
github.com/blang/semver/v4 v4.0.0
|
||||
github.com/cheggaaa/pb v1.0.29
|
||||
github.com/coreos/go-systemd/v22 v22.3.1
|
||||
github.com/coreos/go-systemd/v22 v22.3.2
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
github.com/gabriel-vasile/mimetype v1.1.2
|
||||
github.com/go-bindata/go-bindata/v3 v3.1.3
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/ipfs/go-bitswap v0.3.4
|
||||
github.com/ipfs/go-bitswap v0.4.0
|
||||
github.com/ipfs/go-block-format v0.0.3
|
||||
github.com/ipfs/go-blockservice v0.1.4
|
||||
github.com/ipfs/go-blockservice v0.1.7
|
||||
github.com/ipfs/go-cid v0.0.7
|
||||
github.com/ipfs/go-cidutil v0.0.2
|
||||
github.com/ipfs/go-datastore v0.4.5
|
||||
github.com/ipfs/go-datastore v0.4.6
|
||||
github.com/ipfs/go-detect-race v0.0.1
|
||||
github.com/ipfs/go-ds-badger v0.2.6
|
||||
github.com/ipfs/go-ds-badger v0.2.7
|
||||
github.com/ipfs/go-ds-flatfs v0.4.5
|
||||
github.com/ipfs/go-ds-leveldb v0.4.2
|
||||
github.com/ipfs/go-ds-measure v0.1.0
|
||||
github.com/ipfs/go-fetcher v1.5.0
|
||||
github.com/ipfs/go-filestore v0.0.3
|
||||
github.com/ipfs/go-fs-lock v0.0.6
|
||||
github.com/ipfs/go-fs-lock v0.0.7
|
||||
github.com/ipfs/go-graphsync v0.8.0
|
||||
github.com/ipfs/go-ipfs-blockstore v0.1.6
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.5
|
||||
github.com/ipfs/go-ipfs-cmds v0.6.0
|
||||
github.com/ipfs/go-ipfs-config v0.14.0
|
||||
github.com/ipfs/go-ipfs-config v0.16.0
|
||||
github.com/ipfs/go-ipfs-exchange-interface v0.0.1
|
||||
github.com/ipfs/go-ipfs-exchange-offline v0.0.1
|
||||
github.com/ipfs/go-ipfs-files v0.0.8
|
||||
github.com/ipfs/go-ipfs-keystore v0.0.2
|
||||
github.com/ipfs/go-ipfs-pinner v0.1.1
|
||||
github.com/ipfs/go-ipfs-pinner v0.1.2
|
||||
github.com/ipfs/go-ipfs-posinfo v0.0.1
|
||||
github.com/ipfs/go-ipfs-provider v0.5.1
|
||||
github.com/ipfs/go-ipfs-provider v0.6.1
|
||||
github.com/ipfs/go-ipfs-routing v0.1.0
|
||||
github.com/ipfs/go-ipfs-util v0.0.2
|
||||
github.com/ipfs/go-ipld-cbor v0.0.5
|
||||
github.com/ipfs/go-ipld-format v0.2.0
|
||||
github.com/ipfs/go-ipld-git v0.0.4
|
||||
github.com/ipfs/go-ipns v0.1.0
|
||||
github.com/ipfs/go-ipld-git v0.1.1
|
||||
github.com/ipfs/go-ipld-legacy v0.1.0
|
||||
github.com/ipfs/go-ipns v0.1.2
|
||||
github.com/ipfs/go-log v1.0.5
|
||||
github.com/ipfs/go-merkledag v0.3.2
|
||||
github.com/ipfs/go-merkledag v0.4.0
|
||||
github.com/ipfs/go-metrics-interface v0.0.1
|
||||
github.com/ipfs/go-metrics-prometheus v0.0.2
|
||||
github.com/ipfs/go-mfs v0.1.2
|
||||
github.com/ipfs/go-namesys v0.3.0
|
||||
github.com/ipfs/go-path v0.0.9
|
||||
github.com/ipfs/go-namesys v0.3.1
|
||||
github.com/ipfs/go-path v0.1.2
|
||||
github.com/ipfs/go-pinning-service-http-client v0.1.0
|
||||
github.com/ipfs/go-unixfs v0.2.5
|
||||
github.com/ipfs/go-unixfsnode v1.1.3
|
||||
github.com/ipfs/go-verifcid v0.0.1
|
||||
github.com/ipfs/interface-go-ipfs-core v0.4.0
|
||||
github.com/ipfs/interface-go-ipfs-core v0.5.1
|
||||
github.com/ipfs/tar-utils v0.0.1
|
||||
github.com/ipld/go-car v0.3.1
|
||||
github.com/ipld/go-codec-dagpb v1.3.0
|
||||
github.com/ipld/go-ipld-prime v0.12.2
|
||||
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0
|
||||
github.com/jbenet/goprocess v0.1.4
|
||||
github.com/libp2p/go-doh-resolver v0.3.1
|
||||
github.com/libp2p/go-libp2p v0.14.3
|
||||
github.com/libp2p/go-libp2p v0.15.0
|
||||
github.com/libp2p/go-libp2p-circuit v0.4.0
|
||||
github.com/libp2p/go-libp2p-connmgr v0.2.4
|
||||
github.com/libp2p/go-libp2p-core v0.8.5
|
||||
github.com/libp2p/go-libp2p-core v0.9.0
|
||||
github.com/libp2p/go-libp2p-discovery v0.5.1
|
||||
github.com/libp2p/go-libp2p-http v0.2.0
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.12.2
|
||||
github.com/libp2p/go-libp2p-http v0.2.1
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.13.1
|
||||
github.com/libp2p/go-libp2p-kbucket v0.4.7
|
||||
github.com/libp2p/go-libp2p-loggables v0.1.0
|
||||
github.com/libp2p/go-libp2p-mplex v0.4.1
|
||||
github.com/libp2p/go-libp2p-noise v0.2.0
|
||||
github.com/libp2p/go-libp2p-peerstore v0.2.7
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.2
|
||||
github.com/libp2p/go-libp2p-noise v0.2.2
|
||||
github.com/libp2p/go-libp2p-peerstore v0.2.8
|
||||
github.com/libp2p/go-libp2p-pubsub v0.5.4
|
||||
github.com/libp2p/go-libp2p-pubsub-router v0.4.0
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.11.2
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.12.0
|
||||
github.com/libp2p/go-libp2p-record v0.1.3
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.0
|
||||
github.com/libp2p/go-libp2p-testing v0.4.0
|
||||
github.com/libp2p/go-libp2p-tls v0.1.3
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.3
|
||||
github.com/libp2p/go-libp2p-testing v0.4.2
|
||||
github.com/libp2p/go-libp2p-tls v0.2.0
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.4
|
||||
github.com/libp2p/go-socket-activation v0.0.2
|
||||
github.com/libp2p/go-tcp-transport v0.2.4
|
||||
github.com/libp2p/go-ws-transport v0.4.0
|
||||
github.com/lucas-clemente/quic-go v0.21.2
|
||||
github.com/miekg/dns v1.1.41
|
||||
github.com/libp2p/go-socket-activation v0.1.0
|
||||
github.com/libp2p/go-tcp-transport v0.2.8
|
||||
github.com/libp2p/go-ws-transport v0.5.0
|
||||
github.com/miekg/dns v1.1.43
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/multiformats/go-multiaddr v0.3.3
|
||||
github.com/multiformats/go-multiaddr v0.4.0
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1
|
||||
github.com/multiformats/go-multibase v0.0.3
|
||||
github.com/multiformats/go-multicodec v0.3.0
|
||||
github.com/multiformats/go-multihash v0.0.15
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.10.0
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/statsd_exporter v0.21.0 // indirect
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1
|
||||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
|
||||
go.opencensus.io v0.23.0
|
||||
go.uber.org/fx v1.13.1
|
||||
go.uber.org/zap v1.16.0
|
||||
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf
|
||||
go.uber.org/zap v1.19.0
|
||||
golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
|
||||
)
|
||||
|
||||
go 1.15
|
||||
go 1.16
|
||||
|
||||
@ -1,958 +0,0 @@
|
||||
_do_comp()
|
||||
{
|
||||
if [[ $(type compopt) == *"builtin" ]]; then
|
||||
compopt $@
|
||||
else
|
||||
complete $@
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_comp()
|
||||
{
|
||||
COMPREPLY=( $(compgen -W "$1" -- ${word}) )
|
||||
if [[ ${#COMPREPLY[@]} == 1 && ${COMPREPLY[0]} == "--"*"=" ]] ; then
|
||||
# If there's only one option, with =, then discard space
|
||||
_do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_help_only()
|
||||
{
|
||||
_ipfs_comp "--help"
|
||||
}
|
||||
|
||||
_ipfs_add()
|
||||
{
|
||||
if [[ "${prev}" == "--chunker" ]] ; then
|
||||
_ipfs_comp "placeholder1 placeholder2 placeholder3" # TODO: a) Give real options, b) Solve autocomplete bug for "="
|
||||
elif [ "${prev}" == "--pin" ] ; then
|
||||
_ipfs_comp "true false"
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--recursive --dereference-args --stdin-name= --hidden --ignore= --ignore-rules-path= --quiet --quieter --silent --progress --trickle --only-hash --wrap-with-directory --chunker= --pin= --raw-leaves --nocopy --fscache --cid-version= --hash= --inline --inline-limit= --help "
|
||||
else
|
||||
_ipfs_filesystem_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_bitswap()
|
||||
{
|
||||
ipfs_comp "ledger stat wantlist --help"
|
||||
}
|
||||
|
||||
_ipfs_bitswap_ledger()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_bitswap_stat()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_bitswap_wantlist()
|
||||
{
|
||||
ipfs_comp "--peer= --help"
|
||||
}
|
||||
|
||||
_ipfs_block()
|
||||
{
|
||||
_ipfs_comp "get put rm stat --help"
|
||||
}
|
||||
|
||||
_ipfs_block_get()
|
||||
{
|
||||
_ipfs_hash_complete
|
||||
}
|
||||
|
||||
_ipfs_block_put()
|
||||
{
|
||||
if [ "${prev}" == "--format" ] ; then
|
||||
_ipfs_comp "v0 placeholder2 placeholder3" # TODO: a) Give real options, b) Solve autocomplete bug for "="
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--format= --help"
|
||||
else
|
||||
_ipfs_filesystem_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_block_rm()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--force --quiet --help"
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_block_stat()
|
||||
{
|
||||
_ipfs_hash_complete
|
||||
}
|
||||
|
||||
_ipfs_bootstrap()
|
||||
{
|
||||
_ipfs_comp "add list rm --help"
|
||||
}
|
||||
|
||||
_ipfs_bootstrap_add()
|
||||
{
|
||||
_ipfs_comp "default --help"
|
||||
}
|
||||
|
||||
_ipfs_bootstrap_list()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_bootstrap_rm()
|
||||
{
|
||||
_ipfs_comp "all --help"
|
||||
}
|
||||
|
||||
_ipfs_cat()
|
||||
{
|
||||
if [[ ${prev} == */* ]] ; then
|
||||
COMPREPLY=() # Only one argument allowed
|
||||
elif [[ ${word} == */* ]] ; then
|
||||
_ipfs_hash_complete
|
||||
else
|
||||
_ipfs_pinned_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_commands()
|
||||
{
|
||||
_ipfs_comp "--flags --help"
|
||||
}
|
||||
|
||||
_ipfs_config()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--bool --json"
|
||||
elif [[ ${prev} == *.* ]] ; then
|
||||
COMPREPLY=() # Only one subheader of the config can be shown or edited.
|
||||
else
|
||||
_ipfs_comp "show edit replace"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_config_edit()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_config_replace()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--help"
|
||||
else
|
||||
_ipfs_filesystem_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_config_show()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_daemon()
|
||||
{
|
||||
if [[ ${prev} == "--routing" ]] ; then
|
||||
_ipfs_comp "dht dhtclient none" # TODO: Solve autocomplete bug for "="
|
||||
elif [[ ${prev} == "--mount-ipfs" ]] || [[ ${prev} == "--mount-ipns" ]] || [[ ${prev} == "=" ]]; then
|
||||
_ipfs_filesystem_complete
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--init --routing= --mount --writable --mount-ipfs= \
|
||||
--mount-ipns= --unrestricted-api --disable-transport-encryption \
|
||||
-- enable-gc --manage-fdlimit --offline --migrate --help"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_dag()
|
||||
{
|
||||
_ipfs_comp "get put --help"
|
||||
}
|
||||
|
||||
_ipfs_dag_get()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_dag_put()
|
||||
{
|
||||
if [[ ${prev} == "--format" ]] ; then
|
||||
_ipfs_comp "cbor placeholder1" # TODO: a) Which format more than cbor is valid? b) Solve autocomplete bug for "="
|
||||
elif [[ ${prev} == "--input-enc" ]] ; then
|
||||
_ipfs_comp "json placeholder1" # TODO: a) Which format more than json is valid? b) Solve autocomplete bug for "="
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--format= --input-enc= --help"
|
||||
else
|
||||
_ipfs_filesystem_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_dht()
|
||||
{
|
||||
_ipfs_comp "findpeer findprovs get provide put query --help"
|
||||
}
|
||||
|
||||
_ipfs_dht_findpeer()
|
||||
{
|
||||
_ipfs_comp "--verbose --help"
|
||||
}
|
||||
|
||||
_ipfs_dht_findprovs()
|
||||
{
|
||||
_ipfs_comp "--verbose --help"
|
||||
}
|
||||
|
||||
_ipfs_dht_get()
|
||||
{
|
||||
_ipfs_comp "--verbose --help"
|
||||
}
|
||||
|
||||
_ipfs_dht_provide()
|
||||
{
|
||||
_ipfs_comp "--recursive --verbose --help"
|
||||
}
|
||||
|
||||
_ipfs_dht_put()
|
||||
{
|
||||
_ipfs_comp "--verbose --help"
|
||||
}
|
||||
|
||||
_ipfs_dht_query()
|
||||
{
|
||||
_ipfs_comp "--verbose --help"
|
||||
}
|
||||
|
||||
_ipfs_diag()
|
||||
{
|
||||
_ipfs_comp "sys cmds net --help"
|
||||
}
|
||||
|
||||
_ipfs_diag_cmds()
|
||||
{
|
||||
if [[ ${prev} == "clear" ]] ; then
|
||||
return 0
|
||||
elif [[ ${prev} =~ ^-?[0-9]+$ ]] ; then
|
||||
_ipfs_comp "ns us µs ms s m h" # TODO: Trigger without space, eg. "ipfs diag set-time 10ns" not "... set-time 10 ns"
|
||||
elif [[ ${prev} == "set-time" ]] ; then
|
||||
_ipfs_help_only
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--verbose --help"
|
||||
else
|
||||
_ipfs_comp "clear set-time"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_diag_sys()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_diag_net()
|
||||
{
|
||||
if [[ ${prev} == "--vis" ]] ; then
|
||||
_ipfs_comp "d3 dot text" # TODO: Solve autocomplete bug for "="
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--timeout= --vis= --help"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_dns()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--recursive --help"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files()
|
||||
{
|
||||
_ipfs_comp "mv rm flush read write cp ls mkdir stat"
|
||||
}
|
||||
|
||||
_ipfs_files_mv()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--recursive --flush"
|
||||
elif [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files_rm()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--recursive --flush"
|
||||
elif [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
_ipfs_files_flush()
|
||||
{
|
||||
if [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files_read()
|
||||
{
|
||||
if [[ ${prev} == "--count" ]] || [[ ${prev} == "--offset" ]] ; then
|
||||
COMPREPLY=() # Numbers, just keep it empty
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--offset --count --help"
|
||||
elif [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files_write()
|
||||
{
|
||||
if [[ ${prev} == "--count" ]] || [[ ${prev} == "--offset" ]] ; then # Dirty check
|
||||
COMPREPLY=() # Numbers, just keep it empty
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--offset --count --create --truncate --help"
|
||||
elif [[ ${prev} == /* ]] ; then
|
||||
_ipfs_filesystem_complete
|
||||
elif [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files_cp()
|
||||
{
|
||||
if [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files_ls()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "-l --help"
|
||||
elif [[ ${prev} == /* ]] ; then
|
||||
COMPREPLY=() # Path exist
|
||||
elif [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files_mkdir()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--parents --help"
|
||||
|
||||
elif [[ ${prev} == /* ]] ; then
|
||||
COMPREPLY=() # Path exist
|
||||
elif [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_files_stat()
|
||||
{
|
||||
if [[ ${prev} == /* ]] ; then
|
||||
COMPREPLY=() # Path exist
|
||||
elif [[ ${word} == /* ]] ; then
|
||||
_ipfs_files_complete
|
||||
else
|
||||
COMPREPLY=( / )
|
||||
[[ $COMPREPLY = */ ]] && _do_comp -o nospace
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_file()
|
||||
{
|
||||
if [[ ${prev} == "ls" ]] ; then
|
||||
_ipfs_hash_complete
|
||||
else
|
||||
_ipfs_comp "ls --help"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_file_ls()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_get()
|
||||
{
|
||||
if [ "${prev}" == "--output" ] ; then
|
||||
_do_comp -o default # Re-enable default file read
|
||||
COMPREPLY=()
|
||||
elif [ "${prev}" == "--compression-level" ] ; then
|
||||
_ipfs_comp "-1 1 2 3 4 5 6 7 8 9" # TODO: Solve autocomplete bug for "="
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--output= --archive --compress --compression-level= --help"
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_id()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--format= --help"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_init()
|
||||
{
|
||||
_ipfs_comp "--bits --force --empty-repo --help"
|
||||
}
|
||||
|
||||
_ipfs_log()
|
||||
{
|
||||
_ipfs_comp "level ls tail --help"
|
||||
}
|
||||
|
||||
_ipfs_log_level()
|
||||
{
|
||||
# TODO: auto-complete subsystem and level
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_log_ls()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_log_tail()
|
||||
{
|
||||
_ipfs_help_only
|
||||
}
|
||||
|
||||
_ipfs_ls()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--headers --resolve-type=false --help"
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_mount()
|
||||
{
|
||||
if [[ ${prev} == "--ipfs-path" ]] || [[ ${prev} == "--ipns-path" ]] || [[ ${prev} == "=" ]] ; then
|
||||
_ipfs_filesystem_complete
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--ipfs-path= --ipns-path= --help"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_name()
|
||||
{
|
||||
_ipfs_comp "publish resolve --help"
|
||||
}
|
||||
|
||||
_ipfs_name_publish()
|
||||
{
|
||||
if [[ ${prev} == "--lifetime" ]] || [[ ${prev} == "--ttl" ]] ; then
|
||||
COMPREPLY=() # Accept only numbers
|
||||
elif [[ ${prev} =~ ^-?[0-9]+$ ]] ; then
|
||||
_ipfs_comp "ns us µs ms s m h" # TODO: Trigger without space, eg. "ipfs diag set-time 10ns" not "... set-time 10 ns"
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--resolve --lifetime --ttl --help"
|
||||
elif [[ ${word} == */ ]]; then
|
||||
_ipfs_hash_complete
|
||||
else
|
||||
_ipfs_pinned_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_name_resolve()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--recursive --nocache --help"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_object()
|
||||
{
|
||||
_ipfs_comp "data diff get links new patch put stat --help"
|
||||
}
|
||||
|
||||
_ipfs_object_data()
|
||||
{
|
||||
_ipfs_hash_complete
|
||||
}
|
||||
|
||||
_ipfs_object_diff()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--verbose --help"
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
_ipfs_object_get()
|
||||
{
|
||||
if [ "${prev}" == "--encoding" ] ; then
|
||||
_ipfs_comp "protobuf json xml"
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--encoding --help"
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_object_links()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--headers --help"
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_object_new()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--help"
|
||||
else
|
||||
_ipfs_comp "unixfs-dir"
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_object_patch()
|
||||
{
|
||||
if [[ -n "${COMP_WORDS[3]}" ]] ; then # Root merkledag object exist
|
||||
case "${COMP_WORDS[4]}" in
|
||||
append-data)
|
||||
_ipfs_help_only
|
||||
;;
|
||||
add-link)
|
||||
if [[ ${word} == -* ]] && [[ ${prev} == "add-link" ]] ; then # Dirty check
|
||||
_ipfs_comp "--create"
|
||||
#else
|
||||
# TODO: Hash path autocomplete. This is tricky, can be hash or a name.
|
||||
fi
|
||||
;;
|
||||
rm-link)
|
||||
_ipfs_hash_complete
|
||||
;;
|
||||
set-data)
|
||||
_ipfs_filesystem_complete
|
||||
;;
|
||||
*)
|
||||
_ipfs_comp "append-data add-link rm-link set-data"
|
||||
;;
|
||||
esac
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_object_put()
|
||||
{
|
||||
if [ "${prev}" == "--inputenc" ] ; then
|
||||
_ipfs_comp "protobuf json"
|
||||
elif [ "${prev}" == "--datafieldenc" ] ; then
|
||||
_ipfs_comp "text base64"
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--inputenc --datafieldenc --help"
|
||||
else
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_object_stat()
|
||||
{
|
||||
_ipfs_hash_complete
|
||||
}
|
||||
|
||||
_ipfs_pin()
|
||||
{
|
||||
_ipfs_comp "rm ls add --help"
|
||||
}
|
||||
|
||||
_ipfs_pin_add()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--recursive= --help"
|
||||
elif [[ ${word} == */ ]] && [[ ${word} != "/ipfs/" ]] ; then
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_pin_ls()
|
||||
{
|
||||
if [[ ${prev} == "--type" ]] || [[ ${prev} == "-t" ]] ; then
|
||||
_ipfs_comp "direct indirect recursive all" # TODO: Solve autocomplete bug for
|
||||
elif [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--count --quiet --type= --help"
|
||||
elif [[ ${word} == */ ]] && [[ ${word} != "/ipfs/" ]] ; then
|
||||
_ipfs_hash_complete
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_pin_rm()
|
||||
{
|
||||
if [[ ${word} == -* ]] ; then
|
||||
_ipfs_comp "--recursive --help"
|
||||
elif [[ ${word} == */ ]] && [[ ${word} != "/ipfs/" ]] ; then
|
||||
COMPREPLY=() # TODO: _ipfs_hash_complete() + List local pinned hashes as default?
|
||||
fi
|
||||
}
|
||||
|
||||
_ipfs_ping()
|
||||
{
|
||||
_ipfs_comp "--count= --help"
|
||||
}
|
||||
|
||||
_ipfs_pubsub()
|
||||
{
|
||||
_ipfs_comp "ls peers pub sub --help"
|
||||
}
|
||||
|
||||
# ipfs pubsub ls takes no arguments beyond --help.
_ipfs_pubsub_ls()
{
    _ipfs_help_only
}

# ipfs pubsub peers takes no arguments beyond --help.
_ipfs_pubsub_peers()
{
    _ipfs_help_only
}

# ipfs pubsub pub takes no arguments beyond --help.
_ipfs_pubsub_pub()
{
    _ipfs_help_only
}

# ipfs pubsub sub: flag completion only.
_ipfs_pubsub_sub()
{
    _ipfs_comp "--discover --help"
}

# ipfs refs: complete --format placeholders, or the remaining flags.
_ipfs_refs()
{
    if [[ ${prev} == "--format" ]] ; then
        _ipfs_comp "src dst linkname"
    elif [[ ${word} == -* ]] ; then
        _ipfs_comp "local --format= --edges --unique --recursive --help"
    #else
        # TODO: Use "ipfs ref" and combine it with autocomplete, see _ipfs_hash_complete
    fi
}
|
||||
|
||||
# ipfs refs local takes no arguments beyond --help.
_ipfs_refs_local()
{
    _ipfs_help_only
}

# Subcommands of "ipfs repo".
_ipfs_repo()
{
    _ipfs_comp "fsck gc stat verify version --help"
}

# ipfs repo version: flag completion only.
_ipfs_repo_version()
{
    _ipfs_comp "--quiet --help"
}

# ipfs repo verify takes no arguments beyond --help.
_ipfs_repo_verify()
{
    _ipfs_help_only
}

# ipfs repo gc: flag completion only.
_ipfs_repo_gc()
{
    _ipfs_comp "--quiet --help"
}

# ipfs repo stat: flag completion only.
_ipfs_repo_stat()
{
    _ipfs_comp "--human --help"
}

# ipfs repo fsck takes no arguments beyond --help.
_ipfs_repo_fsck()
{
    _ipfs_help_only
}

# ipfs resolve: complete /ipfs/ paths, flags, or suggest the path prefixes.
_ipfs_resolve()
{
    case "${word}" in
        /ipfs/*)
            _ipfs_hash_complete
            ;;
        /ipns/*)
            COMPREPLY=() # Can't autocomplete ipns
            ;;
        -*)
            _ipfs_comp "--recursive --help"
            ;;
        *)
            opts="/ipns/ /ipfs/"
            COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
            [[ $COMPREPLY = */ ]] && _do_comp -o nospace
            ;;
    esac
}
|
||||
|
||||
# Subcommands of "ipfs stats".
_ipfs_stats()
{
    _ipfs_comp "bitswap bw repo --help"
}

# ipfs stats bitswap takes no arguments beyond --help.
_ipfs_stats_bitswap()
{
    _ipfs_help_only
}

# ipfs stats bw: flag completion only.
_ipfs_stats_bw()
{
    # TODO: Which protocol is valid?
    _ipfs_comp "--peer= --proto= --poll --interval= --help"
}

# ipfs stats repo: flag completion only.
_ipfs_stats_repo()
{
    _ipfs_comp "--human= --help"
}

# Subcommands of "ipfs swarm".
_ipfs_swarm()
{
    _ipfs_comp "addrs connect disconnect filters peers --help"
}

# ipfs swarm addrs: the "local" subcommand or --help.
_ipfs_swarm_addrs()
{
    _ipfs_comp "local --help"
}

# ipfs swarm addrs local: flag completion only.
_ipfs_swarm_addrs_local()
{
    _ipfs_comp "--id --help"
}

# ipfs swarm connect: the argument is a multiaddr.
_ipfs_swarm_connect()
{
    _ipfs_multiaddr_complete
}
|
||||
|
||||
# ipfs swarm disconnect: complete from the currently connected peer list.
_ipfs_swarm_disconnect()
{
    local oldIFS="$IFS" ; local IFS=$'\n' # Split the peer list on newlines only
    opts=$(for peerAddr in `ipfs swarm peers`; do echo ${peerAddr} ; done)
    IFS="$oldIFS" # Restore the default word splitting
    COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
    [[ $COMPREPLY = */ ]] && _do_comp -o nospace -o filenames
}

# ipfs swarm filters: a multiaddr after add/rm, otherwise the subcommands.
_ipfs_swarm_filters()
{
    case "${prev}" in
        add|rm)
            _ipfs_multiaddr_complete
            ;;
        *)
            _ipfs_comp "add rm --help"
            ;;
    esac
}

# ipfs swarm filters add takes no arguments beyond --help.
_ipfs_swarm_filters_add()
{
    _ipfs_help_only
}

# ipfs swarm filters rm takes no arguments beyond --help.
_ipfs_swarm_filters_rm()
{
    _ipfs_help_only
}

# ipfs swarm peers takes no arguments beyond --help.
_ipfs_swarm_peers()
{
    _ipfs_help_only
}

# Subcommands of "ipfs tar".
_ipfs_tar()
{
    _ipfs_comp "add cat --help"
}

# ipfs tar add: flags, or fall back to local filesystem completion.
_ipfs_tar_add()
{
    case "${word}" in
        -*) _ipfs_comp "--help" ;;
        *)  _ipfs_filesystem_complete ;;
    esac
}

# ipfs tar cat: flags, or fall back to local filesystem completion.
_ipfs_tar_cat()
{
    case "${word}" in
        -*) _ipfs_comp "--help" ;;
        *)  _ipfs_filesystem_complete ;;
    esac
}
|
||||
|
||||
# ipfs update: flags, or the update subcommands.
_ipfs_update()
{
    case "${word}" in
        -*)
            _ipfs_comp "--version" # TODO: How does "--verbose" option work?
            ;;
        *)
            _ipfs_comp "versions version install stash revert fetch"
            ;;
    esac
}

# ipfs update install: complete a version from "ipfs update versions".
_ipfs_update_install()
{
    if [[ ${prev} == v*.*.* ]] ; then
        COMPREPLY=() # A version was already supplied, nothing left to suggest
    elif [[ ${word} == -* ]] ; then
        _ipfs_comp "--version"
    else
        local oldIFS="$IFS" ; local IFS=$'\n' # Split the version list on newlines only
        opts=$(for ver in `ipfs update versions`; do echo ${ver} ; done)
        IFS="$oldIFS" # Restore the default word splitting
        COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
    fi
}

# ipfs update stash: flag completion only.
_ipfs_update_stash()
{
    if [[ ${word} == -* ]] ; then
        _ipfs_comp "--tag --help"
    fi
}

# ipfs update fetch: complete --output's path argument, or the flags.
_ipfs_update_fetch()
{
    if [[ ${prev} == "--output" ]] ; then
        _ipfs_filesystem_complete
    elif [[ ${word} == -* ]] ; then
        _ipfs_comp "--output --help"
    fi
}

# ipfs version: flag completion only.
_ipfs_version()
{
    _ipfs_comp "--number --commit --repo"
}
|
||||
|
||||
# Complete an IPFS path by listing the entries beneath the last fully-typed
# directory component of ${word} with `ipfs file ls`.
# Sets COMPREPLY; enables nospace/filenames handling when a directory-like
# candidate (trailing "/") was produced.
_ipfs_hash_complete()
{
    local lastDir=${word%/*}/
    local OLDIFS="$IFS" ; local IFS=$'\n' # Change divider for iterator one line below
    opts=$(for x in `ipfs file ls ${lastDir}`; do echo ${lastDir}${x}/ ; done) # TODO: Implement "ipfs file ls -F" to get rid of frontslash after files. This take long time to run first time on a new shell.
    IFS="$OLDIFS" # Reset divider to space, ' '
    COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
    [[ $COMPREPLY = */ ]] && _do_comp -o nospace -o filenames # Removing whitespace after output & handle output as filenames. (Only printing the latest folder of files.)
    return 0
}
|
||||
|
||||
# Complete an MFS path for "ipfs files" by listing the entries beneath the
# last fully-typed directory component of ${word}.
_ipfs_files_complete()
{
    local lastDir=${word%/*}/

    # Iterate over whole output lines of `ipfs files ls`, not over words.
    local oldIFS="$IFS" ; local IFS=$'\n'
    opts=$(for entry in `ipfs files ls ${lastDir}`; do echo ${lastDir}${entry}/ ; done) # TODO: Implement "ipfs files ls -F" to get rid of frontslash after files. This does currently throw "Error: /cats/foo/ is not a directory"
    IFS="$oldIFS"

    COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
    [[ $COMPREPLY = */ ]] && _do_comp -o nospace -o filenames
    return 0
}
|
||||
|
||||
# Stepwise completion of a multiaddr such as /ip4/1.2.3.4/tcp/4001/ipfs/<id>:
# each branch below matches one more "/"-separated component already typed and
# suggests the next one.
_ipfs_multiaddr_complete()
{
    local lastDir=${word%/*}/
    # Special case
    if [[ ${word} == */"ipcidr"* ]] ; then # TODO: Broken, fix it.
        opts="1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32" # TODO: IPv6?
        COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
    # "Loop"
    elif [[ ${word} == /*/ ]] || [[ ${word} == /*/* ]] ; then
        # The deepest pattern wins, so completion advances one component at a
        # time; ${word##*/} strips everything up to the last "/" so compgen
        # only has to match the component currently being typed.
        if [[ ${word} == /*/*/*/*/*/ ]] ; then
            # Peer-ID position reached - nothing left to suggest.
            COMPREPLY=()
        elif [[ ${word} == /*/*/*/*/ ]] ; then
            word=${word##*/}
            opts="ipfs/ "
            COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
        elif [[ ${word} == /*/*/*/ ]] ; then
            word=${word##*/}
            opts="4001/ "
            COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
        elif [[ ${word} == /*/*/ ]] ; then
            word=${word##*/}
            opts="udp/ tcp/ ipcidr/"
            COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
        elif [[ ${word} == /*/ ]] ; then
            COMPREPLY=() # TODO: This need to return something to NOT break the function. Maybe a "/" in the end as well due to -o filename option.
        fi
        # Re-attach the already-typed prefix to the suggested component.
        COMPREPLY=${lastDir}${COMPREPLY}
    else # start case
        opts="/ip4/ /ip6/"
        COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
    fi
    [[ $COMPREPLY = */ ]] && _do_comp -o nospace -o filenames
    return 0
}
|
||||
|
||||
# Complete from the output of `ipfs pin ls`. Each output line is of the form
# "<hash> <pin type>"; once exactly one candidate remains, the trailing
# description is stripped so only the hash is inserted.
_ipfs_pinned_complete()
{
    local oldIFS="$IFS" ; local IFS=$'\n'
    local pinnedList=$(ipfs pin ls)
    COMPREPLY=( $(compgen -W "${pinnedList}" -- ${word}) )
    IFS="$oldIFS"
    if [[ ${#COMPREPLY[*]} -eq 1 ]]; then # Only one completion, remove pretty output
        COMPREPLY=( ${COMPREPLY[0]/ *//} ) # Drop the ' ' and everything after it
        [[ $COMPREPLY = */ ]] && _do_comp -o nospace # Suppress the trailing space
    fi
}
|
||||
# Fall back to bash's regular file/directory completion for local paths.
_ipfs_filesystem_complete()
{
    _do_comp -o default # Re-enable default file read
    COMPREPLY=()
}
|
||||
|
||||
# Entry point for ipfs bash completion: dispatches to the per-command and
# per-subcommand helper functions based on the cursor position.
_ipfs()
{
    COMPREPLY=()
    _do_comp +o default # Disable default to not deny completion, see: http://stackoverflow.com/a/19062943/1216348

    local word="${COMP_WORDS[COMP_CWORD]}"
    local prev="${COMP_WORDS[COMP_CWORD-1]}"

    if [[ ${COMP_CWORD} -eq 1 ]] ; then
        # Completing the top-level command name.
        local cmds="add bitswap block bootstrap cat commands config daemon dag dht \
diag dns file files get id init log ls mount name object pin ping pubsub \
refs repo resolve stats swarm tar update version"
        COMPREPLY=( $(compgen -W "${cmds}" -- ${word}) )
    elif [[ ${COMP_CWORD} -eq 2 ]] ; then
        # Completing the first argument of a known command.
        local command="${COMP_WORDS[1]}"
        eval "_ipfs_$command" 2> /dev/null
    else
        # Prefer a command_subcommand helper; fall back to the command helper.
        local command="${COMP_WORDS[1]}"
        local subcommand="${COMP_WORDS[2]}"
        eval "_ipfs_${command}_${subcommand}" 2> /dev/null && return
        eval "_ipfs_$command" 2> /dev/null
    fi
}
complete -F _ipfs ipfs
|
||||
@ -30,8 +30,7 @@ SUPPORTED_PLATFORMS += openbsd-amd64
|
||||
SUPPORTED_PLATFORMS += netbsd-386
|
||||
SUPPORTED_PLATFORMS += netbsd-amd64
|
||||
|
||||
space:=
|
||||
space+=
|
||||
space:=$() $()
|
||||
comma:=,
|
||||
join-with=$(subst $(space),$1,$(strip $2))
|
||||
|
||||
|
||||
@ -242,6 +242,20 @@ func (ps *PeeringService) AddPeer(info peer.AddrInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
// ListPeers lists peers in the peering service.
|
||||
func (ps *PeeringService) ListPeers() []peer.AddrInfo {
|
||||
ps.mu.RLock()
|
||||
defer ps.mu.RUnlock()
|
||||
|
||||
out := make([]peer.AddrInfo, 0, len(ps.peers))
|
||||
for id, addrs := range ps.peers {
|
||||
ai := peer.AddrInfo{ID: id}
|
||||
ai.Addrs = append(ai.Addrs, addrs.addrs...)
|
||||
out = append(out, ai)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// RemovePeer removes a peer from the peering service. This function may be
|
||||
// safely called at any time: before the service is started, while running, or
|
||||
// after it stops.
|
||||
|
||||
@ -39,6 +39,7 @@ func TestPeeringService(t *testing.T) {
|
||||
|
||||
// peer 1 -> 2
|
||||
ps1.AddPeer(peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
|
||||
require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
|
||||
|
||||
// We haven't started so we shouldn't have any peers.
|
||||
require.Never(t, func() bool {
|
||||
@ -66,7 +67,7 @@ func TestPeeringService(t *testing.T) {
|
||||
t.Logf("waiting for h1's connection to h3 to work")
|
||||
require.NoError(t, h1.Connect(ctx, peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()}))
|
||||
require.Eventually(t, func() bool {
|
||||
return h1.Network().Connectedness(h2.ID()) == network.Connected
|
||||
return h1.Network().Connectedness(h3.ID()) == network.Connected
|
||||
}, 30*time.Second, 100*time.Millisecond)
|
||||
|
||||
require.Len(t, h1.Network().Peers(), 3)
|
||||
@ -109,6 +110,7 @@ func TestPeeringService(t *testing.T) {
|
||||
|
||||
// Unprotect 2 from 1.
|
||||
ps1.RemovePeer(h2.ID())
|
||||
require.NotContains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
|
||||
|
||||
// Trim connections.
|
||||
h1.ConnManager().TrimOpenConns(ctx)
|
||||
@ -127,7 +129,9 @@ func TestPeeringService(t *testing.T) {
|
||||
|
||||
// Until added back
|
||||
ps1.AddPeer(peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
|
||||
require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
|
||||
ps1.AddPeer(peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()})
|
||||
require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()})
|
||||
t.Logf("wait for h1 to connect to h2 and h3 again")
|
||||
require.Eventually(t, func() bool {
|
||||
return h1.Network().Connectedness(h2.ID()) == network.Connected
|
||||
@ -142,7 +146,9 @@ func TestPeeringService(t *testing.T) {
|
||||
|
||||
// Adding and removing should work after stopping.
|
||||
ps1.AddPeer(peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()})
|
||||
require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()})
|
||||
ps1.RemovePeer(h2.ID())
|
||||
require.NotContains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
|
||||
}
|
||||
|
||||
func TestNextBackoff(t *testing.T) {
|
||||
|
||||
@ -1,16 +1,13 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-ipfs/core/coredag"
|
||||
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
multicodec "github.com/ipld/go-ipld-prime/multicodec"
|
||||
)
|
||||
|
||||
// PluginIPLD is an interface that can be implemented to add handlers for
|
||||
// for different IPLD formats
|
||||
// for different IPLD codecs
|
||||
type PluginIPLD interface {
|
||||
Plugin
|
||||
|
||||
RegisterBlockDecoders(dec ipld.BlockDecoder) error
|
||||
RegisterInputEncParsers(iec coredag.InputEncParsers) error
|
||||
Register(multicodec.Registry) error
|
||||
}
|
||||
|
||||
@ -10,14 +10,13 @@ import (
|
||||
|
||||
config "github.com/ipfs/go-ipfs-config"
|
||||
cserialize "github.com/ipfs/go-ipfs-config/serialize"
|
||||
"github.com/ipld/go-ipld-prime/multicodec"
|
||||
|
||||
"github.com/ipfs/go-ipfs/core"
|
||||
"github.com/ipfs/go-ipfs/core/coreapi"
|
||||
coredag "github.com/ipfs/go-ipfs/core/coredag"
|
||||
plugin "github.com/ipfs/go-ipfs/plugin"
|
||||
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
|
||||
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
logging "github.com/ipfs/go-log"
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
@ -298,7 +297,7 @@ func (loader *PluginLoader) Start(node *core.IpfsNode) error {
|
||||
return loader.transition(loaderStarting, loaderStarted)
|
||||
}
|
||||
|
||||
// StopDaemon stops all long-running plugins.
|
||||
// Close stops all long-running plugins.
|
||||
func (loader *PluginLoader) Close() error {
|
||||
switch loader.state {
|
||||
case loaderClosing, loaderFailed, loaderClosed:
|
||||
@ -335,11 +334,7 @@ func injectDatastorePlugin(pl plugin.PluginDatastore) error {
|
||||
}
|
||||
|
||||
func injectIPLDPlugin(pl plugin.PluginIPLD) error {
|
||||
err := pl.RegisterBlockDecoders(ipld.DefaultBlockDecoder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pl.RegisterInputEncParsers(coredag.DefaultInputEncParsers)
|
||||
return pl.Register(multicodec.DefaultRegistry)
|
||||
}
|
||||
|
||||
func injectTracerPlugin(pl plugin.PluginTracer) error {
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
pluginflatfs "github.com/ipfs/go-ipfs/plugin/plugins/flatfs"
|
||||
pluginipldgit "github.com/ipfs/go-ipfs/plugin/plugins/git"
|
||||
pluginlevelds "github.com/ipfs/go-ipfs/plugin/plugins/levelds"
|
||||
pluginpeerlog "github.com/ipfs/go-ipfs/plugin/plugins/peerlog"
|
||||
)
|
||||
|
||||
// DO NOT EDIT THIS FILE
|
||||
@ -16,4 +17,5 @@ func init() {
|
||||
Preload(pluginbadgerds.Plugins...)
|
||||
Preload(pluginflatfs.Plugins...)
|
||||
Preload(pluginlevelds.Plugins...)
|
||||
Preload(pluginpeerlog.Plugins...)
|
||||
}
|
||||
|
||||
@ -8,3 +8,4 @@ ipldgit github.com/ipfs/go-ipfs/plugin/plugins/git *
|
||||
badgerds github.com/ipfs/go-ipfs/plugin/plugins/badgerds *
|
||||
flatfs github.com/ipfs/go-ipfs/plugin/plugins/flatfs *
|
||||
levelds github.com/ipfs/go-ipfs/plugin/plugins/levelds *
|
||||
peerlog github.com/ipfs/go-ipfs/plugin/plugins/peerlog *
|
||||
@ -1,6 +1,6 @@
|
||||
include mk/header.mk
|
||||
|
||||
$(d)_plugins:=$(d)/git $(d)/badgerds $(d)/flatfs $(d)/levelds
|
||||
$(d)_plugins:=$(d)/git $(d)/badgerds $(d)/flatfs $(d)/levelds $(d)/peerlog
|
||||
$(d)_plugins_so:=$(addsuffix .so,$($(d)_plugins))
|
||||
$(d)_plugins_main:=$(addsuffix /main/main.go,$($(d)_plugins))
|
||||
|
||||
@ -12,7 +12,7 @@ $($(d)_plugins_main):
|
||||
|
||||
$($(d)_plugins_so): %.so : %/main/main.go
|
||||
$($(d)_plugins_so): $$(DEPS_GO) ALWAYS
|
||||
$(GOCC) build -buildmode=plugin -i -pkgdir "$(GOPATH)/pkg/linux_amd64_dynlink" $(go-flags-with-tags) -o "$@" "$(call go-pkg-name,$(basename $@))/main"
|
||||
$(GOCC) build -buildmode=plugin -pkgdir "$(GOPATH)/pkg/linux_amd64_dynlink" $(go-flags-with-tags) -o "$@" "$(call go-pkg-name,$(basename $@))/main"
|
||||
chmod +x "$@"
|
||||
|
||||
CLEAN += $($(d)_plugins_so)
|
||||
|
||||
@ -2,17 +2,15 @@ package git
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"github.com/ipfs/go-ipfs/core/coredag"
|
||||
"github.com/ipfs/go-ipfs/plugin"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-ipld-format"
|
||||
// Note that depending on this package registers it's multicodec encoder and decoder.
|
||||
git "github.com/ipfs/go-ipld-git"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"github.com/ipld/go-ipld-prime"
|
||||
"github.com/ipld/go-ipld-prime/multicodec"
|
||||
mc "github.com/multiformats/go-multicodec"
|
||||
)
|
||||
|
||||
// Plugins is exported list of plugins that will be loaded
|
||||
@ -36,40 +34,21 @@ func (*gitPlugin) Init(_ *plugin.Environment) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*gitPlugin) RegisterBlockDecoders(dec format.BlockDecoder) error {
|
||||
dec.Register(cid.GitRaw, git.DecodeBlock)
|
||||
func (*gitPlugin) Register(reg multicodec.Registry) error {
|
||||
// register a custom identifier in the reserved range for import of "zlib-encoded git objects."
|
||||
reg.RegisterDecoder(uint64(mc.ReservedStart+mc.GitRaw), decodeZlibGit)
|
||||
reg.RegisterEncoder(uint64(mc.GitRaw), git.Encode)
|
||||
reg.RegisterDecoder(uint64(mc.GitRaw), git.Decode)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*gitPlugin) RegisterInputEncParsers(iec coredag.InputEncParsers) error {
|
||||
iec.AddParser("raw", "git", parseRawGit)
|
||||
iec.AddParser("zlib", "git", parseZlibGit)
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseRawGit(r io.Reader, mhType uint64, mhLen int) ([]format.Node, error) {
|
||||
if mhType != math.MaxUint64 && mhType != mh.SHA1 {
|
||||
return nil, fmt.Errorf("unsupported mhType %d", mhType)
|
||||
}
|
||||
|
||||
if mhLen != -1 && mhLen != mh.DefaultLengths[mh.SHA1] {
|
||||
return nil, fmt.Errorf("invalid mhLen %d", mhLen)
|
||||
}
|
||||
|
||||
nd, err := git.ParseObject(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []format.Node{nd}, nil
|
||||
}
|
||||
|
||||
func parseZlibGit(r io.Reader, mhType uint64, mhLen int) ([]format.Node, error) {
|
||||
func decodeZlibGit(na ipld.NodeAssembler, r io.Reader) error {
|
||||
rc, err := zlib.NewReader(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
defer rc.Close()
|
||||
return parseRawGit(rc, mhType, mhLen)
|
||||
|
||||
return git.Decode(na, rc)
|
||||
}
|
||||
|
||||
@ -36,7 +36,7 @@ type plEvent struct {
|
||||
peer peer.ID
|
||||
}
|
||||
|
||||
// Log all the PeerIDs we see
|
||||
// Log all the PeerIDs. This is considered internal, unsupported, and may break at any point.
|
||||
//
|
||||
// Usage:
|
||||
// GOLOG_FILE=~/peer.log IPFS_LOGGING_FMT=json ipfs daemon
|
||||
@ -45,6 +45,7 @@ type plEvent struct {
|
||||
// {"level":"info","ts":"2020-02-10T13:54:59.095Z","logger":"plugin/peerlog","caller":"peerlog/peerlog.go:56","msg":"identified","peer":"QmS2H72gdrekXJggGdE9SunXPntBqdkJdkXQJjuxcH8Cbt","agent":"go-ipfs/0.5.0/"}
|
||||
//
|
||||
type peerLogPlugin struct {
|
||||
enabled bool
|
||||
droppedCount uint64
|
||||
events chan plEvent
|
||||
}
|
||||
@ -66,9 +67,35 @@ func (*peerLogPlugin) Version() string {
|
||||
return "0.1.0"
|
||||
}
|
||||
|
||||
// extractEnabled extracts the "Enabled" field from the plugin config.
// Do not follow this as a precedent, this is only applicable to this plugin,
// since it is internal-only, unsupported functionality.
// For supported functionality, we should rework the plugin API to support this use case
// of including plugins that are disabled by default.
func extractEnabled(config interface{}) bool {
	// The plugin stays disabled unless the config is a string map carrying an
	// explicit Enabled=true. Any other shape - nil config, non-map config,
	// missing, null, or non-boolean Enabled - leaves it disabled.
	cfg, ok := config.(map[string]interface{})
	if !ok {
		return false
	}
	enabled, ok := cfg["Enabled"].(bool)
	return ok && enabled
}
|
||||
|
||||
// Init initializes plugin
|
||||
func (pl *peerLogPlugin) Init(*plugin.Environment) error {
|
||||
func (pl *peerLogPlugin) Init(env *plugin.Environment) error {
|
||||
pl.events = make(chan plEvent, eventQueueSize)
|
||||
pl.enabled = extractEnabled(env.Config)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -153,6 +180,10 @@ func (pl *peerLogPlugin) emit(evt eventType, p peer.ID) {
|
||||
}
|
||||
|
||||
func (pl *peerLogPlugin) Start(node *core.IpfsNode) error {
|
||||
if !pl.enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure logs from this plugin get printed regardless of global IPFS_LOGGING value
|
||||
if err := logging.SetLogLevel("plugin/peerlog", "info"); err != nil {
|
||||
return fmt.Errorf("failed to set log level: %w", err)
|
||||
|
||||
49
plugin/plugins/peerlog/peerlog_test.go
Normal file
49
plugin/plugins/peerlog/peerlog_test.go
Normal file
@ -0,0 +1,49 @@
|
||||
package peerlog
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestExtractEnabled(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
name string
|
||||
config interface{}
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "nil config returns false",
|
||||
config: nil,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns false when config is not a string map",
|
||||
config: 1,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns false when config has no Enabled field",
|
||||
config: map[string]interface{}{},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns false when config has a null Enabled field",
|
||||
config: map[string]interface{}{"Enabled": nil},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns false when config has a non-boolean Enabled field",
|
||||
config: map[string]interface{}{"Enabled": 1},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns the vlaue of the Enabled field",
|
||||
config: map[string]interface{}{"Enabled": true},
|
||||
expected: true,
|
||||
},
|
||||
} {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
isEnabled := extractEnabled(c.config)
|
||||
if isEnabled != c.expected {
|
||||
t.Fatalf("expected %v, got %v", c.expected, isEnabled)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -127,11 +127,7 @@ func TestHttpFetch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFetchBinary(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "fetchtest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@ -10,7 +10,7 @@ import (
|
||||
|
||||
const (
|
||||
// Current distribution to fetch migrations from
|
||||
CurrentIpfsDist = "/ipfs/QmVxxcTSuryJYdQJGcS8SyhzN7NBNLTqVPAxpu6gp2ZcrR"
|
||||
CurrentIpfsDist = "/ipfs/QmP7tLxzhLU1KauTRX3jkVkF93pCv4skcceyUYMhf4AKJR" // fs-repo-migrations v2.0.2
|
||||
// Latest distribution path. Default for fetchers.
|
||||
LatestIpfsDist = "/ipns/dist.ipfs.io"
|
||||
|
||||
|
||||
@ -13,13 +13,7 @@ var (
|
||||
)
|
||||
|
||||
func TestRepoDir(t *testing.T) {
|
||||
var err error
|
||||
fakeHome, err = ioutil.TempDir("", "testhome")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(fakeHome)
|
||||
|
||||
fakeHome = t.TempDir()
|
||||
os.Setenv("HOME", fakeHome)
|
||||
fakeIpfs = filepath.Join(fakeHome, ".ipfs")
|
||||
|
||||
|
||||
@ -2,6 +2,7 @@ package ipfsfetcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@ -32,10 +33,9 @@ const (
|
||||
)
|
||||
|
||||
type IpfsFetcher struct {
|
||||
distPath string
|
||||
limit int64
|
||||
bootstrap []string
|
||||
peers []peer.AddrInfo
|
||||
distPath string
|
||||
limit int64
|
||||
repoRoot *string
|
||||
|
||||
openOnce sync.Once
|
||||
openErr error
|
||||
@ -56,12 +56,15 @@ type IpfsFetcher struct {
|
||||
//
|
||||
// Specifying "" for distPath sets the default IPNS path.
|
||||
// Specifying 0 for fetchLimit sets the default, -1 means no limit.
|
||||
func NewIpfsFetcher(distPath string, fetchLimit int64, bootstrap []string, peers []peer.AddrInfo) *IpfsFetcher {
|
||||
//
|
||||
// Bootstrap and peer information is read from the IPFS config file in
|
||||
// repoRoot, unless repoRoot is nil. If repoRoot is empty (""), then read the
|
||||
// config from the default IPFS directory.
|
||||
func NewIpfsFetcher(distPath string, fetchLimit int64, repoRoot *string) *IpfsFetcher {
|
||||
f := &IpfsFetcher{
|
||||
limit: defaultFetchLimit,
|
||||
distPath: migrations.LatestIpfsDist,
|
||||
bootstrap: bootstrap,
|
||||
peers: peers,
|
||||
limit: defaultFetchLimit,
|
||||
distPath: migrations.LatestIpfsDist,
|
||||
repoRoot: repoRoot,
|
||||
}
|
||||
|
||||
if distPath != "" {
|
||||
@ -88,7 +91,8 @@ func (f *IpfsFetcher) Fetch(ctx context.Context, filePath string) (io.ReadCloser
|
||||
// Initialize and start IPFS node on first call to Fetch, since the fetcher
|
||||
// may be created but not used.
|
||||
f.openOnce.Do(func() {
|
||||
f.ipfsTmpDir, f.openErr = initTempNode(ctx, f.bootstrap, f.peers)
|
||||
bootstrap, peers := readIpfsConfig(f.repoRoot)
|
||||
f.ipfsTmpDir, f.openErr = initTempNode(ctx, bootstrap, peers)
|
||||
if f.openErr != nil {
|
||||
return
|
||||
}
|
||||
@ -277,3 +281,52 @@ func parsePath(fetchPath string) (ipath.Path, error) {
|
||||
}
|
||||
return ipfsPath, ipfsPath.IsValid()
|
||||
}
|
||||
|
||||
// readIpfsConfig does a best-effort read of the Bootstrap and Peering.Peers
// sections of the IPFS config under repoRoot. A nil repoRoot yields (nil, nil).
// The two sections are decoded independently so that a malformed value in one
// does not prevent reading the other; all failures are reported to stderr and
// never returned to the caller.
func readIpfsConfig(repoRoot *string) (bootstrap []string, peers []peer.AddrInfo) {
	if repoRoot == nil {
		return
	}

	cfgPath, err := config.Filename(*repoRoot)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	cfgFile, err := os.Open(cfgPath)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer cfgFile.Close()

	// Attempt to read bootstrap addresses
	var bootstrapCfg struct {
		Bootstrap []string
	}
	err = json.NewDecoder(cfgFile).Decode(&bootstrapCfg)
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot read bootstrap peers from config")
	} else {
		bootstrap = bootstrapCfg.Bootstrap
	}

	// Rewind for the second, independent decode of the same file.
	if _, err = cfgFile.Seek(0, 0); err != nil {
		// If Seek fails, only log the error and continue on to try to read the
		// peering config anyway as it might still be readable
		fmt.Fprintln(os.Stderr, err)
	}

	// Attempt to read peers
	var peeringCfg struct {
		Peering config.Peering
	}
	err = json.NewDecoder(cfgFile).Decode(&peeringCfg)
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot read peering from config")
	} else {
		peers = peeringCfg.Peering.Peers
	}

	return
}
|
||||
|
||||
@ -25,7 +25,7 @@ func TestIpfsFetcher(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
fetcher := NewIpfsFetcher("", 0, nil, nil)
|
||||
fetcher := NewIpfsFetcher("", 0, nil)
|
||||
defer fetcher.Close()
|
||||
|
||||
rc, err := fetcher.Fetch(ctx, "go-ipfs/versions")
|
||||
@ -63,11 +63,11 @@ func TestInitIpfsFetcher(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
f := NewIpfsFetcher("", 0, nil, nil)
|
||||
f := NewIpfsFetcher("", 0, nil)
|
||||
defer f.Close()
|
||||
|
||||
// Init ipfs repo
|
||||
f.ipfsTmpDir, f.openErr = initTempNode(ctx, f.bootstrap, f.peers)
|
||||
f.ipfsTmpDir, f.openErr = initTempNode(ctx, nil, nil)
|
||||
if f.openErr != nil {
|
||||
t.Fatalf("failed to initialize ipfs node: %s", f.openErr)
|
||||
}
|
||||
@ -110,6 +110,144 @@ func TestInitIpfsFetcher(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestReadIpfsConfig covers readIpfsConfig against a missing repo directory,
// a nil repoRoot, and a well-formed config containing both the Bootstrap and
// Peering sections.
func TestReadIpfsConfig(t *testing.T) {
	var testConfig = `
{
	"Bootstrap": [
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
		"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
	],
	"Migration": {
		"DownloadSources": ["IPFS", "HTTP", "127.0.0.1", "https://127.0.1.1"],
		"Keep": "cache"
	},
	"Peering": {
		"Peers": [
			{
				"ID": "12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5",
				"Addrs": ["/ip4/127.0.0.1/tcp/4001", "/ip4/127.0.0.1/udp/4001/quic"]
			}
		]
	}
}
`

	// A repo root that does not exist must fail quietly with nil results.
	noSuchDir := "no_such_dir-5953aa51-1145-4efd-afd1-a069075fcf76"
	bootstrap, peers := readIpfsConfig(&noSuchDir)
	if bootstrap != nil {
		t.Error("expected nil bootstrap")
	}
	if peers != nil {
		t.Error("expected nil peers")
	}

	tmpDir := makeConfig(t, testConfig)

	// A nil repo root means "do not read any config at all".
	bootstrap, peers = readIpfsConfig(nil)
	if bootstrap != nil || peers != nil {
		t.Fatal("expected nil ipfs config items")
	}

	// Reading the config written above must surface both sections.
	bootstrap, peers = readIpfsConfig(&tmpDir)
	if len(bootstrap) != 2 {
		t.Fatal("wrong number of bootstrap addresses")
	}
	if bootstrap[0] != "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt" {
		t.Fatal("wrong bootstrap address")
	}

	if len(peers) != 1 {
		t.Fatal("wrong number of peers")
	}

	peer := peers[0]
	if peer.ID.String() != "12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5" {
		t.Errorf("wrong ID for first peer")
	}
	if len(peer.Addrs) != 2 {
		t.Error("wrong number of addrs for first peer")
	}
}
|
||||
|
||||
func TestBadBootstrappingIpfsConfig(t *testing.T) {
|
||||
const configBadBootstrap = `
|
||||
{
|
||||
"Bootstrap": "unreadable",
|
||||
"Migration": {
|
||||
"DownloadSources": ["IPFS", "HTTP", "127.0.0.1"],
|
||||
"Keep": "cache"
|
||||
},
|
||||
"Peering": {
|
||||
"Peers": [
|
||||
{
|
||||
"ID": "12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5",
|
||||
"Addrs": ["/ip4/127.0.0.1/tcp/4001", "/ip4/127.0.0.1/udp/4001/quic"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
tmpDir := makeConfig(t, configBadBootstrap)
|
||||
|
||||
bootstrap, peers := readIpfsConfig(&tmpDir)
|
||||
if bootstrap != nil {
|
||||
t.Fatal("expected nil bootstrap")
|
||||
}
|
||||
if len(peers) != 1 {
|
||||
t.Fatal("wrong number of peers")
|
||||
}
|
||||
if len(peers[0].Addrs) != 2 {
|
||||
t.Error("wrong number of addrs for first peer")
|
||||
}
|
||||
os.RemoveAll(tmpDir)
|
||||
}
|
||||
|
||||
func TestBadPeersIpfsConfig(t *testing.T) {
|
||||
const configBadPeers = `
|
||||
{
|
||||
"Bootstrap": [
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
|
||||
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
|
||||
],
|
||||
"Migration": {
|
||||
"DownloadSources": ["IPFS", "HTTP", "127.0.0.1"],
|
||||
"Keep": "cache"
|
||||
},
|
||||
"Peering": "Unreadable-data"
|
||||
}
|
||||
`
|
||||
|
||||
tmpDir := makeConfig(t, configBadPeers)
|
||||
|
||||
bootstrap, peers := readIpfsConfig(&tmpDir)
|
||||
if peers != nil {
|
||||
t.Fatal("expected nil peers")
|
||||
}
|
||||
if len(bootstrap) != 2 {
|
||||
t.Fatal("wrong number of bootstrap addresses")
|
||||
}
|
||||
if bootstrap[0] != "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt" {
|
||||
t.Fatal("wrong bootstrap address")
|
||||
}
|
||||
}
|
||||
|
||||
func makeConfig(t *testing.T, configData string) string {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
cfgFile, err := os.Create(filepath.Join(tmpDir, "config"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = cfgFile.Write([]byte(configData)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err = cfgFile.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return tmpDir
|
||||
}
|
||||
|
||||
func skipUnlessEpic(t *testing.T) {
|
||||
if os.Getenv("IPFS_EPIC_TEST") == "" {
|
||||
t.SkipNow()
|
||||
|
||||
@ -2,15 +2,20 @@ package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
config "github.com/ipfs/go-ipfs-config"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -107,6 +112,89 @@ func ExeName(name string) string {
|
||||
return name
|
||||
}
|
||||
|
||||
// ReadMigrationConfig reads the Migration section of the IPFS config, avoiding
|
||||
// reading anything other than the Migration section. That way, we're free to
|
||||
// make arbitrary changes to all _other_ sections in migrations.
|
||||
func ReadMigrationConfig(repoRoot string) (*config.Migration, error) {
|
||||
var cfg struct {
|
||||
Migration config.Migration
|
||||
}
|
||||
|
||||
cfgPath, err := config.Filename(repoRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfgFile, err := os.Open(cfgPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cfgFile.Close()
|
||||
|
||||
err = json.NewDecoder(cfgFile).Decode(&cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch cfg.Migration.Keep {
|
||||
case "":
|
||||
cfg.Migration.Keep = config.DefaultMigrationKeep
|
||||
case "discard", "cache", "keep":
|
||||
default:
|
||||
return nil, errors.New("unknown config value, Migrations.Keep must be 'cache', 'pin', or 'discard'")
|
||||
}
|
||||
|
||||
if len(cfg.Migration.DownloadSources) == 0 {
|
||||
cfg.Migration.DownloadSources = config.DefaultMigrationDownloadSources
|
||||
}
|
||||
|
||||
return &cfg.Migration, nil
|
||||
}
|
||||
|
||||
// GetMigrationFetcher creates one or more fetchers according to
|
||||
// downloadSources,
|
||||
func GetMigrationFetcher(downloadSources []string, distPath string, newIpfsFetcher func(string) Fetcher) (Fetcher, error) {
|
||||
const httpUserAgent = "go-ipfs"
|
||||
|
||||
var fetchers []Fetcher
|
||||
for _, src := range downloadSources {
|
||||
src := strings.TrimSpace(src)
|
||||
switch src {
|
||||
case "HTTPS", "https", "HTTP", "http":
|
||||
fetchers = append(fetchers, NewHttpFetcher(distPath, "", httpUserAgent, 0))
|
||||
case "IPFS", "ipfs":
|
||||
if newIpfsFetcher != nil {
|
||||
fetchers = append(fetchers, newIpfsFetcher(distPath))
|
||||
}
|
||||
default:
|
||||
u, err := url.Parse(src)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad gateway address: %s", err)
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "":
|
||||
u.Scheme = "https"
|
||||
case "https", "http":
|
||||
default:
|
||||
return nil, errors.New("bad gateway address: url scheme must be http or https")
|
||||
}
|
||||
fetchers = append(fetchers, NewHttpFetcher(distPath, u.String(), httpUserAgent, 0))
|
||||
case "":
|
||||
// Ignore empty string
|
||||
}
|
||||
}
|
||||
|
||||
switch len(fetchers) {
|
||||
case 0:
|
||||
return nil, errors.New("no sources specified")
|
||||
case 1:
|
||||
return fetchers[0], nil
|
||||
}
|
||||
|
||||
// Wrap fetchers in a MultiFetcher to try them in order
|
||||
return NewMultiFetcher(fetchers...), nil
|
||||
}
|
||||
|
||||
func migrationName(from, to int) string {
|
||||
return fmt.Sprintf("fs-repo-%d-to-%d", from, to)
|
||||
}
|
||||
|
||||
@ -3,20 +3,18 @@ package migrations
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
config "github.com/ipfs/go-ipfs-config"
|
||||
)
|
||||
|
||||
func TestFindMigrations(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "migratetest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@ -63,11 +61,7 @@ func TestFindMigrations(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFindMigrationsReverse(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "migratetest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@ -121,11 +115,7 @@ func TestFetchMigrations(t *testing.T) {
|
||||
defer ts.Close()
|
||||
fetcher := NewHttpFetcher(CurrentIpfsDist, ts.URL, "", 0)
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "migratetest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
needed := []string{"fs-repo-1-to-2", "fs-repo-2-to-3"}
|
||||
buf := new(strings.Builder)
|
||||
@ -157,16 +147,12 @@ func TestFetchMigrations(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRunMigrations(t *testing.T) {
|
||||
fakeHome, err := ioutil.TempDir("", "testhome")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(fakeHome)
|
||||
fakeHome := t.TempDir()
|
||||
|
||||
os.Setenv("HOME", fakeHome)
|
||||
fakeIpfs := filepath.Join(fakeHome, ".ipfs")
|
||||
|
||||
err = os.Mkdir(fakeIpfs, os.ModePerm)
|
||||
err := os.Mkdir(fakeIpfs, os.ModePerm)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -211,3 +197,205 @@ func createFakeBin(from, to int, tmpDir string) {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
var testConfig = `
|
||||
{
|
||||
"Bootstrap": [
|
||||
"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
|
||||
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
|
||||
],
|
||||
"Migration": {
|
||||
"DownloadSources": ["IPFS", "HTTP", "127.0.0.1", "https://127.0.1.1"],
|
||||
"Keep": "cache"
|
||||
},
|
||||
"Peering": {
|
||||
"Peers": [
|
||||
{
|
||||
"ID": "12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5",
|
||||
"Addrs": ["/ip4/127.0.0.1/tcp/4001", "/ip4/127.0.0.1/udp/4001/quic"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
func TestReadMigrationConfigDefaults(t *testing.T) {
|
||||
tmpDir := makeConfig(t, "{}")
|
||||
|
||||
cfg, err := ReadMigrationConfig(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if cfg.Keep != config.DefaultMigrationKeep {
|
||||
t.Error("expected default value for Keep")
|
||||
}
|
||||
|
||||
if len(cfg.DownloadSources) != len(config.DefaultMigrationDownloadSources) {
|
||||
t.Fatal("expected default number of download sources")
|
||||
}
|
||||
for i, src := range config.DefaultMigrationDownloadSources {
|
||||
if cfg.DownloadSources[i] != src {
|
||||
t.Errorf("wrong DownloadSource: %s", cfg.DownloadSources[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadMigrationConfigErrors(t *testing.T) {
|
||||
tmpDir := makeConfig(t, `{"Migration": {"Keep": "badvalue"}}`)
|
||||
|
||||
_, err := ReadMigrationConfig(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), "unknown") {
|
||||
t.Fatal("did not get expected error:", err)
|
||||
}
|
||||
|
||||
os.RemoveAll(tmpDir)
|
||||
_, err = ReadMigrationConfig(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
tmpDir = makeConfig(t, `}{`)
|
||||
_, err = ReadMigrationConfig(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadMigrationConfig(t *testing.T) {
|
||||
tmpDir := makeConfig(t, testConfig)
|
||||
|
||||
cfg, err := ReadMigrationConfig(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(cfg.DownloadSources) != 4 {
|
||||
t.Fatal("wrong number of DownloadSources")
|
||||
}
|
||||
expect := []string{"IPFS", "HTTP", "127.0.0.1", "https://127.0.1.1"}
|
||||
for i := range expect {
|
||||
if cfg.DownloadSources[i] != expect[i] {
|
||||
t.Errorf("wrong DownloadSource at %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Keep != "cache" {
|
||||
t.Error("wrong value for Keep")
|
||||
}
|
||||
}
|
||||
|
||||
type mockIpfsFetcher struct{}
|
||||
|
||||
func (m *mockIpfsFetcher) Fetch(ctx context.Context, filePath string) (io.ReadCloser, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockIpfsFetcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestGetMigrationFetcher(t *testing.T) {
|
||||
var f Fetcher
|
||||
var err error
|
||||
|
||||
newIpfsFetcher := func(distPath string) Fetcher {
|
||||
return &mockIpfsFetcher{}
|
||||
}
|
||||
|
||||
downloadSources := []string{"ftp://bad.gateway.io"}
|
||||
_, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err == nil || !strings.HasPrefix(err.Error(), "bad gateway addr") {
|
||||
t.Fatal("Expected bad gateway address error, got:", err)
|
||||
}
|
||||
|
||||
downloadSources = []string{"::bad.gateway.io"}
|
||||
_, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err == nil || !strings.HasPrefix(err.Error(), "bad gateway addr") {
|
||||
t.Fatal("Expected bad gateway address error, got:", err)
|
||||
}
|
||||
|
||||
downloadSources = []string{"http://localhost"}
|
||||
f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, ok := f.(*HttpFetcher); !ok {
|
||||
t.Fatal("expected HttpFetcher")
|
||||
}
|
||||
|
||||
downloadSources = []string{"ipfs"}
|
||||
f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, ok := f.(*mockIpfsFetcher); !ok {
|
||||
t.Fatal("expected IpfsFetcher")
|
||||
}
|
||||
|
||||
downloadSources = []string{"http"}
|
||||
f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, ok := f.(*HttpFetcher); !ok {
|
||||
t.Fatal("expected HttpFetcher")
|
||||
}
|
||||
|
||||
downloadSources = []string{"IPFS", "HTTPS"}
|
||||
f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mf, ok := f.(*MultiFetcher)
|
||||
if !ok {
|
||||
t.Fatal("expected MultiFetcher")
|
||||
}
|
||||
if mf.Len() != 2 {
|
||||
t.Fatal("expected 2 fetchers in MultiFetcher")
|
||||
}
|
||||
|
||||
downloadSources = []string{"ipfs", "https", "some.domain.io"}
|
||||
f, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mf, ok = f.(*MultiFetcher)
|
||||
if !ok {
|
||||
t.Fatal("expected MultiFetcher")
|
||||
}
|
||||
if mf.Len() != 3 {
|
||||
t.Fatal("expected 3 fetchers in MultiFetcher")
|
||||
}
|
||||
|
||||
downloadSources = nil
|
||||
_, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err == nil {
|
||||
t.Fatal("expected error when no sources specified")
|
||||
}
|
||||
|
||||
downloadSources = []string{"", ""}
|
||||
_, err = GetMigrationFetcher(downloadSources, "", newIpfsFetcher)
|
||||
if err == nil {
|
||||
t.Fatal("expected error when empty string fetchers specified")
|
||||
}
|
||||
}
|
||||
|
||||
func makeConfig(t *testing.T, configData string) string {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
cfgFile, err := os.Create(filepath.Join(tmpDir, "config"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = cfgFile.Write([]byte(configData)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err = cfgFile.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return tmpDir
|
||||
}
|
||||
|
||||
@ -33,14 +33,10 @@ func TestUnpackArchive(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnpackTgz(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "testunpacktgz")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
badTarGzip := filepath.Join(tmpDir, "bad.tar.gz")
|
||||
err = ioutil.WriteFile(badTarGzip, []byte("bad-data\n"), 0644)
|
||||
err := ioutil.WriteFile(badTarGzip, []byte("bad-data\n"), 0644)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -81,14 +77,10 @@ func TestUnpackTgz(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnpackZip(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "testunpackzip")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
badZip := filepath.Join(tmpDir, "bad.zip")
|
||||
err = ioutil.WriteFile(badZip, []byte("bad-data\n"), 0644)
|
||||
err := ioutil.WriteFile(badZip, []byte("bad-data\n"), 0644)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@ -98,6 +98,7 @@ test "$TEST_NO_PLUGIN" != 1 && test "$TEST_OS" = "LINUX" && test_set_prereq PLUG
|
||||
|
||||
# this may not be available, skip a few dependent tests
|
||||
type socat >/dev/null 2>&1 && test_set_prereq SOCAT
|
||||
type unzip >/dev/null 2>&1 && test_set_prereq UNZIP
|
||||
|
||||
|
||||
# Set a prereq as error messages are often different on Windows/Cygwin
|
||||
@ -268,7 +269,7 @@ test_launch_ipfs_daemon() {
|
||||
|
||||
# wait for api file to show up
|
||||
test_expect_success "api file shows up" '
|
||||
test_wait_for_file 50 100ms "$IPFS_PATH/api"
|
||||
test_wait_for_file 50 200ms "$IPFS_PATH/api"
|
||||
'
|
||||
|
||||
test_set_address_vars actual_daemon
|
||||
@ -280,6 +281,10 @@ test_launch_ipfs_daemon() {
|
||||
'
|
||||
}
|
||||
|
||||
test_launch_ipfs_daemon_without_network() {
|
||||
test_launch_ipfs_daemon --offline "$@"
|
||||
}
|
||||
|
||||
do_umount() {
|
||||
if [ "$(uname -s)" = "Linux" ]; then
|
||||
fusermount -u "$1"
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user