Merge branch 'master' into ajnavarro/swarm-peering-save

This commit is contained in:
Hector Sanjuan 2024-11-08 17:05:38 +01:00 committed by GitHub
commit bd2677c398
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
618 changed files with 34090 additions and 26699 deletions

View File

@ -1,37 +0,0 @@
# CircleCI "setup workflow" (dynamic config entry point).
# Computes pipeline parameters and hands off to .circleci/main.yml
# via the continuation orb.
version: 2.1
setup: true
orbs:
  continuation: circleci/continuation@0.2.0
jobs:
  generate-params:
    executor: continuation/default
    steps:
      - checkout
      - run:
          name: Generate params
          # for builds on the ipfs/kubo repo, use 2xlarge for faster builds
          # but since this is not available for many contributors, we otherwise use medium
          command: |
            echo "$CIRCLE_REPOSITORY_URL"
            if [ "$CIRCLE_REPOSITORY_URL" = 'git@github.com:ipfs/kubo.git' ]; then
              resource_class=2xlarge
              make_jobs=10
            else
              resource_class=medium
              make_jobs=3
            fi
            # <<- (not \<<-) so the heredoc parses; values are passed as
            # strings to match the string-typed pipeline parameters.
            cat <<- EOF > params.json
            {
              "resource_class": "$resource_class",
              "make_jobs": "$make_jobs"
            }
            EOF
            cat params.json
      - continuation/continue:
          parameters: params.json
          configuration_path: .circleci/main.yml
workflows:
  version: 2
  setup-workflow:
    jobs:
      - generate-params

View File

@ -1,400 +0,0 @@
version: 2.1
parameters:
resource_class:
type: string
default: medium
make_jobs:
type: string
default: 3
aliases:
make_out_dirs: &make_out_dirs
run: mkdir -p /tmp/circleci-artifacts /tmp/circleci-workspace /tmp/circleci-test-results/{unit,sharness}
restore_gomod: &restore_gomod
restore_cache:
keys:
- v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/kubo/go.sum" }}-{{ .Environment.CIRCLE_JOB }}
- v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/kubo/go.sum" }}-
- v5-dep-{{ .Branch }}-
- v5-dep-master-
store_gomod: &store_gomod
save_cache:
key: v5-dep-{{ .Branch }}-{{ checksum "~/ipfs/kubo/go.sum" }}-{{ .Environment.CIRCLE_JOB }}
paths:
- ~/go/pkg/mod
- ~/.cache/go-build/
default_environment: &default_environment
SERVICE: circle-ci
TRAVIS: 1
CIRCLE: 1
CIRCLE_TEST_REPORTS: /tmp/circleci-test-results
CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
GIT_PAGER: cat
executors:
golang:
docker:
- image: cimg/go:1.19.1
working_directory: ~/ipfs/kubo
environment:
<<: *default_environment
TEST_NO_DOCKER: 1
TEST_NO_FUSE: 1
TEST_VERBOSE: 1
node:
docker:
- image: circleci/node:14
working_directory: ~/ipfs/kubo
environment:
<<: *default_environment
node-browsers:
docker:
- image: circleci/node:16.12.0-browsers
working_directory: ~/ipfs/kubo
environment:
<<: *default_environment
NO_SANDBOX: true
LIBP2P_TCP_REUSEPORT: false
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
E2E_IPFSD_TYPE: go
dockerizer:
docker:
- image: cimg/go:1.19.1
environment:
IMAGE_NAME: ipfs/kubo
WIP_IMAGE_TAG: wip
jobs:
gobuild:
executor: golang
resource_class: 2xlarge+
steps:
- checkout
- *make_out_dirs
- *restore_gomod
- run:
command: make cmd/ipfs-try-build
environment:
TEST_NO_FUSE: 0
- run:
command: make cmd/ipfs-try-build
environment:
TEST_NO_FUSE: 1
- *store_gomod
golint:
executor: golang
steps:
- checkout
- *make_out_dirs
- *restore_gomod
- run: |
make -O test_go_lint
- *store_gomod
gotest:
executor: golang
steps:
- checkout
- *make_out_dirs
- *restore_gomod
- run: |
make -j 1 test/unit/gotest.junit.xml \
&& [[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- run:
when: always
command: bash <(curl -s https://codecov.io/bash) -cF unittests -X search -f coverage/unit_tests.coverprofile
- run:
command: |
# we want to first test with the kubo version in the go.mod file
go test -v ./...
# we also want to test the examples against the current version of kubo
# however, that version might be in a fork so we need to replace the dependency
# backup the go.mod and go.sum files to restore them after we run the tests
cp go.mod go.mod.bak
cp go.sum go.sum.bak
# make sure the examples run against the current version of kubo
go mod edit -replace github.com/ipfs/kubo=./../../..
go mod tidy
go test -v ./...
# restore the go.mod and go.sum files to their original state
mv go.mod.bak go.mod
mv go.sum.bak go.sum
working_directory: ~/ipfs/kubo/docs/examples/kubo-as-a-library
- run:
when: always
command: mv "test/unit/gotest.junit.xml" /tmp/circleci-test-results/unit
- *store_gomod
- store_test_results:
path: /tmp/circleci-test-results
# Save artifacts
- store_artifacts:
path: /tmp/circleci-artifacts
- store_artifacts:
path: /tmp/circleci-test-results
sharness:
machine:
image: ubuntu-2004:202010-01
resource_class: << pipeline.parameters.resource_class >>
working_directory: ~/ipfs/kubo
environment:
<<: *default_environment
TEST_NO_DOCKER: 0
TEST_NO_FUSE: 1
TEST_VERBOSE: 1
steps:
- run: sudo apt update
- run: |
mkdir ~/localgo && cd ~/localgo
wget https://golang.org/dl/go1.19.1.linux-amd64.tar.gz
tar xfz go1.19.1.linux-amd64.tar.gz
echo "export PATH=$(pwd)/go/bin:\$PATH" >> ~/.bashrc
- run: go version
- run: sudo apt install socat net-tools fish
- checkout
- run:
mkdir rb-pinning-service-api &&
cd rb-pinning-service-api &&
git init &&
git remote add origin https://github.com/ipfs-shipyard/rb-pinning-service-api.git &&
git fetch --depth 1 origin 773c3adbb421c551d2d89288abac3e01e1f7c3a8 &&
git checkout FETCH_HEAD
- run:
cd rb-pinning-service-api &&
(for i in {1..3}; do docker-compose pull && break || sleep 5; done) &&
docker-compose up -d
- *make_out_dirs
- *restore_gomod
- run:
name: Setup Environment Variables
# we need the docker host IP; all ports exported by child containers can be accessed there.
command: echo "export TEST_DOCKER_HOST=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+')" >> $BASH_ENV
- run:
echo TEST_DOCKER_HOST=$TEST_DOCKER_HOST &&
make -O -j << pipeline.parameters.make_jobs >> coverage/sharness_tests.coverprofile test/sharness/test-results/sharness.xml TEST_GENERATE_JUNIT=1 CONTINUE_ON_S_FAILURE=1 TEST_DOCKER_HOST=$TEST_DOCKER_HOST
- run:
when: always
command: bash <(curl -s https://codecov.io/bash) -cF sharness -X search -f coverage/sharness_tests.coverprofile
- run: mv "test/sharness/test-results/sharness.xml" /tmp/circleci-test-results/sharness
# make sure we fail if there are test failures
- run: find test/sharness/test-results -name 't*-*.sh.*.counts' | test/sharness/lib/sharness/aggregate-results.sh | grep 'failed\s*0'
- *store_gomod
- store_test_results:
path: /tmp/circleci-test-results
# Save artifacts
- store_artifacts:
path: /tmp/circleci-artifacts
- store_artifacts:
path: /tmp/circleci-test-results
build:
executor: golang
steps:
- checkout
- *make_out_dirs
- *restore_gomod
- run:
name: Building
command: make build
- run:
name: Storing
command: |
mkdir -p /tmp/circleci-workspace/bin
cp cmd/ipfs/ipfs /tmp/circleci-workspace/bin
- persist_to_workspace:
root: /tmp/circleci-workspace
paths:
- bin/ipfs
- *store_gomod
interop:
docker:
- image: cimg/go:1.19.1-node
parallelism: 4
resource_class: 2xlarge+
steps:
- *make_out_dirs
- attach_workspace:
at: /tmp/circleci-workspace
- restore_cache:
keys:
- v1-interop-{{ .Branch }}-{{ .Revision }}
- v1-interop-{{ .Branch }}-
- v1-interop-
- run:
name: Installing dependencies
command: |
npm init -y
npm install ipfs@^0.61.0
npm install ipfs-interop@^8.0.10
npm install mocha-circleci-reporter@0.0.3
working_directory: ~/ipfs/kubo/interop
- run:
name: Running tests
command: |
mkdir -p /tmp/test-results/interop/
export MOCHA_FILE="$(mktemp /tmp/test-results/interop/unit.XXXXXX.xml)"
npx ipfs-interop -- -t node -f $(sed -n -e "s|^import '\(.*\)'$|test/\1|p" node_modules/ipfs-interop/test/node.js | circleci tests split --split-by=timings) -- --reporter mocha-circleci-reporter
working_directory: ~/ipfs/kubo/interop
environment:
LIBP2P_TCP_REUSEPORT: false
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
IPFS_GO_EXEC: /tmp/circleci-workspace/bin/ipfs
- store_test_results:
path: /tmp/test-results
- save_cache:
key: v1-interop-{{ .Branch }}-{{ .Revision }}
paths:
- ~/ipfs/kubo/interop/node_modules
go-ipfs-api:
executor: golang
steps:
- *make_out_dirs
- attach_workspace:
at: /tmp/circleci-workspace
- run:
name: Cloning
command: |
git clone https://github.com/ipfs/go-ipfs-api.git
git -C go-ipfs-api log -1
- run:
name: Starting the daemon
command: /tmp/circleci-workspace/bin/ipfs daemon --init --enable-namesys-pubsub
background: true
- run:
name: Waiting for the daemon
no_output_timeout: 30s
command: |
while ! /tmp/circleci-workspace/bin/ipfs id --api=/ip4/127.0.0.1/tcp/5001 2>/dev/null; do
sleep 1
done
- restore_cache:
keys:
- v1-go-api-{{ checksum "~/ipfs/kubo/go-ipfs-api/go.sum" }}
- v1-go-api-
- run:
command: go test -count=1 -v ./...
working_directory: ~/ipfs/kubo/go-ipfs-api
- save_cache:
key: v1-go-api-{{ checksum "~/ipfs/kubo/go-ipfs-api/go.sum" }}
paths:
- ~/go/pkg/mod
- ~/.cache/go-build/
- run:
name: Stopping the daemon
command: /tmp/circleci-workspace/bin/ipfs shutdown
go-ipfs-http-client:
executor: golang
steps:
- *make_out_dirs
- attach_workspace:
at: /tmp/circleci-workspace
- run:
name: Cloning
command: |
git clone https://github.com/ipfs/go-ipfs-http-client.git
git -C go-ipfs-http-client log -1
- restore_cache:
keys:
- v1-http-client-{{ checksum "~/ipfs/kubo/go-ipfs-http-client/go.sum" }}
- v1-http-client-
- run:
name: go test -count=1 -v ./...
command: |
export PATH=/tmp/circleci-workspace/bin:$PATH
go test -count=1 -v ./...
working_directory: ~/ipfs/kubo/go-ipfs-http-client
- save_cache:
key: v1-http-client-{{ checksum "~/ipfs/kubo/go-ipfs-http-client/go.sum" }}
paths:
- ~/go/pkg/mod
- ~/.cache/go-build/
ipfs-webui:
executor: node-browsers
resource_class: 2xlarge+
steps:
- *make_out_dirs
- attach_workspace:
at: /tmp/circleci-workspace
- run:
name: Cloning
command: |
git clone https://github.com/ipfs/ipfs-webui.git
git -C ipfs-webui log -1
- restore_cache:
keys:
- v1-ipfs-webui-{{ checksum "~/ipfs/kubo/ipfs-webui/package-lock.json" }}
- v1-ipfs-webui-
- run:
name: Installing dependencies
command: |
npm ci --prefer-offline --no-audit --progress=false --cache ~/ipfs/kubo/.cache/npm
npx playwright install
working_directory: ~/ipfs/kubo/ipfs-webui
- run:
name: Running upstream tests (finish early if they fail)
command: |
npm test || circleci-agent step halt
working_directory: ~/ipfs/kubo/ipfs-webui
- run:
name: Running tests with kubo built from current commit
command: npm test
working_directory: ~/ipfs/kubo/ipfs-webui
environment:
IPFS_GO_EXEC: /tmp/circleci-workspace/bin/ipfs
- save_cache:
key: v1-ipfs-webui-{{ checksum "~/ipfs/kubo/ipfs-webui/package-lock.json" }}
paths:
- ~/.cache/ms-playwright
- ~/ipfs/kubo/.cache/npm
# We only run build as a test here. DockerHub images are built and published
# by GitHub Action now: https://github.com/ipfs/kubo/pull/8467
docker-build:
executor: dockerizer
steps:
- checkout
- setup_remote_docker:
version: "19.03.13"
- run:
name: Build Docker image
command: |
docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG .
workflows:
version: 2
# Runs for all branches, but not on tags
# see: https://circleci.com/docs/2.0/workflows/#executing-workflows-for-a-git-tag
test:
jobs:
- gobuild
- golint
- gotest
- sharness
- build
- interop:
requires:
- build
- go-ipfs-api:
requires:
- build
- go-ipfs-http-client:
requires:
- build
- ipfs-webui:
requires:
- build
- docker-build

3
.github/CODEOWNERS vendored
View File

@ -2,6 +2,9 @@
# request that modifies code that they own. Code owners are not automatically
# requested to review draft pull requests.
# Default
* @ipfs/kubo-maintainers
# HTTP Gateway
core/corehttp/ @lidel
test/sharness/*gateway*.sh @lidel

1
.github/FUNDING.yml vendored Normal file
View File

@ -0,0 +1 @@
custom: [ipshipyard.gitwallet.co]

View File

@ -18,7 +18,7 @@ body:
label: Checklist
description: Please verify that you've followed these steps
options:
- label: This is a bug report, not a question. Ask questions on [discuss.ipfs.io](https://discuss.ipfs.io).
- label: This is a bug report, not a question. Ask questions on [discuss.ipfs.tech](https://discuss.ipfs.tech/c/help/13).
required: true
- label: I have searched on the [issue tracker](https://github.com/ipfs/kubo/issues?q=is%3Aissue) for my bug.
required: true
@ -32,8 +32,9 @@ body:
label: Installation method
description: Please select your installation method
options:
- dist.ipfs.tech or ipfs-update
- docker image
- ipfs-desktop
- ipfs-update or dist.ipfs.tech
- third-party binary
- built from source
- type: textarea

View File

@ -1,7 +1,7 @@
blank_issues_enabled: false
contact_links:
- name: Getting Help on IPFS
url: https://ipfs.io/help
url: https://ipfs.tech/help
about: All information about how and where to get help on IPFS.
- name: Kubo configuration reference
url: https://github.com/ipfs/kubo/blob/master/docs/config.md#readme
@ -9,9 +9,9 @@ contact_links:
- name: Kubo experimental features docs
url: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#readme
about: Documentation on Private Networks, Filestore and other experimental features.
- name: RPC API Reference
- name: Kubo RPC API Reference
url: https://docs.ipfs.tech/reference/kubo/rpc/
about: Documentation of all Kubo RPC API endpoints.
- name: IPFS Official Forum
url: https://discuss.ipfs.io
- name: IPFS Official Discussion Forum
url: https://discuss.ipfs.tech
about: Please post general questions, support requests, and discussions here.

View File

@ -6,11 +6,11 @@ body:
- type: markdown
attributes:
value: |
Suggest an enhancement to Kubo (the program). If you'd like to suggest an improvement to the IPFS protocol, please discuss it on [the forum](https://discuss.ipfs.io).
Suggest an enhancement to Kubo (the program). If you'd like to suggest an improvement to the IPFS protocol, please discuss it on [the forum](https://discuss.ipfs.tech).
Issues in this repo must be specific, actionable, and well motivated. They should be starting points for _building_ new features, not brainstorming ideas.
If you have an idea you'd like to discuss, please open a new thread on [the forum](https://discuss.ipfs.io).
If you have an idea you'd like to discuss, please open a new thread on [the forum](https://discuss.ipfs.tech).
**Example:**

View File

@ -6,11 +6,11 @@ body:
- type: markdown
attributes:
value: |
Suggest a new feature in Kubo (the program). If you'd like to suggest an improvement to the IPFS protocol, please discuss it on [the forum](https://discuss.ipfs.io).
Suggest a new feature in Kubo (the program). If you'd like to suggest an improvement to the IPFS protocol, please discuss it on [the forum](https://discuss.ipfs.tech).
Issues in this repo must be specific, actionable, and well motivated. They should be starting points for _building_ new features, not brainstorming ideas.
If you have an idea you'd like to discuss, please open a new thread on [the forum](https://discuss.ipfs.io).
If you have an idea you'd like to discuss, please open a new thread on [the forum](https://discuss.ipfs.tech).
**Example:**

View File

@ -1,10 +1,6 @@
version: 2
updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
labels:
- "topic/dependencies"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

5
.github/pull_request_template.md vendored Normal file
View File

@ -0,0 +1,5 @@
<!--
Please update docs/changelogs/ if you're modifying Go files. If your change does not require a changelog entry, please do one of the following:
- add `[skip changelog]` to the PR title
- label the PR with `skip/changelog`
-->

39
.github/workflows/changelog.yml vendored Normal file
View File

@ -0,0 +1,39 @@
name: Changelog
on:
pull_request:
types:
- opened
- edited
- synchronize
- reopened
- labeled
- unlabeled
paths:
- '**.go'
- '**/go.mod'
- '**/go.sum'
jobs:
changelog:
if: contains(github.event.pull_request.title, '[skip changelog]') == false &&
contains(github.event.pull_request.labels.*.name, 'skip/changelog') == false
runs-on: ubuntu-latest
name: Changelog
steps:
- id: changelog
env:
GITHUB_TOKEN: ${{ github.token }}
ENDPOINT: repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files
SELECTOR: 'map(select(.filename | startswith("docs/changelogs/"))) | length'
run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "modified={}" | tee -a $GITHUB_OUTPUT
- if: steps.changelog.outputs.modified == '0'
env:
MESSAGE: |
docs/changelogs/ was not modified in this PR. Please do one of the following:
- add a changelog entry
- add `[skip changelog]` to the PR title
- label the PR with `skip/changelog`
run: |
echo "::error::${MESSAGE//$'\n'/%0A}"
exit 1

View File

@ -1,5 +1,5 @@
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
name: "CodeQL"
name: CodeQL
on:
workflow_dispatch:
@ -8,35 +8,42 @@ on:
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
paths-ignore:
- '**/*.md'
schedule:
- cron: '30 12 * * 2'
jobs:
analyze:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
name: Analyze
runs-on: ubuntu-latest
permissions:
contents: read # to fetch code (actions/checkout)
security-events: write # (github/codeql-action/autobuild)
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
jobs:
codeql:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 20
steps:
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.23.x
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3

34
.github/workflows/docker-build.yml vendored Normal file
View File

@ -0,0 +1,34 @@
# If we decide to run build-image.yml on every PR, we could deprecate this workflow.
name: Docker Build
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
jobs:
docker-build:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 10
env:
IMAGE_NAME: ipfs/kubo
WIP_IMAGE_TAG: wip
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v5
with:
go-version: 1.23.x
- uses: actions/checkout@v4
- run: docker build -t $IMAGE_NAME:$WIP_IMAGE_TAG .
- run: docker run --rm $IMAGE_NAME:$WIP_IMAGE_TAG --version

View File

@ -1,51 +1,135 @@
name: Publish Docker image
name: Docker Push
on:
workflow_dispatch:
inputs:
push:
description: 'Push to Docker Hub'
required: true
default: 'false'
tags:
description: 'Custom tags to use for the push'
required: false
default: ''
# # If we decide to build all images on every PR, we should make sure that
# # they are NOT pushed to Docker Hub.
# pull_request:
# paths-ignore:
# - '**/*.md'
push:
branches:
- 'master'
- 'staging'
- 'bifrost-*'
tags:
- 'v*'
permissions:
contents: read # to fetch code (actions/checkout)
jobs:
push_to_registry:
docker-hub:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
timeout-minutes: 15
env:
IMAGE_NAME: ipfs/kubo
LEGACY_IMAGE_NAME: ipfs/go-ipfs
steps:
- name: Check out the repo
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Get tags
id: tags
if: github.event.inputs.tags == ''
run: |
TAGS="$(./bin/get-docker-tags.sh $(date -u +%F))"
TAGS="${TAGS//$'\n'/'%0A'}"
echo "::set-output name=value::$(echo $TAGS)"
echo "value<<EOF" >> $GITHUB_OUTPUT
./bin/get-docker-tags.sh "$(date -u +%F)" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
shell: bash
- name: Log in to Docker Hub
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
username: ${{ vars.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Build Docker image and publish to Docker Hub
uses: docker/build-push-action@v2
# We have to build each platform separately because when using multi-arch
# builds, only one platform is being loaded into the cache. This would
# prevent us from testing the other platforms.
- name: Build Docker image (linux/amd64)
uses: docker/build-push-action@v6
with:
platforms: linux/amd64
context: .
push: false
load: true
file: ./Dockerfile
tags: ${{ env.IMAGE_NAME }}:linux-amd64
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
- name: Build Docker image (linux/arm/v7)
uses: docker/build-push-action@v6
with:
platforms: linux/arm/v7
context: .
push: false
load: true
file: ./Dockerfile
tags: ${{ env.IMAGE_NAME }}:linux-arm-v7
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
- name: Build Docker image (linux/arm64/v8)
uses: docker/build-push-action@v6
with:
platforms: linux/arm64/v8
context: .
push: false
load: true
file: ./Dockerfile
tags: ${{ env.IMAGE_NAME }}:linux-arm64-v8
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
# We test all the images on amd64 host here. This uses QEMU to emulate
# the other platforms.
- run: docker run --rm $IMAGE_NAME:linux-amd64 --version
- run: docker run --rm $IMAGE_NAME:linux-arm-v7 --version
- run: docker run --rm $IMAGE_NAME:linux-arm64-v8 --version
# This will only push the previously built images.
- if: github.event_name != 'workflow_dispatch' || github.event.inputs.push == 'true'
name: Publish to Docker Hub
uses: docker/build-push-action@v6
with:
platforms: linux/amd64,linux/arm/v7,linux/arm64/v8
context: .
push: true
file: ./Dockerfile
tags: "${{ steps.tags.outputs.value }}"
tags: "${{ github.event.inputs.tags || steps.tags.outputs.value }}"
cache-from: type=local,src=/tmp/.buildx-cache-new
cache-to: type=local,dest=/tmp/.buildx-cache-new
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Move cache to limit growth
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache

View File

@ -0,0 +1,228 @@
name: Gateway Conformance
on:
workflow_dispatch:
push:
branches:
- master
pull_request:
paths-ignore:
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
env:
# hostnames expected by https://github.com/ipfs/gateway-conformance
GATEWAY_PUBLIC_GATEWAYS: |
{
"example.com": {
"UseSubdomains": true,
"InlineDNSLink": true,
"Paths": ["/ipfs", "/ipns"]
},
"localhost": {
"UseSubdomains": true,
"InlineDNSLink": true,
"Paths": ["/ipfs", "/ipns"]
}
}
jobs:
# Testing all gateway features via TCP port specified in Addresses.Gateway
gateway-conformance:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
# 1. Download the gateway-conformance fixtures
- name: Download gateway-conformance fixtures
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.6
with:
output: fixtures
# 2. Build the kubo-gateway
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.23.x
- uses: protocol/cache-go-action@v1
with:
name: ${{ github.job }}
- name: Checkout kubo-gateway
uses: actions/checkout@v4
with:
path: kubo-gateway
- name: Build kubo-gateway
run: make build
working-directory: kubo-gateway
# 3. Init the kubo-gateway
- name: Init kubo-gateway
run: |
./ipfs init -e
./ipfs config --json Gateway.PublicGateways "$GATEWAY_PUBLIC_GATEWAYS"
working-directory: kubo-gateway/cmd/ipfs
# 4. Populate the Kubo gateway with the gateway-conformance fixtures
- name: Import fixtures
run: |
# Import car files
find ./fixtures -name '*.car' -exec kubo-gateway/cmd/ipfs/ipfs dag import --pin-roots=false {} \;
# Import ipns records
records=$(find ./fixtures -name '*.ipns-record')
for record in $records
do
key=$(basename -s .ipns-record "$record" | cut -d'_' -f1)
kubo-gateway/cmd/ipfs/ipfs routing put --allow-offline "/ipns/$key" "$record"
done
# Import dnslink records
# the IPFS_NS_MAP env will be used by the daemon
echo "IPFS_NS_MAP=$(cat ./fixtures/dnslinks.IPFS_NS_MAP)" >> $GITHUB_ENV
# 5. Start the kubo-gateway
- name: Start kubo-gateway
run: |
./ipfs daemon --offline &
working-directory: kubo-gateway/cmd/ipfs
# 6. Run the gateway-conformance tests
- name: Run gateway-conformance tests
uses: ipfs/gateway-conformance/.github/actions/test@v0.6
with:
gateway-url: http://127.0.0.1:8080
subdomain-url: http://localhost:8080
args: -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length'
json: output.json
xml: output.xml
html: output.html
markdown: output.md
# 7. Upload the results
- name: Upload MD summary
if: failure() || success()
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v4
with:
name: gateway-conformance.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v4
with:
name: gateway-conformance.json
path: output.json
# Testing trustless gateway feature subset exposed as libp2p protocol
gateway-conformance-libp2p-experiment:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
# 1. Download the gateway-conformance fixtures
- name: Download gateway-conformance fixtures
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.6
with:
output: fixtures
# 2. Build the kubo-gateway
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.23.x
- uses: protocol/cache-go-action@v1
with:
name: ${{ github.job }}
- name: Checkout kubo-gateway
uses: actions/checkout@v4
with:
path: kubo-gateway
- name: Build kubo-gateway
run: make build
working-directory: kubo-gateway
# 3. Init the kubo-gateway
- name: Init kubo-gateway
run: |
./ipfs init --profile=test
./ipfs config --json Gateway.PublicGateways "$GATEWAY_PUBLIC_GATEWAYS"
./ipfs config --json Experimental.GatewayOverLibp2p true
./ipfs config Addresses.Gateway "/ip4/127.0.0.1/tcp/8080"
./ipfs config Addresses.API "/ip4/127.0.0.1/tcp/5001"
working-directory: kubo-gateway/cmd/ipfs
# 4. Populate the Kubo gateway with the gateway-conformance fixtures
- name: Import fixtures
run: |
# Import car files
find ./fixtures -name '*.car' -exec kubo-gateway/cmd/ipfs/ipfs dag import --pin-roots=false {} \;
# 5. Start the kubo-gateway
- name: Start kubo-gateway
run: |
( ./ipfs daemon & ) | sed '/Daemon is ready/q'
while [[ "$(./ipfs id | jq '.Addresses | length')" == '0' ]]; do sleep 1; done
working-directory: kubo-gateway/cmd/ipfs
# 6. Setup a kubo http-p2p-proxy to expose libp2p protocol as a regular HTTP port for gateway conformance tests
- name: Init p2p-proxy kubo node
env:
IPFS_PATH: "~/.kubo-p2p-proxy"
run: |
./ipfs init --profile=test -e
./ipfs config --json Experimental.Libp2pStreamMounting true
./ipfs config Addresses.Gateway "/ip4/127.0.0.1/tcp/8081"
./ipfs config Addresses.API "/ip4/127.0.0.1/tcp/5002"
working-directory: kubo-gateway/cmd/ipfs
# 7. Start the kubo http-p2p-proxy
- name: Start kubo http-p2p-proxy
env:
IPFS_PATH: "~/.kubo-p2p-proxy"
run: |
( ./ipfs daemon & ) | sed '/Daemon is ready/q'
while [[ "$(./ipfs id | jq '.Addresses | length')" == '0' ]]; do sleep 1; done
working-directory: kubo-gateway/cmd/ipfs
# 8. Start forwarding data from the http-p2p-proxy to the node serving the Gateway API over libp2p
- name: Start http-over-libp2p forwarding proxy
run: |
gatewayNodeId=$(./ipfs --api=/ip4/127.0.0.1/tcp/5001 id -f="<id>")
./ipfs --api=/ip4/127.0.0.1/tcp/5002 swarm connect $(./ipfs --api=/ip4/127.0.0.1/tcp/5001 swarm addrs local --id | head -n 1)
./ipfs --api=/ip4/127.0.0.1/tcp/5002 p2p forward --allow-custom-protocol /http/1.1 /ip4/127.0.0.1/tcp/8092 /p2p/$gatewayNodeId
working-directory: kubo-gateway/cmd/ipfs
# 9. Run the gateway-conformance tests over libp2p
- name: Run gateway-conformance tests over libp2p
uses: ipfs/gateway-conformance/.github/actions/test@v0.6
with:
gateway-url: http://127.0.0.1:8092
args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length'
json: output.json
xml: output.xml
html: output.html
markdown: output.md
# 10. Upload the results
- name: Upload MD summary
if: failure() || success()
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v4
with:
name: gateway-conformance-libp2p.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v4
with:
name: gateway-conformance-libp2p.json
path: output.json

40
.github/workflows/gobuild.yml vendored Normal file
View File

@ -0,0 +1,40 @@
name: Go Build
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
jobs:
go-build:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
env:
TEST_DOCKER: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v5
with:
go-version: 1.23.x
- uses: actions/checkout@v4
- run: make cmd/ipfs-try-build
env:
TEST_FUSE: 1
- run: make cmd/ipfs-try-build
env:
TEST_FUSE: 0

View File

@ -1,19 +1,35 @@
on: [push, pull_request]
name: Go Checks
name: Go Check
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
jobs:
unit:
go-check:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
name: All
timeout-minutes: 10
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-go@v2
- uses: actions/setup-go@v5
with:
go-version: "1.19.x"
go-version: "1.23.x"
- name: Check that go.mod is tidy
uses: protocol/multiple-go-modules@v1.2
uses: protocol/multiple-go-modules@v1.4
with:
run: |
go mod tidy
@ -33,6 +49,6 @@ jobs:
fi
- name: go vet
if: always() # run this step even if the previous one failed
uses: protocol/multiple-go-modules@v1.2
uses: protocol/multiple-go-modules@v1.4
with:
run: go vet ./...

36
.github/workflows/golint.yml vendored Normal file
View File

@ -0,0 +1,36 @@
name: Go Lint
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
jobs:
go-lint:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 10
env:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v5
with:
go-version: 1.23.x
- uses: actions/checkout@v4
- run: make -O test_go_lint

109
.github/workflows/gotest.yml vendored Normal file
View File

@ -0,0 +1,109 @@
name: Go Test
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
jobs:
go-test:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
env:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23.x
- name: Check out Kubo
uses: actions/checkout@v4
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
env:
# increasing parallelism beyond 2 doesn't speed up the tests much
PARALLEL: 2
run: |
make -j "$PARALLEL" test/unit/gotest.junit.xml &&
[[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- name: Upload coverage to Codecov
uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0
if: failure() || success()
with:
name: unittests
files: coverage/unit_tests.coverprofile
- name: Test kubo-as-a-library example
run: |
# we want to first test with the kubo version in the go.mod file
go test -v ./...
# we also want to test the examples against the current version of kubo
# however, that version might be in a fork so we need to replace the dependency
# backup the go.mod and go.sum files to restore them after we run the tests
cp go.mod go.mod.bak
cp go.sum go.sum.bak
# make sure the examples run against the current version of kubo
go mod edit -replace github.com/ipfs/kubo=./../../..
go mod tidy
go test -v ./...
# restore the go.mod and go.sum files to their original state
mv go.mod.bak go.mod
mv go.sum.bak go.sum
working-directory: docs/examples/kubo-as-a-library
- name: Create a proper JUnit XML report
uses: ipdxco/gotest-json-to-junit-xml@v1
with:
input: test/unit/gotest.json
output: test/unit/gotest.junit.xml
if: failure() || success()
- name: Archive the JUnit XML report
uses: actions/upload-artifact@v4
with:
name: unit
path: test/unit/gotest.junit.xml
if: failure() || success()
- name: Create a HTML report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: no-frames
input: test/unit/gotest.junit.xml
output: test/unit/gotest.html
if: failure() || success()
- name: Archive the HTML report
uses: actions/upload-artifact@v4
with:
name: html
path: test/unit/gotest.html
if: failure() || success()
- name: Create a Markdown report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: summary
input: test/unit/gotest.junit.xml
output: test/unit/gotest.md
if: failure() || success()
- name: Set the summary
run: cat test/unit/gotest.md >> $GITHUB_STEP_SUMMARY
if: failure() || success()

136
.github/workflows/interop.yml vendored Normal file
View File

@ -0,0 +1,136 @@
name: Interop
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
env:
GO_VERSION: 1.23.x
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
interop-prep:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 5
env:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v4
- run: make build
- uses: actions/upload-artifact@v4
with:
name: kubo
path: cmd/ipfs/ipfs
helia-interop:
needs: [interop-prep]
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
defaults:
run:
shell: bash
steps:
- uses: actions/setup-node@v4
with:
node-version: lts/*
- uses: actions/download-artifact@v4
with:
name: kubo
path: cmd/ipfs
- run: chmod +x cmd/ipfs/ipfs
- run: echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
id: npm-cache-dir
- uses: actions/cache@v4
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-${{ github.job }}-helia-${{ hashFiles('**/package-lock.json') }}
restore-keys: ${{ runner.os }}-${{ github.job }}-helia-
- run: sudo apt update
- run: sudo apt install -y libxkbcommon0 libxdamage1 libgbm1 libpango-1.0-0 libcairo2 # dependencies for playwright
- run: npx --package @helia/interop helia-interop
env:
KUBO_BINARY: ${{ github.workspace }}/cmd/ipfs/ipfs
ipfs-webui:
needs: [interop-prep]
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
env:
NO_SANDBOX: true
LIBP2P_TCP_REUSEPORT: false
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
E2E_IPFSD_TYPE: go
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-node@v4
with:
node-version: 18.14.0
- uses: actions/download-artifact@v4
with:
name: kubo
path: cmd/ipfs
- run: chmod +x cmd/ipfs/ipfs
- uses: actions/checkout@v4
with:
repository: ipfs/ipfs-webui
path: ipfs-webui
- run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
id: npm-cache-dir
- uses: actions/cache@v4
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-${{ github.job }}-
- env:
NPM_CACHE_DIR: ${{ steps.npm-cache-dir.outputs.dir }}
run: |
npm ci --prefer-offline --no-audit --progress=false --cache "$NPM_CACHE_DIR"
npx playwright install --with-deps
working-directory: ipfs-webui
- id: ref
run: echo "ref=$(git rev-parse --short HEAD)" | tee -a $GITHUB_OUTPUT
working-directory: ipfs-webui
- id: state
env:
GITHUB_TOKEN: ${{ github.token }}
ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.ref.outputs.ref }}/status
SELECTOR: .state
KEY: state
run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "$KEY={}" | tee -a $GITHUB_OUTPUT
- name: Build ipfs-webui@main (state=${{ steps.state.outputs.state }})
run: npm run test:build
working-directory: ipfs-webui
- name: Test ipfs-webui@main (state=${{ steps.state.outputs.state }}) E2E against the locally built Kubo binary
run: npm run test:e2e
env:
IPFS_GO_EXEC: ${{ github.workspace }}/cmd/ipfs/ipfs
working-directory: ipfs-webui

123
.github/workflows/sharness.yml vendored Normal file
View File

@ -0,0 +1,123 @@
name: Sharness
on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
push:
branches:
- 'master'
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
jobs:
sharness-test:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
defaults:
run:
shell: bash
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.23.x
- name: Checkout Kubo
uses: actions/checkout@v4
with:
path: kubo
- name: Install missing tools
run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils
- uses: actions/cache@v4
with:
path: test/sharness/lib/dependencies
key: ${{ runner.os }}-test-generate-junit-html-${{ hashFiles('test/sharness/lib/test-generate-junit-html.sh') }}
- name: Run Sharness tests
run: |
make -O -j "$PARALLEL" \
test_sharness \
coverage/sharness_tests.coverprofile \
test/sharness/test-results/sharness.xml
working-directory: kubo
env:
TEST_DOCKER: 1
TEST_PLUGIN: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TEST_JUNIT: 1
TEST_EXPENSIVE: 1
IPFS_CHECK_RCMGR_DEFAULTS: 1
CONTINUE_ON_S_FAILURE: 1
# increasing parallelism beyond 10 doesn't speed up the tests much
PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
- name: Upload coverage report
uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0
if: failure() || success()
with:
name: sharness
files: kubo/coverage/sharness_tests.coverprofile
- name: Aggregate results
run: find kubo/test/sharness/test-results -name 't*-*.sh.*.counts' | kubo/test/sharness/lib/sharness/aggregate-results.sh > kubo/test/sharness/test-results/summary.txt
- name: 👉️ If this step failed, go to «Summary» (top left) → «HTML Report» → inspect the «Failures» column
run: |
cat kubo/test/sharness/test-results/summary.txt &&
grep 'failed\s*0' kubo/test/sharness/test-results/summary.txt
- name: Add aggregate results to the summary
if: failure() || success()
run: |
echo "# Summary" >> $GITHUB_STEP_SUMMARY
echo >> $GITHUB_STEP_SUMMARY
cat kubo/test/sharness/test-results/summary.txt >> $GITHUB_STEP_SUMMARY
- name: Generate one-page HTML report
uses: ipdxco/junit-xml-to-html@v1
if: failure() || success()
with:
mode: no-frames
input: kubo/test/sharness/test-results/sharness.xml
output: kubo/test/sharness/test-results/sharness.html
- name: Upload one-page HTML report to S3
id: one-page
uses: ipdxco/custom-github-runners/.github/actions/upload-artifact@main
if: github.repository == 'ipfs/kubo' && (failure() || success())
with:
source: kubo/test/sharness/test-results/sharness.html
destination: sharness.html
- name: Upload one-page HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v4
with:
name: sharness.html
path: kubo/test/sharness/test-results/sharness.html
- name: Generate full HTML report
uses: ipdxco/junit-xml-to-html@v1
if: failure() || success()
with:
mode: frames
input: kubo/test/sharness/test-results/sharness.xml
output: kubo/test/sharness/test-results/sharness-html
- name: Upload full HTML report to S3
id: full
uses: ipdxco/custom-github-runners/.github/actions/upload-artifact@main
if: github.repository == 'ipfs/kubo' && (failure() || success())
with:
source: kubo/test/sharness/test-results/sharness-html
destination: sharness-html/
- name: Upload full HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v4
with:
name: sharness-html
path: kubo/test/sharness/test-results/sharness-html
- name: Add S3 links to the summary
if: github.repository == 'ipfs/kubo' && (failure() || success())
run: echo "$MD" >> $GITHUB_STEP_SUMMARY
env:
MD: |
# HTML Reports
- View the [one page HTML report](${{ steps.one-page.outputs.url }})
- View the [full HTML report](${{ steps.full.outputs.url }}index.html)

View File

@ -2,25 +2,12 @@ name: Close and mark stale issue
on:
schedule:
- cron: '0 0 * * *'
- cron: '0 0 * * *'
permissions:
issues: write
pull-requests: write
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 7 days.'
close-issue-message: 'This issue was closed because it is missing author input.'
stale-issue-label: 'kind/stale'
any-of-labels: 'need/author-input'
exempt-issue-labels: 'need/triage,need/community-input,need/maintainer-input,need/maintainers-input,need/analysis,status/blocked,status/in-progress,status/ready,status/deferred,status/inactive'
days-before-issue-stale: 6
days-before-issue-close: 7
enable-statistics: true
uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3

View File

@ -1,4 +1,4 @@
name: Sync github release assets with dist.ipfs.tech
name: Sync GitHub Release Assets
on:
workflow_dispatch:
@ -9,27 +9,31 @@ concurrency:
group: release-assets-dist-sync
cancel-in-progress: true
permissions:
contents: write # to upload release asset
jobs:
sync-github-and-dist-ipfs-tech:
dist-ipfs-tech:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: "ubuntu-latest"
timeout-minutes: 15
steps:
- uses: ipfs/download-ipfs-distribution-action@v1
- uses: ipfs/start-ipfs-daemon-action@v1
with:
args: --init --init-profile=flatfs,server --enable-gc=false
- uses: actions/setup-node@v2
- uses: actions/setup-node@v4
with:
node-version: 14
- name: Sync the latest 5 github releases
uses: actions/github-script@v4
uses: actions/github-script@v7
with:
script: |
const fs = require('fs').promises
const max_synced = 5
// fetch github releases
resp = await github.repos.listReleases({
resp = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo,
page: 1,
@ -112,7 +116,7 @@ jobs:
}
console.log("uploading", file, "to github release", release.tag_name)
const uploadReleaseAsset = async (file) => github.repos.uploadReleaseAsset({
const uploadReleaseAsset = async (file) => github.rest.repos.uploadReleaseAsset({
owner: context.repo.owner,
repo: context.repo.repo,
release_id: release.id,

View File

@ -1,38 +0,0 @@
---
name: Testground PR Checker
on:
workflow_dispatch:
push:
jobs:
testground:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
name: ${{ matrix.composition_file }}
strategy:
matrix:
include:
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/bitswap
composition_file: testplans/bitswap/_compositions/small-k8s.toml
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/bitswap
composition_file: testplans/bitswap/_compositions/medium-k8s.toml
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/bitswap
composition_file: testplans/bitswap/_compositions/large-k8s.toml
steps:
- uses: actions/checkout@v2
- name: testground run
uses: testground/testground-github-action@v1
timeout-minutes: 5
with:
backend_addr: ${{ matrix.backend_addr }}
backend_proto: ${{ matrix.backend_proto }}
plan_directory: ${{ matrix.plan_directory }}
composition_file: ${{ matrix.composition_file }}

View File

@ -4,5 +4,8 @@ linters:
linters-settings:
stylecheck:
checks:
- all
- '-ST1003'
dot-import-whitelist:
- github.com/ipfs/kubo/test/cli/testutils

View File

@ -1,5 +1,19 @@
# Kubo Changelogs
- [v0.32](docs/changelogs/v0.32.md)
- [v0.31](docs/changelogs/v0.31.md)
- [v0.30](docs/changelogs/v0.30.md)
- [v0.29](docs/changelogs/v0.29.md)
- [v0.28](docs/changelogs/v0.28.md)
- [v0.27](docs/changelogs/v0.27.md)
- [v0.26](docs/changelogs/v0.26.md)
- [v0.25](docs/changelogs/v0.25.md)
- [v0.24](docs/changelogs/v0.24.md)
- [v0.23](docs/changelogs/v0.23.md)
- [v0.22](docs/changelogs/v0.22.md)
- [v0.21](docs/changelogs/v0.21.md)
- [v0.20](docs/changelogs/v0.20.md)
- [v0.19](docs/changelogs/v0.19.md)
- [v0.18](docs/changelogs/v0.18.md)
- [v0.17](docs/changelogs/v0.17.md)
- [v0.16](docs/changelogs/v0.16.md)

View File

@ -1,11 +1,6 @@
FROM golang:1.19.1-buster
LABEL maintainer="Steven Allen <steven@stebalien.com>"
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.23 AS builder
# Install deps
RUN apt-get update && apt-get install -y \
libssl-dev \
ca-certificates \
fuse
ARG TARGETOS TARGETARCH
ENV SRC_DIR /kubo
@ -20,44 +15,46 @@ COPY . $SRC_DIR
# e.g. docker build --build-arg IPFS_PLUGINS="foo bar baz"
ARG IPFS_PLUGINS
# Allow for other targets to be built, e.g.: docker build --build-arg MAKE_TARGET="nofuse"
ARG MAKE_TARGET=build
# Build the thing.
# Also: fix getting HEAD commit hash via git rev-parse.
RUN cd $SRC_DIR \
&& mkdir -p .git/objects \
&& GOFLAGS=-buildvcs=false make build GOTAGS=openssl IPFS_PLUGINS=$IPFS_PLUGINS
&& GOOS=$TARGETOS GOARCH=$TARGETARCH GOFLAGS=-buildvcs=false make ${MAKE_TARGET} IPFS_PLUGINS=$IPFS_PLUGINS
# Get su-exec, a very minimal tool for dropping privileges,
# and tini, a very minimal init daemon for containers
ENV SUEXEC_VERSION v0.2
ENV TINI_VERSION v0.19.0
# Using Debian Buster because the version of busybox we're using is based on it
# and we want to make sure the libraries we're using are compatible. That's also
# why we're running this for the target platform.
FROM debian:stable-slim AS utilities
RUN set -eux; \
dpkgArch="$(dpkg --print-architecture)"; \
case "${dpkgArch##*-}" in \
"amd64" | "armhf" | "arm64") tiniArch="tini-static-$dpkgArch" ;;\
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
esac; \
cd /tmp \
&& git clone https://github.com/ncopa/su-exec.git \
&& cd su-exec \
&& git checkout -q $SUEXEC_VERSION \
&& make su-exec-static \
&& cd /tmp \
&& wget -q -O tini https://github.com/krallin/tini/releases/download/$TINI_VERSION/$tiniArch \
&& chmod +x tini
apt-get update; \
apt-get install -y \
tini \
# Using gosu (~2MB) instead of su-exec (~20KB) because it's easier to
# install on Debian. Useful links:
# - https://github.com/ncopa/su-exec#why-reinvent-gosu
# - https://github.com/tianon/gosu/issues/52#issuecomment-441946745
gosu \
# This installs fusermount which we later copy over to the target image.
fuse \
ca-certificates \
; \
rm -rf /var/lib/apt/lists/*
# Now comes the actual target image, which aims to be as small as possible.
FROM busybox:1.31.1-glibc
LABEL maintainer="Steven Allen <steven@stebalien.com>"
FROM busybox:stable-glibc
# Get the ipfs binary, entrypoint script, and TLS CAs from the build container.
ENV SRC_DIR /kubo
COPY --from=0 $SRC_DIR/cmd/ipfs/ipfs /usr/local/bin/ipfs
COPY --from=0 $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs
COPY --from=0 $SRC_DIR/bin/container_init_run /usr/local/bin/container_init_run
COPY --from=0 /tmp/su-exec/su-exec-static /sbin/su-exec
COPY --from=0 /tmp/tini /sbin/tini
COPY --from=0 /bin/fusermount /usr/local/bin/fusermount
COPY --from=0 /etc/ssl/certs /etc/ssl/certs
COPY --from=utilities /usr/sbin/gosu /sbin/gosu
COPY --from=utilities /usr/bin/tini /sbin/tini
COPY --from=utilities /bin/fusermount /usr/local/bin/fusermount
COPY --from=utilities /etc/ssl/certs /etc/ssl/certs
COPY --from=builder $SRC_DIR/cmd/ipfs/ipfs /usr/local/bin/ipfs
COPY --from=builder $SRC_DIR/bin/container_daemon /usr/local/bin/start_ipfs
COPY --from=builder $SRC_DIR/bin/container_init_run /usr/local/bin/container_init_run
# Add suid bit on fusermount so it will run properly
RUN chmod 4755 /usr/local/bin/fusermount
@ -65,13 +62,6 @@ RUN chmod 4755 /usr/local/bin/fusermount
# Fix permissions on start_ipfs (ignore the build machine's permissions)
RUN chmod 0755 /usr/local/bin/start_ipfs
# This shared lib (part of glibc) doesn't seem to be included with busybox.
COPY --from=0 /lib/*-linux-gnu*/libdl.so.2 /lib/
# Copy over SSL libraries.
COPY --from=0 /usr/lib/*-linux-gnu*/libssl.so* /usr/lib/
COPY --from=0 /usr/lib/*-linux-gnu*/libcrypto.so* /usr/lib/
# Swarm TCP; should be exposed to the public
EXPOSE 4001
# Swarm UDP; should be exposed to the public
@ -113,7 +103,7 @@ ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start_ipfs"]
# Healthcheck for the container
# QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn is the CID of empty folder
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD ipfs dag stat /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn || exit 1
CMD ipfs --api=/ip4/127.0.0.1/tcp/5001 dag stat /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn || exit 1
# Execute the daemon subcommand by default
CMD ["daemon", "--migrate=true", "--agent-version-suffix=docker"]

148
README.md
View File

@ -1,20 +1,36 @@
<h1 align="center">
<br>
<a href="https://docs.ipfs.tech/how-to/command-line-quick-start/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
<br>
Kubo: IPFS Implementation in GO
<br>
</h1>
![kubo, an IPFS node in Go](https://ipfs.io/ipfs/bafykbzacecaesuqmivkauix25v6i6xxxsvsrtxknhgb5zak3xxsg2nb4dhs2u/ipfs.go.png)
<p align="center" style="font-size: 1.2rem;">The first implementation of IPFS.</p>
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square&cacheSeconds=3600)](https://protocol.ai)
[![GoDoc](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square&cacheSeconds=3600)](https://godoc.org/github.com/ipfs/kubo)
[![CircleCI](https://img.shields.io/circleci/build/github/ipfs/kubo?style=flat-square&cacheSeconds=3600)](https://circleci.com/gh/ipfs/kubo)
<p align="center">
<a href="https://ipfs.tech"><img src="https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square" alt="Official Part of IPFS Project"></a>
<a href="https://discuss.ipfs.tech"><img alt="Discourse Forum" src="https://img.shields.io/discourse/posts?server=https%3A%2F%2Fdiscuss.ipfs.tech"></a>
<a href="https://matrix.to/#/#ipfs-space:ipfs.io"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
<a href="https://github.com/ipfs/kubo/actions"><img src="https://img.shields.io/github/actions/workflow/status/ipfs/kubo/build.yml?branch=master" alt="ci"></a>
<a href="https://github.com/ipfs/kubo/releases"><img alt="GitHub release" src="https://img.shields.io/github/v/release/ipfs/kubo?filter=!*rc*"></a>
<a href="https://godoc.org/github.com/ipfs/kubo"><img src="https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square" alt="godoc reference"></a>
</p>
<hr />
## What is Kubo?
Kubo was the first IPFS implementation and is the most widely used one today. Implementing the *Interplanetary Filesystem* - the Web3 standard for content-addressing, interoperable with HTTP. Thus powered by IPLD's data models and the libp2p for network communication. Kubo is written in Go.
Featureset
- Runs an IPFS-Node as a network service
- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) to IPFS-Nodes
- Local [Web2-to-Web3 HTTP Gateway functionality](https://github.com/ipfs/specs/tree/main/http-gateways#readme)
- HTTP RPC API (`/api/v0`) to access and control the daemon
- IPFS's internal Webgui can be used to manage the Kubo nodes
- Runs an IPFS-Node as a network service that is part of LAN and WAN DHT
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs` and `/ipns`) functionality for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) (`/routing/v1`) client and server implementation for [delegated routing](./docs/delegated-routing.md) lookups
- [HTTP Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`) to access and control the daemon
- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) based on (`/api/v0`) RPC API
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) to manage the Kubo node
- [Content blocking](/docs/content-blocking.md) support for operators of public nodes
### Other implementations
@ -33,7 +49,7 @@ Before opening an issue, consider using one of the following locations to ensure
- Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues).
- Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.tech).
- Or [chat with us](https://docs.ipfs.tech/community/chat/).
[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCdjsUXJ3QawK4O5L1kqqsew?label=Subscribe%20IPFS&style=social&cacheSeconds=3600)](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [![Follow @IPFS on Twitter](https://img.shields.io/twitter/follow/IPFS?style=social&cacheSeconds=3600)](https://twitter.com/IPFS)
## Next milestones
@ -48,8 +64,8 @@ Before opening an issue, consider using one of the following locations to ensure
- [Next milestones](#next-milestones)
- [Table of Contents](#table-of-contents)
- [Security Issues](#security-issues)
- [Minimal System Requirements](#minimal-system-requirements)
- [Install](#install)
- [System Requirements](#system-requirements)
- [Docker](#docker)
- [Official prebuilt binaries](#official-prebuilt-binaries)
- [Updating](#updating)
@ -57,23 +73,24 @@ Before opening an issue, consider using one of the following locations to ensure
- [Downloading builds using IPFS](#downloading-builds-using-ipfs)
- [Unofficial Linux packages](#unofficial-linux-packages)
- [ArchLinux](#arch-linux)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
- [Unofficial Windows packages](#unofficial-windows-packages)
- [Chocolatey](#chocolatey)
- [Scoop](#scoop)
- [Unofficial MacOS packages](#unofficial-macos-packages)
- [MacPorts](#macports)
- [Nix](#nix-macos)
- [Homebrew](#homebrew)
- [Homebrew](#homebrew)
- [Build from Source](#build-from-source)
- [Install Go](#install-go)
- [Download and Compile IPFS](#download-and-compile-ipfs)
- [Cross Compiling](#cross-compiling)
- [OpenSSL](#openssl)
- [Troubleshooting](#troubleshooting)
- [Getting Started](#getting-started)
- [Usage](#usage)
@ -94,26 +111,38 @@ Before opening an issue, consider using one of the following locations to ensure
Please follow [`SECURITY.md`](SECURITY.md).
### Minimal System Requirements
IPFS can run on most Linux, macOS, and Windows systems. We recommend running it on a machine with at least 4 GB of RAM and 2 CPU cores (kubo is highly parallel). On systems with less memory, it may not be completely stable, and you run on your own risk.
## Install
The canonical download instructions for IPFS are over at: https://docs.ipfs.tech/install/. It is **highly recommended** you follow those instructions if you are not interested in working on IPFS development.
### System Requirements
IPFS can run on most Linux, macOS, and Windows systems. We recommend running it on a machine with at least 2 GB of RAM and 2 CPU cores (kubo is highly parallel). On systems with less memory, it may not be completely stable.
If your system is resource-constrained, we recommend:
1. Installing OpenSSL and rebuilding kubo manually with `make build GOTAGS=openssl`. See the [download and compile](#download-and-compile-ipfs) section for more information on compiling kubo.
2. Initializing your daemon with `ipfs init --profile=lowpower`
### Docker
Official images are published at https://hub.docker.com/r/ipfs/kubo/:
[![Docker Image Version (latest semver)](https://img.shields.io/docker/v/ipfs/kubo?color=blue&label=kubo%20docker%20image&logo=docker&sort=semver&style=flat-square&cacheSeconds=3600)](https://hub.docker.com/r/ipfs/kubo/)
More info on how to run Kubo (go-ipfs) inside Docker can be found [here](https://docs.ipfs.tech/how-to/run-ipfs-inside-docker/).
- 🟢 Releases
- `latest` and `release` tags always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest)
- `vN.N.N` points at a specific [release tag](https://github.com/ipfs/kubo/releases)
- These are production grade images.
- 🟠 We also provide experimental developer builds
- `master-latest` always points at the `HEAD` of the `master` branch
- `master-YYYY-DD-MM-GITSHA` points at a specific commit from the `master` branch
- These tags are used by developers for internal testing, not intended for end users or production use.
```console
$ docker pull ipfs/kubo:latest
$ docker run --rm -it --net=host ipfs/kubo:latest
```
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node),
pass necessary config via `-e` or by mounting scripts in the `/container-init.d`.
Learn more at https://docs.ipfs.tech/install/run-ipfs-inside-docker/
### Official prebuilt binaries
@ -166,12 +195,18 @@ $ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip
### Unofficial Linux packages
<a href="https://repology.org/project/kubo/versions">
<img src="https://repology.org/badge/vertical-allrepos/kubo.svg" alt="Packaging status" align="right">
</a>
- [ArchLinux](#arch-linux)
- [Nix](#nix)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix-linux)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
#### Arch Linux
@ -183,15 +218,25 @@ $ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip
[![kubo-git via AUR](https://img.shields.io/static/v1?label=kubo-git&message=latest%40master&color=1793d1&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://aur.archlinux.org/packages/kubo/)
#### <a name="gentoo-linux">Gentoo Linux</a>
https://wiki.gentoo.org/wiki/Kubo
```bash
# emerge -a net-p2p/kubo
```
https://packages.gentoo.org/packages/net-p2p/kubo
#### <a name="nix-linux">Nix</a>
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo (go-ipfs) like this:
```
$ nix-env -i ipfs
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `ipfs`.
You can also install the Package by using its attribute name, which is also `kubo`.
#### Solus
@ -215,6 +260,31 @@ You can also install it through the Solus software center.
No longer supported, see rationale in [kubo#8688](https://github.com/ipfs/kubo/issues/8688).
#### Ubuntu PPA
[PPA homepage](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) on Launchpad.
##### Latest Ubuntu (>= 20.04 LTS)
```sh
sudo add-apt-repository ppa:twdragon/ipfs
sudo apt update
sudo apt install ipfs-kubo
```
##### Any Ubuntu version
```sh
sudo su
echo 'deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
echo 'deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
exit
sudo apt update
sudo apt install ipfs-kubo
```
where `<<DISTRO>>` is the codename of your Ubuntu distribution (for example, `jammy` for 22.04 LTS). During the first installation the package maintenance script may automatically ask you about which networking profile, CPU accounting model, and/or existing node configuration file you want to use.
**NOTE**: this method also may work with any compatible Debian-based distro which has `libc6` inside, and APT as a package manager.
### Unofficial Windows packages
- [Chocolatey](#chocolatey)
@ -252,10 +322,10 @@ $ sudo port install ipfs
In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
```
$ nix-env -i ipfs
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `ipfs`.
You can also install the Package by using its attribute name, which is also `kubo`.
#### Homebrew
@ -313,15 +383,6 @@ Compiling for a different platform is as simple as running:
make build GOOS=myTargetOS GOARCH=myTargetArchitecture
```
##### OpenSSL
To build go-ipfs with OpenSSL support, append `GOTAGS=openssl` to your `make` invocation. Building with OpenSSL should significantly reduce the background CPU usage on nodes that frequently make or receive new connections.
Note: OpenSSL requires CGO support and, by default, CGO is disabled when cross-compiling. To cross-compile with OpenSSL support, you must:
1. Install a compiler toolchain for the target platform.
2. Set the `CGO_ENABLED=1` environment variable.
#### Troubleshooting
- Separate [instructions are available for building on Windows](docs/windows.md).
@ -381,7 +442,6 @@ Some places to get you started on the codebase:
- libp2p
- libp2p: https://github.com/libp2p/go-libp2p
- DHT: https://github.com/libp2p/go-libp2p-kad-dht
- PubSub: https://github.com/libp2p/go-libp2p-pubsub
- [IPFS : The `Add` command demystified](https://github.com/ipfs/kubo/tree/master/docs/add-code-flow.md)
### Map of Implemented Subsystems
@ -411,9 +471,11 @@ If you make changes to the protocol buffers, you will need to install the [proto
Find more documentation for developers on [docs](./docs)
## Maintainer Info
* [Project Board for active and upcoming work](https://pl-strflt.notion.site/Kubo-GitHub-Project-Board-c68f9192e48e4e9eba185fa697bf0570)
* [Release Process](https://pl-strflt.notion.site/Kubo-Release-Process-5a5d066264704009a28a79cff93062c4)
* [Additional PL EngRes Kubo maintainer info](https://pl-strflt.notion.site/Kubo-go-ipfs-4a484aeeaa974dcf918027c300426c05)
Kubo is maintained by [Shipyard](https://ipshipyard.com/).
* This repository is part of [Shipyard's GO Triage triage](https://ipshipyard.notion.site/IPFS-Go-Triage-Boxo-Kubo-Rainbow-0ddee6b7f28d412da7dabe4f9107c29a).
* [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
## Contributing
@ -424,7 +486,9 @@ We ❤️ all [our contributors](docs/AUTHORS); this project wouldnt be what
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
Please reach out to us in one [chat](https://docs.ipfs.tech/community/chat/) rooms.
Members of IPFS community provide Kubo support on [discussion forum category here](https://discuss.ipfs.tech/c/help/help-kubo/23).
Need help with IPFS itself? Learn where to get help and support at https://ipfs.tech/help.
## License

View File

@ -19,7 +19,7 @@ include mk/golang.mk
# extra properties #
# -------------------- #
ifeq ($(TEST_NO_FUSE),1)
ifeq ($(TEST_FUSE),0)
GOTAGS += nofuse
endif
export LIBP2P_TCP_REUSEPORT=false
@ -66,6 +66,10 @@ clean:
rm -rf $(CLEAN)
.PHONY: clean
mod_tidy:
@find . -name go.mod -execdir $(GOCC) mod tidy \;
.PHONY: mod_tidy
coverage: $(COVERAGE)
.PHONY: coverage
@ -118,7 +122,8 @@ help:
@echo ' all - print this help message'
@echo ' build - Build binary at ./cmd/ipfs/ipfs'
@echo ' nofuse - Build binary with no fuse support'
@echo ' install - Build binary and install into $$GOPATH/bin'
@echo ' install - Build binary and install into $$GOBIN'
@echo ' mod_tidy - Remove unused dependencies from go.mod files'
# @echo ' dist_install - TODO: c.f. ./cmd/ipfs/dist/README.md'
@echo ''
@echo 'CLEANING TARGETS:'
@ -136,8 +141,7 @@ help:
@echo ' test_go_expensive - Run all go tests and compile on all platforms'
@echo ' test_go_race - Run go tests with the race detector enabled'
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
@echo ' test_sharness_short - Run short sharness tests'
@echo ' test_sharness_expensive - Run all sharness tests'
@echo ' test_sharness - Run sharness tests'
@echo ' coverage - Collects coverage info from unit tests and sharness'
@echo
.PHONY: help

View File

@ -14,8 +14,8 @@ environment:
GOPATH: c:\gopath
TEST_VERBOSE: 1
#TEST_NO_FUSE: 1
#TEST_SUITE: test_sharness_expensive
#GOFLAGS: -tags nofuse
#TEST_SUITE: test_sharness
#GOFLAGS: -tags nofuse
global:
BASH: C:\cygwin\bin\bash
matrix:
@ -23,27 +23,27 @@ environment:
GOVERSION: 1.5.1
GOROOT: c:\go
DOWNLOADPLATFORM: "x64"
install:
# Enable make
#- SET PATH=c:\MinGW\bin;%PATH%
#- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
- go version
- go env
# Cygwin build script
#
# NOTES:
#
# The stdin/stdout file descriptor appears not to be valid for the Appveyor
# build which causes failures as certain functions attempt to redirect
# build which causes failures as certain functions attempt to redirect
# default file handles. Ensure a dummy file descriptor is opened with 'exec'.
#
#
build_script:
- '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; make nofuse"'
test_script:
- '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; export GOFLAGS=''-tags nofuse''; export TEST_NO_FUSE=1; export TEST_VERBOSE=1; export TEST_SUITE=test_sharness_expensive; make $TEST_SUITE"'
- '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; export GOFLAGS=''-tags nofuse''; export TEST_NO_FUSE=1; export TEST_VERBOSE=1; export TEST_EXPENSIVE=1; export TEST_SUITE=test_sharness; make $TEST_SUITE"'
#build:
# parallel: true

View File

@ -3,13 +3,3 @@
This directory contains the go-ipfs assets:
* Getting started documentation (`init-doc`).
* Directory listing HTML template (`dir-index-html`).
## Re-generating
Edit the source files and use `go generate` from within the
assets directory:
```
go generate .
```

View File

@ -1,31 +1,21 @@
//go:generate npm run build --prefix ./dir-index-html/
package assets
import (
"embed"
"fmt"
"io"
"io/fs"
gopath "path"
"strconv"
"github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/coreapi"
"github.com/cespare/xxhash"
"github.com/ipfs/boxo/files"
cid "github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
options "github.com/ipfs/interface-go-ipfs-core/options"
"github.com/ipfs/interface-go-ipfs-core/path"
)
//go:embed init-doc dir-index-html/dir-index.html dir-index-html/knownIcons.txt
//go:embed init-doc
var Asset embed.FS
// AssetHash a non-cryptographic hash of all embedded assets
var AssetHash string
// initDocPaths lists the paths for the docs we want to seed during --init
// initDocPaths lists the paths for the docs we want to seed during --init.
var initDocPaths = []string{
gopath.Join("init-doc", "about"),
gopath.Join("init-doc", "readme"),
@ -36,33 +26,7 @@ var initDocPaths = []string{
gopath.Join("init-doc", "ping"),
}
func init() {
sum := xxhash.New()
err := fs.WalkDir(Asset, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
file, err := Asset.Open(path)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(sum, file)
return err
})
if err != nil {
panic("error creating asset sum: " + err.Error())
}
AssetHash = strconv.FormatUint(sum.Sum64(), 32)
}
// SeedInitDocs adds the list of embedded init documentation to the passed node, pins it and returns the root key
// SeedInitDocs adds the list of embedded init documentation to the passed node, pins it and returns the root key.
func SeedInitDocs(nd *core.IpfsNode) (cid.Cid, error) {
return addAssetList(nd, initDocPaths)
}
@ -73,12 +37,7 @@ func addAssetList(nd *core.IpfsNode, l []string) (cid.Cid, error) {
return cid.Cid{}, err
}
dirb, err := api.Object().New(nd.Context(), options.Object.Type("unixfs-dir"))
if err != nil {
return cid.Cid{}, err
}
basePath := path.IpfsPath(dirb.Cid())
dirMap := map[string]files.Node{}
for _, p := range l {
d, err := Asset.ReadFile(p)
@ -86,22 +45,17 @@ func addAssetList(nd *core.IpfsNode, l []string) (cid.Cid, error) {
return cid.Cid{}, fmt.Errorf("assets: could load Asset '%s': %s", p, err)
}
fp, err := api.Unixfs().Add(nd.Context(), files.NewBytesFile(d))
if err != nil {
return cid.Cid{}, err
}
dirMap[gopath.Base(p)] = files.NewBytesFile(d)
}
fname := gopath.Base(p)
basePath, err = api.Object().AddLink(nd.Context(), basePath, fname, fp)
if err != nil {
return cid.Cid{}, err
}
basePath, err := api.Unixfs().Add(nd.Context(), files.NewMapDirectory(dirMap))
if err != nil {
return cid.Cid{}, err
}
if err := api.Pin().Add(nd.Context(), basePath); err != nil {
return cid.Cid{}, err
}
return basePath.Cid(), nil
return basePath.RootCid(), nil
}

View File

@ -1,3 +0,0 @@
# dag-index-html
> HTML representation for non-UnixFS DAGs such as DAG-CBOR.

File diff suppressed because one or more lines are too long

View File

@ -1,26 +0,0 @@
# dir-index-html
> Directory listing HTML for HTTP gateway
![](https://user-images.githubusercontent.com/157609/88379209-ce6f0600-cda2-11ea-9620-20b9237bb441.png)
## Updating
When making updates to the directory listing page template, please note the following:
1. Make your changes to the (human-friendly) source documents in the `src` directory and run `npm run build`
3. Before testing or releasing, go to the top-level `./assets` directory and make sure to run the `go generate .` script to update the bindata version
## Testing
1. Make sure you have [Go](https://golang.org/dl/) installed
2. Start the test server, which lives in its own directory:
```bash
> cd test
> go run .
```
This will listen on [`localhost:3000`](http://localhost:3000/) and reload the template every time you refresh the page.
If you get a "no such file or directory" error upon trying `go run .`, make sure you ran `npm run build` to generate the minified artifact that the test is looking for.

File diff suppressed because one or more lines are too long

View File

@ -1 +0,0 @@
package dirindexhtml

View File

@ -1,65 +0,0 @@
.aac
.aiff
.ai
.avi
.bmp
.c
.cpp
.css
.dat
.dmg
.doc
.dotx
.dwg
.dxf
.eps
.exe
.flv
.gif
.h
.hpp
.html
.ics
.iso
.java
.jpg
.jpeg
.js
.key
.less
.mid
.mkv
.mov
.mp3
.mp4
.mpg
.odf
.ods
.odt
.otp
.ots
.ott
.pdf
.php
.png
.ppt
.psd
.py
.qt
.rar
.rb
.rtf
.sass
.scss
.sql
.tga
.tgz
.tiff
.txt
.wav
.wmv
.xls
.xlsx
.xml
.yml
.zip

View File

@ -1,17 +0,0 @@
{
"name": "dir-index-html",
"description": "Directory listing HTML for go-ipfs gateways",
"version": "1.3.0",
"private": true,
"homepage": "https://github.com/ipfs/go-ipfs",
"license": "MIT",
"scripts": {
"start": "cd test && go run .",
"build": "npm run build:clean && npm run build:remove-style-links && npm run build:minify-wrap-css && npm run build:combine-html-css && npm run build:remove-unused",
"build:clean": "rm dir-index.html",
"build:remove-style-links": "sed '/<link rel=\"stylesheet\"/d' ./src/dir-index.html > ./base-html.html",
"build:minify-wrap-css": "(echo \"<style>\" && cat ./src/icons.css ./src/style.css | tr -d \"\t\n\r\" && echo && echo \"</style>\") > ./minified-wrapped-style.html",
"build:combine-html-css": "sed '/<\\/title>/ r ./minified-wrapped-style.html' ./base-html.html > ./dir-index.html",
"build:remove-unused": "rm ./base-html.html && rm ./minified-wrapped-style.html"
}
}

View File

@ -1,98 +0,0 @@
<!DOCTYPE html>
{{ $root := . }}
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="description" content="A directory of content-addressed files hosted on IPFS">
<meta property="og:title" content="Files on IPFS">
<meta property="og:description" content="{{ .Path }}">
<meta property="og:type" content="website">
<meta property="og:image" content="https://gateway.ipfs.io/ipfs/QmSDeYAe9mga6NdTozAZuyGL3Q1XjsLtvX28XFxJH8oPjq">
<meta name="twitter:title" content="{{ .Path }}">
<meta name="twitter:description" content="A directory of files hosted on the distributed, decentralized web using IPFS">
<meta name="twitter:card" content="summary_large_image">
<meta name="twitter:image" content="https://gateway.ipfs.io/ipfs/QmSDeYAe9mga6NdTozAZuyGL3Q1XjsLtvX28XFxJH8oPjq">
<meta name="twitter:creator" content="@ipfs">
<meta name="twitter:site" content="@ipfs">
<meta name="image" content="https://gateway.ipfs.io/ipfs/QmSDeYAe9mga6NdTozAZuyGL3Q1XjsLtvX28XFxJH8oPjq">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="shortcut icon" href="data:image/x-icon;base64,AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlo89/56ZQ/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjDu1lo89/6mhTP+zrVP/nplD/5+aRK8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHNiIS6Wjz3/ubFY/761W/+vp1D/urRZ/8vDZf/GvmH/nplD/1BNIm8AAAAAAAAAAAAAAAAAAAAAAAAAAJaPPf+knEj/vrVb/761W/++tVv/r6dQ/7q0Wf/Lw2X/y8Nl/8vDZf+tpk7/nplD/wAAAAAAAAAAAAAAAJaPPf+2rVX/vrVb/761W/++tVv/vrVb/6+nUP+6tFn/y8Nl/8vDZf/Lw2X/y8Nl/8G6Xv+emUP/AAAAAAAAAACWjz3/vrVb/761W/++tVv/vrVb/761W/+vp1D/urRZ/8vDZf/Lw2X/y8Nl/8vDZf/Lw2X/nplD/wAAAAAAAAAAlo89/761W/++tVv/vrVb/761W/++tVv/r6dQ/7q0Wf/Lw2X/y8Nl/8vDZf/Lw2X/y8Nl/56ZQ/8AAAAAAAAAAJaPPf++tVv/vrVb/761W/++tVv/vbRa/5aPPf+emUP/y8Nl/8vDZf/Lw2X/y8Nl/8vDZf+emUP/AAAAAAAAAACWjz3/vrVb/761W/++tVv/vrVb/5qTQP+inkb/op5G/6KdRv/Lw2X/y8Nl/8vDZf/Lw2X/nplD/wAAAAAAAAAAlo89/761W/++tVv/sqlS/56ZQ//LxWb/0Mlp/9DJaf/Kw2X/oJtE/7+3XP/Lw2X/y8Nl/56ZQ/8AAAAAAAAAAJaPPf+9tFr/mJE+/7GsUv/Rymr/0cpq/9HKav/Rymr/0cpq/9HKav+xrFL/nplD/8vDZf+emUP/AAAAAAAAAACWjz3/op5G/9HKav/Rymr/0cpq/9HKav/Rymr/0cpq/9HKav/Rymr/0cpq/9HKav+inkb/nplD/wAAAAAAAAAAAAAAAKKeRv+3slb/0cpq/9HKav/Rymr/0cpq/9HKav/Rymr/0cpq/9HKav+1sFX/op5G/wAAAAAAAAAAAAAAAAAAAAAAAAAAop5GUKKeRv/Nxmf/0cpq/9HKav/Rymr/0cpq/83GZ/+inkb/op5GSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAop5G16KeRv/LxWb/y8Vm/6KeRv+inkaPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAop5G/6KeRtcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/n8AAPgfAADwDwAAwAMAAIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAAwAMAAPAPAAD4HwAA/n8AAA==" />
<link rel="stylesheet" href="style.css"/>
<link rel="stylesheet" href="icons.css">
<title>{{ .Path }}</title>
</head>
<body>
<div id="page-header">
<div id="page-header-logo" class="ipfs-logo">&nbsp;</div>
<div id="page-header-menu">
<div class="menu-item-wide"><a href="https://ipfs.tech" target="_blank" rel="noopener noreferrer">About IPFS</a></div>
<div class="menu-item-wide"><a href="https://ipfs.tech#install" target="_blank" rel="noopener noreferrer">Install IPFS</a></div>
<div class="menu-item-narrow"><a href="https://ipfs.tech" target="_blank" rel="noopener noreferrer">About</a></div>
<div class="menu-item-narrow"><a href="https://ipfs.tech#install" target="_blank" rel="noopener noreferrer">Install</a></div>
<div>
<a href="https://github.com/ipfs/kubo/issues/new/choose" target="_blank" rel="noopener noreferrer" title="Report a bug">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18.4 21"><circle cx="7.5" cy="4.8" r="1"/><circle cx="11.1" cy="4.8" r="1"/><path d="M12.7 8.4c-0.5-1.5-1.9-2.5-3.5-2.5 -1.6 0-3 1-3.5 2.5H12.7z"/><path d="M8.5 9.7H5c-0.5 0.8-0.7 1.7-0.7 2.7 0 2.6 1.8 4.8 4.2 5.2V9.7z"/><path d="M13.4 9.7H9.9v7.9c2.4-0.4 4.2-2.5 4.2-5.2C14.1 11.4 13.9 10.5 13.4 9.7z"/><circle cx="15.7" cy="12.9" r="1"/><circle cx="15.1" cy="15.4" r="1"/><circle cx="15.3" cy="10.4" r="1"/><circle cx="2.7" cy="12.9" r="1"/><circle cx="3.3" cy="15.4" r="1"/><circle cx="3.1" cy="10.4" r="1"/></svg>
</a>
</div>
</div>
</div>
<div id="content">
<div id="content-header" class="d-flex flex-wrap">
<div>
<strong>
Index of
{{ range .Breadcrumbs -}}
/{{ if .Path }}<a href="{{ $root.GatewayURL }}{{ .Path | urlEscape }}">{{ .Name }}</a>{{ else }}{{ .Name }}{{ end }}
{{- else }}
{{ .Path }}
{{ end }}
</strong>
{{ if .Hash }}
<div class="ipfs-hash" translate="no">
{{ .Hash }}
</div>
{{ end }}
</div>
{{ if .Size }}
<div class="no-linebreak flex-shrink-1 ml-auto">
<strong title="Cumulative size of IPFS DAG (data + metadata)">&nbsp;{{ .Size }}</strong>
</div>
{{ end }}
</div>
<div class="table-responsive">
<table>
{{ if .BackLink }}
<tr>
<td class="type-icon">
<div class="ipfs-_blank">&nbsp;</div>
</td>
<td>
<a href="{{.BackLink | urlEscape}}">..</a>
</td>
<td></td>
<td></td>
</tr>
{{ end }}
{{ range .Listing }}
<tr>
<td class="type-icon">
<div class="{{iconFromExt .Name}}">&nbsp;</div>
</td>
<td>
<a href="{{ .Path | urlEscape }}">{{ .Name }}</a>
</td>
<td class="no-linebreak">
{{ if .Hash }}
<a class="ipfs-hash" translate="no" href={{ if $root.DNSLink }}"https://cid.ipfs.tech/#{{ .Hash | urlEscape}}" target="_blank" rel="noreferrer noopener"{{ else }}"{{ $root.GatewayURL }}/ipfs/{{ .Hash | urlEscape}}?filename={{ .Name | urlEscape }}"{{ end }}>
{{ .ShortHash }}
</a>
{{ end }}
</td>
<td class="no-linebreak" title="Cumulative size of IPFS DAG (data + metadata)">{{ .Size }}</td>
</tr>
{{ end }}
</table>
</div>
</div>
</body>
</html>

File diff suppressed because one or more lines are too long

View File

@ -1,212 +0,0 @@
body {
color:#34373f;
font-family:"Helvetica Neue", Helvetica, Arial, sans-serif;
font-size:14px;
line-height:1.43;
margin:0;
word-break:break-all;
-webkit-text-size-adjust:100%;
-ms-text-size-adjust:100%;
-webkit-tap-highlight-color:transparent
}
a {
color:#117eb3;
text-decoration:none
}
a:hover {
color:#00b0e9;
text-decoration:underline
}
a:active,
a:visited {
color:#00b0e9
}
strong {
font-weight:700
}
table {
border-collapse:collapse;
border-spacing:0;
max-width:100%;
width:100%
}
table:last-child {
border-bottom-left-radius:3px;
border-bottom-right-radius:3px
}
tr:first-child td {
border-top:0
}
tr:nth-of-type(even) {
background-color:#f7f8fa
}
td {
border-top:1px solid #d9dbe2;
padding:.65em;
vertical-align:top
}
#page-header {
align-items:center;
background:#0b3a53;
border-bottom:4px solid #69c4cd;
color:#fff;
display:flex;
font-size:1.12em;
font-weight:500;
justify-content:space-between;
padding:0 1em
}
#page-header a {
color:#69c4cd
}
#page-header a:active {
color:#9ad4db
}
#page-header a:hover {
color:#fff
}
#page-header-logo {
height:2.25em;
margin:.7em .7em .7em 0;
width:7.15em
}
#page-header-menu {
align-items:center;
display:flex;
margin:.65em 0
}
#page-header-menu div {
margin:0 .6em
}
#page-header-menu div:last-child {
margin:0 0 0 .6em
}
#page-header-menu svg {
fill:#69c4cd;
height:1.8em;
margin-top:.125em
}
#page-header-menu svg:hover {
fill:#fff
}
.menu-item-narrow {
display:none
}
#content {
border:1px solid #d9dbe2;
border-radius:4px;
margin:1em
}
#content-header {
background-color:#edf0f4;
border-bottom:1px solid #d9dbe2;
border-top-left-radius:3px;
border-top-right-radius:3px;
padding:.7em 1em
}
.type-icon,
.type-icon>* {
width:1.15em
}
.no-linebreak {
white-space:nowrap
}
.ipfs-hash {
color:#7f8491;
font-family:monospace
}
@media only screen and (max-width:500px) {
.menu-item-narrow {
display:inline
}
.menu-item-wide {
display:none
}
}
@media print {
#page-header {
display:none
}
#content-header,
.ipfs-hash,
body {
color:#000
}
#content-header {
border-bottom:1px solid #000
}
#content {
border:1px solid #000
}
a,
a:visited {
color:#000;
text-decoration:underline
}
a[href]:after {
content:" (" attr(href) ")"
}
tr {
page-break-inside:avoid
}
tr:nth-of-type(even) {
background-color:transparent
}
td {
border-top:1px solid #000
}
}
@-ms-viewport {
width:device-width
}
.d-flex {
display:flex
}
.flex-wrap {
flex-flow:wrap
}
.flex-shrink-1 {
flex-shrink:1
}
.ml-auto {
margin-left:auto
}
.table-responsive {
display:block;
width:100%;
overflow-x:auto;
-webkit-overflow-scrolling:touch
}

View File

@ -1,3 +0,0 @@
module github.com/ipfs/dir-index-html/test
go 1.17

View File

@ -1,116 +0,0 @@
package main
import (
"fmt"
"net/http"
"net/url"
"os"
"text/template"
)
const templateFile = "../dir-index.html"
// Copied from go-ipfs/core/corehttp/gateway_indexPage.go
type listingTemplateData struct {
GatewayURL string
DNSLink bool
Listing []directoryItem
Size string
Path string
Breadcrumbs []breadcrumb
BackLink string
Hash string
}
type directoryItem struct {
Size string
Name string
Path string
Hash string
ShortHash string
}
type breadcrumb struct {
Name string
Path string
}
var testPath = "/ipfs/QmFooBarQXB2mzChmMeKY47C43LxUdg1NDJ5MWcKMKxDu7/a/b/c"
var testData = listingTemplateData{
GatewayURL: "//localhost:3000",
DNSLink: true,
Listing: []directoryItem{{
Size: "25 MiB",
Name: "short-film.mov",
Path: testPath + "/short-film.mov",
Hash: "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR",
ShortHash: "QmbW\u2026sMnR",
}, {
Size: "23 KiB",
Name: "250pxيوسف_الوزاني_صورة_ملتقطة_بواسطة_مرصد_هابل_الفضائي_توضح_سديم_السرطان،_وهو_بقايا_مستعر_أعظم._.jpg",
Path: testPath + "/250pxيوسف_الوزاني_صورة_ملتقطة_بواسطة_مرصد_هابل_الفضائي_توضح_سديم_السرطان،_وهو_بقايا_مستعر_أعظم._.jpg",
Hash: "QmUwrKrMTrNv8QjWGKMMH5QV9FMPUtRCoQ6zxTdgxATQW6",
ShortHash: "QmUw\u2026TQW6",
}, {
Size: "1 KiB",
Name: "this-piece-of-papers-got-47-words-37-sentences-58-words-we-wanna-know.txt",
Path: testPath + "/this-piece-of-papers-got-47-words-37-sentences-58-words-we-wanna-know.txt",
Hash: "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi",
ShortHash: "bafy\u2026bzdi",
}},
Size: "25 MiB",
Path: testPath,
Breadcrumbs: []breadcrumb{{
Name: "ipfs",
}, {
Name: "QmFooBarQXB2mzChmMeKY47C43LxUdg1NDJ5MWcKMKxDu7",
Path: testPath + "/../../..",
}, {
Name: "a",
Path: testPath + "/../..",
}, {
Name: "b",
Path: testPath + "/..",
}, {
Name: "c",
Path: testPath,
}},
BackLink: testPath + "/..",
Hash: "QmFooBazBar2mzChmMeKY47C43LxUdg1NDJ5MWcKMKxDu7",
}
func main() {
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.Error(w, "Ha-ha, tricked you! There are no files here!", http.StatusNotFound)
return
}
listingTemplate, err := template.New("dir-index.html").Funcs(template.FuncMap{
"iconFromExt": func(name string) string {
return "ipfs-_blank" // place-holder
},
"urlEscape": func(rawUrl string) string {
pathUrl := url.URL{Path: rawUrl}
return pathUrl.String()
},
}).ParseFiles(templateFile)
if err != nil {
http.Error(w, fmt.Sprintf("failed to parse template file: %s", err), http.StatusInternalServerError)
return
}
err = listingTemplate.Execute(w, &testData)
if err != nil {
http.Error(w, fmt.Sprintf("failed to execute template: %s", err), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
})
if _, err := os.Stat(templateFile); err != nil {
wd, _ := os.Getwd()
fmt.Printf("could not open template file %q, relative to %q: %s\n", templateFile, wd, err)
os.Exit(1)
}
fmt.Printf("listening on localhost:3000\n")
http.ListenAndServe("localhost:3000", mux)
}

View File

@ -7,9 +7,9 @@ repo="$IPFS_PATH"
if [ "$(id -u)" -eq 0 ]; then
echo "Changing user to $user"
# ensure folder is writable
su-exec "$user" test -w "$repo" || chown -R -- "$user" "$repo"
gosu "$user" test -w "$repo" || chown -R -- "$user" "$repo"
# restart script with new privileges
exec su-exec "$user" "$0" "$@"
exec gosu "$user" "$0" "$@"
fi
# 2nd invocation with regular user

View File

@ -6,22 +6,19 @@
# ./get-docker-tags.sh <build number> <git commit sha1> <git branch name> [git tag name]
#
# Example:
#
#
# # get tag for the master branch
# ./get-docker-tags.sh $(date -u +%F) testingsha master
#
# # get tag for a release tag
# ./get-docker-tags.sh $(date -u +%F) testingsha release v0.5.0
#
# # Serving suggestion in circle ci - https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
# ./get-docker-tags.sh $(date -u +%F) "$CIRCLE_SHA1" "$CIRCLE_BRANCH" "$CIRCLE_TAG"
#
set -euo pipefail
if [[ $# -lt 1 ]] ; then
echo 'At least 1 arg required.'
echo 'Usage:'
echo './push-docker-tags.sh <build number> [git commit sha1] [git branch name] [git tag name]'
echo './get-docker-tags.sh <build number> [git commit sha1] [git branch name] [git tag name]'
exit 1
fi
@ -53,9 +50,9 @@ elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
branch=$(echo "$GIT_BRANCH" | tr '/' '-' | tr --delete --complement '[:alnum:]-')
echoImageName "${branch}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
elif [ "$GIT_BRANCH" = "master" ]; then
echoImageName "master-${BUILD_NUM}-${GIT_SHA1_SHORT}"
echoImageName "master-latest"
elif [ "$GIT_BRANCH" = "master" ] || [ "$GIT_BRANCH" = "staging" ]; then
echoImageName "${GIT_BRANCH}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
echoImageName "${GIT_BRANCH}-latest"
else
echo "Nothing to do. No docker tag defined for branch: $GIT_BRANCH, tag: $GIT_TAG"

View File

@ -19,7 +19,7 @@ if [ $? -ne 0 ]; then
fi
# check the object is there
ipfs object stat "$1" >/dev/null
ipfs dag stat "$1" >/dev/null
if [ $? -ne 0 ]; then
echo "error: ipfs cannot find $1"
exit 1

View File

@ -41,7 +41,6 @@ IGNORE_FILES=(
"go.mod"
"go.sum"
".github"
".circleci"
"*.pb.go"
"cbor_gen.go"
"ipldsch_*.go"
@ -261,7 +260,7 @@ recursive_release_log() {
printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2
echo "### Changelog"
echo "### 📝 Changelog"
echo
echo "<details><summary>Full Changelog</summary>"
echo
@ -293,7 +292,7 @@ recursive_release_log() {
echo
echo "</details>"
echo
echo "### Contributors"
echo "### 👨‍👩‍👧‍👦 Contributors"
echo
echo "| Contributor | Commits | Lines ± | Files Changed |"

View File

@ -1,12 +1,13 @@
#!/usr/bin/env bash
#
# TODO: this script is legacy, use get-docker-tags.sh instead.
#
# push-docker-tags.sh
#
# Run from ci to tag images based on the current branch or tag name.
# Run from ci to tag images based on the current branch or tag name.
# A bit like dockerhub autobuild config, but somewhere we can version control it.
#
# The `docker-build` job in .circleci/config.yml builds the current commit
# in docker and tags it as ipfs/go-ipfs:wip
#
# The `docker-build` job builds the current commit in docker and tags it as ipfs/go-ipfs:wip
#
# Then the `docker-publish` job runs this script to decide what tag, if any,
# to publish to dockerhub.
@ -17,16 +18,13 @@
# Example:
# # dry run. pass a 5th arg to have it print what it would do rather than do it.
# ./push-docker-tags.sh $(date -u +%F) testingsha master "" dryrun
#
#
# # push tag for the master branch
# ./push-docker-tags.sh $(date -u +%F) testingsha master
#
# # push tag for a release tag
# ./push-docker-tags.sh $(date -u +%F) testingsha release v0.5.0
#
# # Serving suggestion in circle ci - https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
# ./push-docker-tags.sh $(date -u +%F) "$CIRCLE_SHA1" "$CIRCLE_BRANCH" "$CIRCLE_TAG"
#
set -euo pipefail
if [[ $# -lt 1 ]] ; then
@ -72,9 +70,9 @@ elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
branch=$(echo "$GIT_BRANCH" | tr '/' '-' | tr --delete --complement '[:alnum:]-')
pushTag "${branch}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
elif [ "$GIT_BRANCH" = "master" ]; then
pushTag "master-${BUILD_NUM}-${GIT_SHA1_SHORT}"
pushTag "master-latest"
elif [ "$GIT_BRANCH" = "master" ] || [ "$GIT_BRANCH" = "staging" ]; then
pushTag "${GIT_BRANCH}-${BUILD_NUM}-${GIT_SHA1_SHORT}"
pushTag "${GIT_BRANCH}-latest"
else
echo "Nothing to do. No docker tag defined for branch: $GIT_BRANCH, tag: $GIT_TAG"

View File

@ -6,9 +6,9 @@ import (
"errors"
"fmt"
bs "github.com/ipfs/boxo/blockstore"
pin "github.com/ipfs/boxo/pinning/pinner"
cid "github.com/ipfs/go-cid"
bs "github.com/ipfs/go-ipfs-blockstore"
pin "github.com/ipfs/go-ipfs-pinner"
format "github.com/ipfs/go-ipld-format"
)

48
client/rpc/README.md Normal file
View File

@ -0,0 +1,48 @@
# `coreiface.CoreAPI` over http `rpc`
> IPFS CoreAPI implementation using HTTP API
This package implements [`coreiface.CoreAPI`](https://pkg.go.dev/github.com/ipfs/kubo/core/coreiface#CoreAPI) over the HTTP API.
## Documentation
https://pkg.go.dev/github.com/ipfs/kubo/client/rpc
### Example
Pin file on your local IPFS node based on its CID:
```go
package main
import (
"context"
"fmt"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
"github.com/ipfs/kubo/client/rpc"
)
func main() {
// "Connect" to local node
node, err := rpc.NewLocalApi()
if err != nil {
fmt.Println(err)
return
}
// Pin a given file by its CID
ctx := context.Background()
c, err := cid.Decode("bafkreidtuosuw37f5xmn65b3ksdiikajy7pwjjslzj2lxxz2vc4wdy3zku")
if err != nil {
fmt.Println(err)
return
}
p := path.FromCid(c)
err = node.Pin().Add(ctx, p)
if err != nil {
fmt.Println(err)
return
}
}
```

292
client/rpc/api.go Normal file
View File

@ -0,0 +1,292 @@
package rpc
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/blang/semver/v4"
"github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/go-cid"
legacy "github.com/ipfs/go-ipld-legacy"
ipfs "github.com/ipfs/kubo"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
"github.com/ipfs/kubo/misc/fsutil"
dagpb "github.com/ipld/go-codec-dagpb"
_ "github.com/ipld/go-ipld-prime/codec/dagcbor"
"github.com/ipld/go-ipld-prime/node/basicnode"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
const (
DefaultPathName = ".ipfs"
DefaultPathRoot = "~/" + DefaultPathName
DefaultApiFile = "api"
EnvDir = "IPFS_PATH"
)
// ErrApiNotFound if we fail to find a running daemon.
var ErrApiNotFound = errors.New("ipfs api address could not be found")
// HttpApi implements github.com/ipfs/interface-go-ipfs-core/CoreAPI using
// IPFS HTTP API.
//
// For interface docs see
// https://godoc.org/github.com/ipfs/interface-go-ipfs-core#CoreAPI
type HttpApi struct {
url string
httpcli http.Client
Headers http.Header
applyGlobal func(*requestBuilder)
ipldDecoder *legacy.Decoder
versionMu sync.Mutex
version *semver.Version
}
// NewLocalApi tries to construct new HttpApi instance communicating with local
// IPFS daemon
//
// Daemon api address is pulled from the $IPFS_PATH/api file.
// If $IPFS_PATH env var is not present, it defaults to ~/.ipfs.
func NewLocalApi() (*HttpApi, error) {
baseDir := os.Getenv(EnvDir)
if baseDir == "" {
baseDir = DefaultPathRoot
}
return NewPathApi(baseDir)
}
// NewPathApi constructs new HttpApi by pulling api address from specified
// ipfspath. Api file should be located at $ipfspath/api.
func NewPathApi(ipfspath string) (*HttpApi, error) {
a, err := ApiAddr(ipfspath)
if err != nil {
if os.IsNotExist(err) {
err = ErrApiNotFound
}
return nil, err
}
return NewApi(a)
}
// ApiAddr reads api file in specified ipfs path.
func ApiAddr(ipfspath string) (ma.Multiaddr, error) {
baseDir, err := fsutil.ExpandHome(ipfspath)
if err != nil {
return nil, err
}
apiFile := filepath.Join(baseDir, DefaultApiFile)
api, err := os.ReadFile(apiFile)
if err != nil {
return nil, err
}
return ma.NewMultiaddr(strings.TrimSpace(string(api)))
}
// NewApi constructs HttpApi with specified endpoint.
func NewApi(a ma.Multiaddr) (*HttpApi, error) {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DisableKeepAlives: true,
}
network, address, err := manet.DialArgs(a)
if err != nil {
return nil, err
}
if network == "unix" {
transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", address)
}
c := &http.Client{
Transport: transport,
}
// This will create an API client which
// makes requests to `http://unix`.
return NewURLApiWithClient(network, c)
}
c := &http.Client{
Transport: transport,
}
return NewApiWithClient(a, c)
}
// NewApiWithClient constructs HttpApi with specified endpoint and custom http client.
func NewApiWithClient(a ma.Multiaddr, c *http.Client) (*HttpApi, error) {
_, url, err := manet.DialArgs(a)
if err != nil {
return nil, err
}
if a, err := ma.NewMultiaddr(url); err == nil {
_, host, err := manet.DialArgs(a)
if err == nil {
url = host
}
}
proto := "http://"
// By default, DialArgs is going to provide details suitable for connecting
// a socket to, but not really suitable for making an informed choice of http
// protocol. For multiaddresses specifying tls and/or https we want to make
// a https request instead of a http request.
protocols := a.Protocols()
for _, p := range protocols {
if p.Code == ma.P_HTTPS || p.Code == ma.P_TLS {
proto = "https://"
break
}
}
return NewURLApiWithClient(proto+url, c)
}
func NewURLApiWithClient(url string, c *http.Client) (*HttpApi, error) {
decoder := legacy.NewDecoder()
// Add support for these codecs to match what is done in the merkledag library
// Note: to match prior behavior the go-ipld-prime CBOR decoder is manually included
// TODO: allow the codec registry used to be configured by the caller not through a global variable
decoder.RegisterCodec(cid.DagProtobuf, dagpb.Type.PBNode, merkledag.ProtoNodeConverter)
decoder.RegisterCodec(cid.Raw, basicnode.Prototype.Bytes, merkledag.RawNodeConverter)
api := &HttpApi{
url: url,
httpcli: *c,
Headers: make(map[string][]string),
applyGlobal: func(*requestBuilder) {},
ipldDecoder: decoder,
}
// We don't support redirects.
api.httpcli.CheckRedirect = func(_ *http.Request, _ []*http.Request) error {
return fmt.Errorf("unexpected redirect")
}
return api, nil
}
func (api *HttpApi) WithOptions(opts ...caopts.ApiOption) (iface.CoreAPI, error) {
options, err := caopts.ApiOptions(opts...)
if err != nil {
return nil, err
}
subApi := &HttpApi{
url: api.url,
httpcli: api.httpcli,
Headers: api.Headers,
applyGlobal: func(req *requestBuilder) {
if options.Offline {
req.Option("offline", options.Offline)
}
},
ipldDecoder: api.ipldDecoder,
}
return subApi, nil
}
func (api *HttpApi) Request(command string, args ...string) RequestBuilder {
headers := make(map[string]string)
if api.Headers != nil {
for k := range api.Headers {
headers[k] = api.Headers.Get(k)
}
}
return &requestBuilder{
command: command,
args: args,
shell: api,
headers: headers,
}
}
func (api *HttpApi) Unixfs() iface.UnixfsAPI {
return (*UnixfsAPI)(api)
}
func (api *HttpApi) Block() iface.BlockAPI {
return (*BlockAPI)(api)
}
func (api *HttpApi) Dag() iface.APIDagService {
return (*HttpDagServ)(api)
}
func (api *HttpApi) Name() iface.NameAPI {
return (*NameAPI)(api)
}
func (api *HttpApi) Key() iface.KeyAPI {
return (*KeyAPI)(api)
}
func (api *HttpApi) Pin() iface.PinAPI {
return (*PinAPI)(api)
}
func (api *HttpApi) Object() iface.ObjectAPI {
return (*ObjectAPI)(api)
}
func (api *HttpApi) Swarm() iface.SwarmAPI {
return (*SwarmAPI)(api)
}
func (api *HttpApi) PubSub() iface.PubSubAPI {
return (*PubsubAPI)(api)
}
func (api *HttpApi) Routing() iface.RoutingAPI {
return (*RoutingAPI)(api)
}
// loadRemoteVersion fetches and caches the remote daemon's semver version.
// The first call performs a "version" RPC with a 30-second timeout;
// subsequent calls return the cached value. Guarded by versionMu for
// concurrent use.
func (api *HttpApi) loadRemoteVersion() (*semver.Version, error) {
	api.versionMu.Lock()
	defer api.versionMu.Unlock()

	if api.version == nil {
		// WithTimeout is the idiomatic equivalent of
		// WithDeadline(ctx, time.Now().Add(d)).
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		resp, err := api.Request("version").Send(ctx)
		if err != nil {
			return nil, err
		}
		if resp.Error != nil {
			return nil, resp.Error
		}
		defer resp.Close()

		var out ipfs.VersionInfo
		dec := json.NewDecoder(resp.Output)
		if err := dec.Decode(&out); err != nil {
			return nil, err
		}

		remoteVersion, err := semver.New(out.Version)
		if err != nil {
			return nil, err
		}
		api.version = remoteVersion
	}

	return api.version, nil
}

169
client/rpc/api_test.go Normal file
View File

@ -0,0 +1,169 @@
package rpc
import (
"context"
"net/http"
"net/http/httptest"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/ipfs/boxo/path"
iface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/core/coreiface/tests"
"github.com/ipfs/kubo/test/cli/harness"
ma "github.com/multiformats/go-multiaddr"
"go.uber.org/multierr"
)
// NodeProvider builds CoreAPI instances backed by real Kubo daemons for the
// shared core-API conformance tests.
type NodeProvider struct{}

// MakeAPISwarm initializes and starts n daemons in parallel, optionally
// connecting nodes 1..n-1 to node 0 when online is true, and returns an HTTP
// RPC CoreAPI per node. Errors from all goroutines are collected and
// combined into one.
func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity, online bool, n int) ([]iface.CoreAPI, error) {
	h := harness.NewT(t)

	apis := make([]iface.CoreAPI, n)
	nodes := h.NewNodes(n)

	// wg tracks completion of every node's setup; zero gates the other nodes
	// until node 0's daemon is up so they have a peer to connect to.
	var wg, zero sync.WaitGroup
	zeroNode := nodes[0]
	wg.Add(len(apis))
	zero.Add(1)

	var errs []error
	var errsLk sync.Mutex

	for i, n := range nodes {
		go func(i int, n *harness.Node) {
			if err := func() error {
				defer wg.Done()
				var err error

				n.Init("--empty-repo")

				c := n.ReadConfig()
				c.Experimental.FilestoreEnabled = true
				n.WriteConfig(c)

				n.StartDaemon("--enable-pubsub-experiment", "--offline="+strconv.FormatBool(!online))

				if online {
					if i > 0 {
						zero.Wait()
						n.Connect(zeroNode)
					} else {
						// NOTE(review): if node 0 failed before this point the
						// other goroutines would block on zero.Wait(); the
						// harness appears to fail the test on errors instead —
						// confirm.
						zero.Done()
					}
				}

				apiMaddr, err := n.TryAPIAddr()
				if err != nil {
					return err
				}

				api, err := NewApi(apiMaddr)
				if err != nil {
					return err
				}
				apis[i] = api

				// empty node is pinned even with --empty-repo, we don't want that
				emptyNode, err := path.NewPath("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn")
				if err != nil {
					return err
				}

				if err := api.Pin().Rm(ctx, emptyNode); err != nil {
					return err
				}
				return nil
			}(); err != nil {
				errsLk.Lock()
				errs = append(errs, err)
				errsLk.Unlock()
			}
		}(i, n)
	}

	wg.Wait()

	return apis, multierr.Combine(errs...)
}
// TestHttpApi runs the shared CoreAPI conformance suite over HTTP RPC.
// Skipped on Windows (tracked upstream as #9905).
func TestHttpApi(t *testing.T) {
	t.Parallel()

	if runtime.GOOS == "windows" {
		t.Skip("skipping due to #9905")
	}

	tests.TestApi(NodeProvider{})(t)
}
// Test_NewURLApiWithClient_With_Headers verifies that headers set on
// api.Headers are sent with each request: the stub server returns 400 unless
// the expected header value arrives, which would make Pin().Rm fail.
func Test_NewURLApiWithClient_With_Headers(t *testing.T) {
	t.Parallel()

	var (
		headerToTest        = "Test-Header"
		expectedHeaderValue = "thisisaheadertest"
	)
	ts := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			val := r.Header.Get(headerToTest)
			if val != expectedHeaderValue {
				w.WriteHeader(400)
				return
			}
			http.ServeContent(w, r, "", time.Now(), strings.NewReader("test"))
		}),
	)
	defer ts.Close()
	api, err := NewURLApiWithClient(ts.URL, &http.Client{
		Transport: &http.Transport{
			Proxy:             http.ProxyFromEnvironment,
			DisableKeepAlives: true,
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	api.Headers.Set(headerToTest, expectedHeaderValue)
	p, err := path.NewPath("/ipfs/QmS4ustL54uo8FzR9455qaxZwuMiUhyvMcX9Ba8nUH4uVv")
	if err != nil {
		t.Fatal(err)
	}
	if err := api.Pin().Rm(context.Background(), p); err != nil {
		t.Fatal(err)
	}
}
// Test_NewURLApiWithClient_HTTP_Variant checks multiaddr-to-URL conversion,
// including the /tls, /https and /tls/http spellings for HTTPS endpoints.
func Test_NewURLApiWithClient_HTTP_Variant(t *testing.T) {
	t.Parallel()

	testcases := []struct {
		address  string
		expected string
	}{
		{address: "/ip4/127.0.0.1/tcp/80", expected: "http://127.0.0.1:80"},
		{address: "/ip4/127.0.0.1/tcp/443/tls", expected: "https://127.0.0.1:443"},
		{address: "/ip4/127.0.0.1/tcp/443/https", expected: "https://127.0.0.1:443"},
		{address: "/ip4/127.0.0.1/tcp/443/tls/http", expected: "https://127.0.0.1:443"},
	}

	for _, tc := range testcases {
		address, err := ma.NewMultiaddr(tc.address)
		if err != nil {
			t.Fatal(err)
		}

		api, err := NewApiWithClient(address, &http.Client{})
		if err != nil {
			t.Fatal(err)
		}

		if api.url != tc.expected {
			t.Errorf("Expected = %s; got %s", tc.expected, api.url)
		}
	}
}

356
client/rpc/apifile.go Normal file
View File

@ -0,0 +1,356 @@
package rpc
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"strconv"
"time"
"github.com/ipfs/boxo/files"
unixfs "github.com/ipfs/boxo/ipld/unixfs"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
)
// forwardSeekLimit bounds how far a forward Seek will simply discard bytes
// from the current stream instead of re-issuing a ranged request.
const forwardSeekLimit = 1 << 14 // 16k

// Get fetches the UnixFS node at p. It stats the path first to learn type,
// size, mode and mtime, then dispatches to the file/directory/symlink
// fetcher accordingly.
func (api *UnixfsAPI) Get(ctx context.Context, p path.Path) (files.Node, error) {
	if p.Mutable() { // use resolved path in case we are dealing with IPNS / MFS
		var err error
		p, _, err = api.core().ResolvePath(ctx, p)
		if err != nil {
			return nil, err
		}
	}

	// Subset of the files/stat RPC response that this method consumes.
	var stat struct {
		Hash       string
		Type       string
		Size       int64 // unixfs size
		Mode       string
		Mtime      int64
		MtimeNsecs int
	}

	err := api.core().Request("files/stat", p.String()).Exec(ctx, &stat)
	if err != nil {
		return nil, err
	}

	mode, err := stringToFileMode(stat.Mode)
	if err != nil {
		return nil, err
	}

	// Mtime of zero means "not set"; leave modTime as the zero time then.
	var modTime time.Time
	if stat.Mtime != 0 {
		modTime = time.Unix(stat.Mtime, int64(stat.MtimeNsecs)).UTC()
	}

	switch stat.Type {
	case "file":
		return api.getFile(ctx, p, stat.Size, mode, modTime)
	case "directory":
		return api.getDir(ctx, p, stat.Size, mode, modTime)
	case "symlink":
		return api.getSymlink(ctx, p, modTime)
	default:
		return nil, fmt.Errorf("unsupported file type '%s'", stat.Type)
	}
}
// apiFile is a files.File streamed from the daemon via the "cat" RPC.
// r is the currently-open response (nil until reset runs) and at is the
// logical read offset it corresponds to.
type apiFile struct {
	ctx   context.Context
	core  *HttpApi
	size  int64
	path  path.Path
	mode  os.FileMode
	mtime time.Time

	r  *Response
	at int64
}
// reset (re)opens the "cat" stream at the current offset f.at, cancelling
// any previously open response first.
func (f *apiFile) reset() error {
	if f.r != nil {
		_ = f.r.Cancel()
		f.r = nil
	}
	req := f.core.Request("cat", f.path.String())
	// Only pass an offset when non-zero; the default request reads from 0.
	if f.at != 0 {
		req.Option("offset", f.at)
	}
	resp, err := req.Send(f.ctx)
	if err != nil {
		return err
	}
	if resp.Error != nil {
		return resp.Error
	}
	f.r = resp
	return nil
}
// Read pulls the next chunk from the open "cat" stream and advances the
// logical offset by however many bytes were actually read.
func (f *apiFile) Read(p []byte) (int, error) {
	n, err := f.r.Output.Read(p)
	// When n == 0 this is a no-op, so no guard is needed.
	f.at += int64(n)
	return n, err
}
// ReadAt reads len(p) bytes at absolute offset off by issuing a fresh ranged
// "cat" request, so it is safe to call concurrently with Read/Seek. A short
// read at end-of-file is reported as io.EOF (translated from
// io.ErrUnexpectedEOF).
func (f *apiFile) ReadAt(p []byte, off int64) (int, error) {
	// Always make a new request. This method should be parallel-safe.
	resp, err := f.core.Request("cat", f.path.String()).
		Option("offset", off).Option("length", len(p)).Send(f.ctx)
	if err != nil {
		return 0, err
	}
	if resp.Error != nil {
		return 0, resp.Error
	}
	defer resp.Output.Close()

	n, err := io.ReadFull(resp.Output, p)
	if err == io.ErrUnexpectedEOF {
		err = io.EOF
	}
	return n, err
}
// Seek repositions the stream. io.SeekStart uses offset as-is (the default
// case), SeekEnd/SeekCurrent are converted to absolute positions. Short
// forward seeks (< forwardSeekLimit) are served by discarding bytes from the
// open stream; anything else re-issues the request at the new offset.
func (f *apiFile) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekEnd:
		offset = f.size + offset
	case io.SeekCurrent:
		offset = f.at + offset
	}
	if f.at == offset { // noop
		return offset, nil
	}

	if f.at < offset && offset-f.at < forwardSeekLimit { // forward skip
		r, err := io.CopyN(io.Discard, f.r.Output, offset-f.at)

		f.at += r
		return f.at, err
	}
	f.at = offset
	return f.at, f.reset()
}
// Close cancels the in-flight "cat" response, if any.
func (f *apiFile) Close() error {
	if f.r != nil {
		return f.r.Cancel()
	}
	return nil
}

// Mode reports the file mode obtained from the original files/stat call.
func (f *apiFile) Mode() os.FileMode {
	return f.mode
}

// ModTime reports the modification time obtained from files/stat.
func (f *apiFile) ModTime() time.Time {
	return f.mtime
}

// Size returns the UnixFS size reported by files/stat; the error is always nil.
func (f *apiFile) Size() (int64, error) {
	return f.size, nil
}
func stringToFileMode(mode string) (os.FileMode, error) {
if mode == "" {
return 0, nil
}
mode64, err := strconv.ParseUint(mode, 8, 32)
if err != nil {
return 0, fmt.Errorf("cannot parse mode %s: %s", mode, err)
}
return os.FileMode(uint32(mode64)), nil
}
// getFile builds an apiFile for the regular file at p and opens its initial
// "cat" stream via reset. size/mode/mtime come from the caller's stat.
func (api *UnixfsAPI) getFile(ctx context.Context, p path.Path, size int64, mode os.FileMode, mtime time.Time) (files.Node, error) {
	f := &apiFile{
		ctx:   ctx,
		core:  api.core(),
		size:  size,
		path:  p,
		mode:  mode,
		mtime: mtime,
	}

	return f, f.reset()
}
// apiIter walks the streamed "ls" records of a directory, materializing each
// entry as a files.Node on Next.
type apiIter struct {
	ctx  context.Context
	core *UnixfsAPI

	err error

	dec     *json.Decoder
	curFile files.Node
	cur     lsLink
}

// Err returns the first error Next encountered, if any.
func (it *apiIter) Err() error {
	return it.err
}

// Name returns the name of the current entry.
func (it *apiIter) Name() string {
	return it.cur.Name
}
// Next decodes the next streamed ls record and prepares the matching
// files.Node (directory, file or symlink). It returns false on EOF, context
// cancellation, or error — errors are recorded in it.err for Err().
func (it *apiIter) Next() bool {
	if it.ctx.Err() != nil {
		it.err = it.ctx.Err()
		return false
	}

	var out lsOutput
	if err := it.dec.Decode(&out); err != nil {
		if err != io.EOF {
			it.err = err
		}
		return false
	}

	// In stream mode each record carries exactly one object with one link.
	if len(out.Objects) != 1 {
		it.err = fmt.Errorf("ls returned more objects than expected (%d)", len(out.Objects))
		return false
	}

	if len(out.Objects[0].Links) != 1 {
		it.err = fmt.Errorf("ls returned more links than expected (%d)", len(out.Objects[0].Links))
		return false
	}

	it.cur = out.Objects[0].Links[0]
	c, err := cid.Parse(it.cur.Hash)
	if err != nil {
		it.err = err
		return false
	}

	switch it.cur.Type {
	case unixfs.THAMTShard, unixfs.TMetadata, unixfs.TDirectory:
		it.curFile, err = it.core.getDir(it.ctx, path.FromCid(c), int64(it.cur.Size), it.cur.Mode, it.cur.ModTime)
		if err != nil {
			it.err = err
			return false
		}
	case unixfs.TFile:
		it.curFile, err = it.core.getFile(it.ctx, path.FromCid(c), int64(it.cur.Size), it.cur.Mode, it.cur.ModTime)
		if err != nil {
			it.err = err
			return false
		}
	case unixfs.TSymlink:
		it.curFile, err = it.core.getSymlink(it.ctx, path.FromCid(c), it.cur.ModTime)
		if err != nil {
			it.err = err
			return false
		}
	default:
		it.err = fmt.Errorf("file type %d not supported", it.cur.Type)
		return false
	}
	return true
}

// Node returns the entry prepared by the last successful Next.
func (it *apiIter) Node() files.Node {
	return it.curFile
}
// apiDir is a files.Directory whose entries are decoded lazily from the
// buffered "ls" response held by dec.
type apiDir struct {
	ctx  context.Context
	core *UnixfsAPI
	size int64
	path path.Path

	mode  os.FileMode
	mtime time.Time

	dec *json.Decoder
}

// Close is a no-op: the ls response was fully buffered in getDir.
func (d *apiDir) Close() error {
	return nil
}

// Mode reports the directory mode from the original files/stat call.
func (d *apiDir) Mode() os.FileMode {
	return d.mode
}

// ModTime reports the modification time from files/stat.
func (d *apiDir) ModTime() time.Time {
	return d.mtime
}

// Size returns the UnixFS size; the error is always nil.
func (d *apiDir) Size() (int64, error) {
	return d.size, nil
}

// Entries returns an iterator over the directory's children.
// NOTE(review): every call shares the same json.Decoder, so Entries is
// effectively single-shot — a second iterator resumes where the first
// stopped. Confirm callers only iterate once.
func (d *apiDir) Entries() files.DirIterator {
	return &apiIter{
		ctx:  d.ctx,
		core: d.core,
		dec:  d.dec,
	}
}
// getDir fetches the streamed "ls" listing for the directory at p and wraps
// it in an apiDir. The whole response body is buffered up front (preserving
// the existing behavior of draining the stream), but read failures are now
// surfaced to the caller instead of being silently dropped.
func (api *UnixfsAPI) getDir(ctx context.Context, p path.Path, size int64, mode os.FileMode, modTime time.Time) (files.Node, error) {
	resp, err := api.core().Request("ls", p.String()).
		Option("resolve-size", true).
		Option("stream", true).Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}

	// Previously the error from ReadAll was ignored, which could hand the
	// iterator a truncated listing with no indication of failure.
	data, err := io.ReadAll(resp.Output)
	if err != nil {
		return nil, err
	}

	d := &apiDir{
		ctx:   ctx,
		core:  api,
		size:  size,
		path:  p,
		mode:  mode,
		mtime: modTime,
		dec:   json.NewDecoder(bytes.NewReader(data)),
	}

	return d, nil
}
// getSymlink fetches the symlink target at p via "cat" and wraps it in a
// files.Node. modTime is propagated from the caller's stat result.
func (api *UnixfsAPI) getSymlink(ctx context.Context, p path.Path, modTime time.Time) (files.Node, error) {
	resp, err := api.core().Request("cat", p.String()).
		Option("resolve-size", true).
		Option("stream", true).Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	// Close the response once the target has been read; previously it was
	// left open, leaking the underlying connection.
	defer resp.Close()

	target, err := io.ReadAll(resp.Output)
	if err != nil {
		return nil, err
	}

	return files.NewSymlinkFile(string(target), modTime), nil
}
// Compile-time checks that the apifile types satisfy the files interfaces.
var (
	_ files.File      = &apiFile{}
	_ files.Directory = &apiDir{}
)

29
client/rpc/auth/auth.go Normal file
View File

@ -0,0 +1,29 @@
package auth
import "net/http"
// Compile-time check that AuthorizedRoundTripper implements http.RoundTripper.
var _ http.RoundTripper = &AuthorizedRoundTripper{}

// AuthorizedRoundTripper decorates a base RoundTripper, attaching a fixed
// Authorization header value to every outgoing request.
type AuthorizedRoundTripper struct {
	authorization string
	roundTripper  http.RoundTripper
}
// NewAuthorizedRoundTripper creates a new [http.RoundTripper] that sets the
// Authorization HTTP header to the given value on each request. roundTripper
// is the base [http.RoundTripper]; when nil, [http.DefaultTransport] is used.
func NewAuthorizedRoundTripper(authorization string, roundTripper http.RoundTripper) http.RoundTripper {
	base := roundTripper
	if base == nil {
		base = http.DefaultTransport
	}

	return &AuthorizedRoundTripper{
		authorization: authorization,
		roundTripper:  base,
	}
}
// RoundTrip sends r with the configured Authorization header attached.
// Per the http.RoundTripper contract, RoundTrip must not modify the caller's
// request, so the header is set on a clone.
func (tp *AuthorizedRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	r = r.Clone(r.Context())
	r.Header.Set("Authorization", tp.authorization)
	return tp.roundTripper.RoundTrip(r)
}

134
client/rpc/block.go Normal file
View File

@ -0,0 +1,134 @@
package rpc
import (
"bytes"
"context"
"fmt"
"io"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
mc "github.com/multiformats/go-multicodec"
mh "github.com/multiformats/go-multihash"
)
// BlockAPI implements iface.BlockAPI over HTTP RPC.
type BlockAPI HttpApi

// blockStat mirrors the JSON responses of block/put and block/stat; cid is
// parsed from Key after decoding.
type blockStat struct {
	Key   string
	BSize int `json:"Size"`

	cid cid.Cid
}

// Size returns the block size in bytes.
func (s *blockStat) Size() int {
	return s.BSize
}

// Path returns the immutable path of the block's CID.
func (s *blockStat) Path() path.ImmutablePath {
	return path.FromCid(s.cid)
}
// Put stores the bytes read from r as a single block, configured by the
// given BlockPutOptions (hash function, CID version/codec, pinning), and
// returns the resulting block's stat.
func (api *BlockAPI) Put(ctx context.Context, r io.Reader, opts ...caopts.BlockPutOption) (iface.BlockStat, error) {
	options, err := caopts.BlockPutOptions(opts...)
	if err != nil {
		// Bail out before touching options: it is nil when err != nil, so
		// the previous ordering dereferenced a nil pointer.
		return nil, err
	}
	px := options.CidPrefix

	mht, ok := mh.Codes[px.MhType]
	if !ok {
		return nil, fmt.Errorf("unknown mhType %d", px.MhType)
	}

	var cidOptKey, cidOptVal string
	switch {
	case px.Version == 0 && px.Codec == cid.DagProtobuf:
		// ensure legacy --format=v0 passes as BlockPutOption still works
		cidOptKey = "format"
		cidOptVal = "v0"
	default:
		// pass codec as string
		cidOptKey = "cid-codec"
		cidOptVal = mc.Code(px.Codec).String()
	}

	req := api.core().Request("block/put").
		Option("mhtype", mht).
		Option("mhlen", px.MhLength).
		Option(cidOptKey, cidOptVal).
		Option("pin", options.Pin).
		FileBody(r)

	var out blockStat
	if err := req.Exec(ctx, &out); err != nil {
		return nil, err
	}
	out.cid, err = cid.Parse(out.Key)
	if err != nil {
		return nil, err
	}

	return &out, nil
}
// Get fetches the raw bytes of the block at p, fully buffered into memory.
// Not-found errors from the daemon are translated into typed ipld errors.
func (api *BlockAPI) Get(ctx context.Context, p path.Path) (io.Reader, error) {
	resp, err := api.core().Request("block/get", p.String()).Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, parseErrNotFoundWithFallbackToError(resp.Error)
	}

	// TODO: make get return ReadCloser to avoid copying
	defer resp.Close()
	b := new(bytes.Buffer)
	if _, err := io.Copy(b, resp.Output); err != nil {
		return nil, err
	}

	return b, nil
}
// Rm removes the block at p. With the Force option, missing blocks are not
// an error on the daemon side. Per-block errors arrive inside the response
// body and are translated into typed not-found errors where recognized.
func (api *BlockAPI) Rm(ctx context.Context, p path.Path, opts ...caopts.BlockRmOption) error {
	options, err := caopts.BlockRmOptions(opts...)
	if err != nil {
		return err
	}

	removedBlock := struct {
		Hash  string `json:",omitempty"`
		Error string `json:",omitempty"`
	}{}

	req := api.core().Request("block/rm").
		Option("force", options.Force).
		Arguments(p.String())

	if err := req.Exec(ctx, &removedBlock); err != nil {
		return err
	}

	return parseErrNotFoundWithFallbackToMSG(removedBlock.Error)
}
// Stat returns key and size information for the block at p, translating
// daemon not-found errors into typed ipld errors.
func (api *BlockAPI) Stat(ctx context.Context, p path.Path) (iface.BlockStat, error) {
	var out blockStat
	err := api.core().Request("block/stat", p.String()).Exec(ctx, &out)
	if err != nil {
		return nil, parseErrNotFoundWithFallbackToError(err)
	}
	out.cid, err = cid.Parse(out.Key)
	if err != nil {
		return nil, err
	}

	return &out, nil
}

// core converts back to the underlying HttpApi.
func (api *BlockAPI) core() *HttpApi {
	return (*HttpApi)(api)
}

138
client/rpc/dag.go Normal file
View File

@ -0,0 +1,138 @@
package rpc
import (
"bytes"
"context"
"fmt"
"io"
"github.com/ipfs/boxo/path"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
format "github.com/ipfs/go-ipld-format"
"github.com/ipfs/kubo/core/coreiface/options"
multicodec "github.com/multiformats/go-multicodec"
)
// httpNodeAdder holds the shared block-put plumbing; HttpDagServ is the
// non-pinning DAG service and pinningHttpNodeAdder its pinning variant.
type (
	httpNodeAdder        HttpApi
	HttpDagServ          httpNodeAdder
	pinningHttpNodeAdder httpNodeAdder
)
// Get fetches the raw block for c and decodes it into an IPLD node using the
// API's configured codec registry.
func (api *HttpDagServ) Get(ctx context.Context, c cid.Cid) (format.Node, error) {
	r, err := api.core().Block().Get(ctx, path.FromCid(c))
	if err != nil {
		return nil, err
	}

	data, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}

	// NewBlockWithCid verifies the data matches c's hash.
	blk, err := blocks.NewBlockWithCid(data, c)
	if err != nil {
		return nil, err
	}

	return api.ipldDecoder.DecodeNode(ctx, blk)
}
// GetMany fetches the given CIDs concurrently (one goroutine per CID) and
// delivers results on the returned channel. The channel is never closed;
// consumers should read exactly len(cids) results or cancel ctx.
func (api *HttpDagServ) GetMany(ctx context.Context, cids []cid.Cid) <-chan *format.NodeOption {
	out := make(chan *format.NodeOption)

	for _, c := range cids {
		// TODO: Consider limiting concurrency of this somehow
		go func(c cid.Cid) {
			n, err := api.Get(ctx, c)

			select {
			case out <- &format.NodeOption{Node: n, Err: err}:
			case <-ctx.Done():
			}
		}(c)
	}
	return out
}
// add uploads nd's raw bytes via block/put, preserving its CID prefix
// (version, codec, hash), optionally pinning, and verifies the daemon
// produced the same CID.
func (api *httpNodeAdder) add(ctx context.Context, nd format.Node, pin bool) error {
	c := nd.Cid()
	prefix := c.Prefix()

	// preserve 'cid-codec' when sent over HTTP
	cidCodec := multicodec.Code(prefix.Codec).String()

	// 'format' got replaced by 'cid-codec' in https://github.com/ipfs/interface-go-ipfs-core/pull/80
	// but we still support it here for backward-compatibility with use of CIDv0
	format := ""
	if prefix.Version == 0 {
		cidCodec = ""
		format = "v0"
	}

	stat, err := api.core().Block().Put(ctx, bytes.NewReader(nd.RawData()),
		options.Block.Hash(prefix.MhType, prefix.MhLength),
		options.Block.CidCodec(cidCodec),
		options.Block.Format(format),
		options.Block.Pin(pin))
	if err != nil {
		return err
	}
	if !stat.Path().RootCid().Equals(c) {
		return fmt.Errorf("cids didn't match - local %s, remote %s", c.String(), stat.Path().RootCid().String())
	}
	return nil
}
// addMany uploads each node in turn via add, stopping on the first error.
func (api *httpNodeAdder) addMany(ctx context.Context, nds []format.Node, pin bool) error {
	for _, nd := range nds {
		// TODO: optimize
		if err := api.add(ctx, nd, pin); err != nil {
			return err
		}
	}
	return nil
}

// AddMany stores the nodes without pinning them.
func (api *HttpDagServ) AddMany(ctx context.Context, nds []format.Node) error {
	return (*httpNodeAdder)(api).addMany(ctx, nds, false)
}

// Add stores the node without pinning it.
func (api *HttpDagServ) Add(ctx context.Context, nd format.Node) error {
	return (*httpNodeAdder)(api).add(ctx, nd, false)
}

// Add stores the node and pins it.
func (api *pinningHttpNodeAdder) Add(ctx context.Context, nd format.Node) error {
	return (*httpNodeAdder)(api).add(ctx, nd, true)
}

// AddMany stores the nodes and pins them.
func (api *pinningHttpNodeAdder) AddMany(ctx context.Context, nds []format.Node) error {
	return (*httpNodeAdder)(api).addMany(ctx, nds, true)
}

// Pinning returns a NodeAdder whose writes are pinned.
func (api *HttpDagServ) Pinning() format.NodeAdder {
	return (*pinningHttpNodeAdder)(api)
}

// Remove deletes the block for c from the remote node.
func (api *HttpDagServ) Remove(ctx context.Context, c cid.Cid) error {
	return api.core().Block().Rm(ctx, path.FromCid(c)) // TODO: should we force rm?
}

// RemoveMany deletes each CID in turn, stopping on the first error.
func (api *HttpDagServ) RemoveMany(ctx context.Context, cids []cid.Cid) error {
	for _, c := range cids {
		// TODO: optimize
		if err := api.Remove(ctx, c); err != nil {
			return err
		}
	}
	return nil
}

// core converts back to the underlying HttpApi.
func (api *httpNodeAdder) core() *HttpApi {
	return (*HttpApi)(api)
}

// core converts back to the underlying HttpApi.
func (api *HttpDagServ) core() *HttpApi {
	return (*HttpApi)(api)
}

166
client/rpc/errors.go Normal file
View File

@ -0,0 +1,166 @@
package rpc
import (
"errors"
"strings"
"unicode/utf8"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
mbase "github.com/multiformats/go-multibase"
)
// This file handle parsing and returning the correct ABI based errors from error messages
// prePostWrappedNotFoundError reproduces an ipld.ErrNotFound that appeared
// inside a larger message: pre and post hold the surrounding text, so
// Error() round-trips the original string while Unwrap exposes the typed
// error to errors.Is/As.
type prePostWrappedNotFoundError struct {
	pre  string
	post string

	wrapped ipld.ErrNotFound
}

// String returns the same text as Error, for %v-style formatting.
func (e prePostWrappedNotFoundError) String() string {
	return e.Error()
}

// Error reassembles the original message around the wrapped error.
func (e prePostWrappedNotFoundError) Error() string {
	return e.pre + e.wrapped.Error() + e.post
}

// Unwrap exposes the underlying ipld.ErrNotFound.
func (e prePostWrappedNotFoundError) Unwrap() error {
	return e.wrapped
}
// parseErrNotFoundWithFallbackToMSG rebuilds a typed not-found error from
// msg when it matches a known encoding; otherwise it returns msg verbatim
// as a plain error.
func parseErrNotFoundWithFallbackToMSG(msg string) error {
	if err, handled := parseErrNotFound(msg); handled {
		return err
	}
	return errors.New(msg)
}
// parseErrNotFoundWithFallbackToError is like
// parseErrNotFoundWithFallbackToMSG but falls back to returning the original
// error value instead of allocating a new one.
func parseErrNotFoundWithFallbackToError(msg error) error {
	if err, handled := parseErrNotFound(msg.Error()); handled {
		return err
	}
	return msg
}
// parseErrNotFound tries each known not-found encoding in turn; the bool
// reports whether msg was recognized. An empty msg maps to (nil, true).
func parseErrNotFound(msg string) (error, bool) {
	if msg == "" {
		return nil, true // Fast path
	}

	if err, handled := parseIPLDErrNotFound(msg); handled {
		return err, true
	}

	if err, handled := parseBlockstoreNotFound(msg); handled {
		return err, true
	}

	return nil, false
}

// Assume CIDs break on:
// - Whitespaces: " \t\n\r\v\f"
// - Semicolon: ";" this is to parse ipld.ErrNotFound wrapped in multierr
// - Double Quotes: "\"" this is for parsing %q and %#v formatting.
const cidBreakSet = " \t\n\r\v\f;\""
// parseIPLDErrNotFound scans msg for the "ipld: could not find <cid>" text,
// reconstructing a typed ipld.ErrNotFound (wrapped when surrounding text is
// present). The bool reports whether the pattern was recognized.
func parseIPLDErrNotFound(msg string) (error, bool) {
	// The pattern we search for is:
	const ipldErrNotFoundKey = "ipld: could not find " /*CID*/
	// We try to parse the CID, if it's invalid we give up and return a simple text error.
	// We also accept "node" in place of the CID because that means it's an Undefined CID.

	keyIndex := strings.Index(msg, ipldErrNotFoundKey)

	if keyIndex < 0 { // Unknown error
		return nil, false
	}

	cidStart := keyIndex + len(ipldErrNotFoundKey)

	msgPostKey := msg[cidStart:]
	var c cid.Cid
	var postIndex int
	if strings.HasPrefix(msgPostKey, "node") {
		// Fallback case
		c = cid.Undef
		postIndex = len("node")
	} else {
		// The CID ends at the first break character (see cidBreakSet).
		postIndex = strings.IndexFunc(msgPostKey, func(r rune) bool {
			return strings.ContainsAny(string(r), cidBreakSet)
		})
		if postIndex < 0 {
			// no breakage meaning the string look like this something + "ipld: could not find bafy"
			postIndex = len(msgPostKey)
		}

		cidStr := msgPostKey[:postIndex]

		var err error
		c, err = cid.Decode(cidStr)
		if err != nil {
			// failed to decode CID give up
			return nil, false
		}

		// check that the CID is either a CIDv0 or a base32 multibase
		// because that what ipld.ErrNotFound.Error() -> cid.Cid.String() do currently
		if c.Version() != 0 {
			baseRune, _ := utf8.DecodeRuneInString(cidStr)
			if baseRune == utf8.RuneError || baseRune != mbase.Base32 {
				// not a multibase we expect, give up
				return nil, false
			}
		}
	}

	err := ipld.ErrNotFound{Cid: c}
	pre := msg[:keyIndex]
	post := msgPostKey[postIndex:]

	// Wrap only when there is surrounding text to preserve.
	if len(pre) > 0 || len(post) > 0 {
		return prePostWrappedNotFoundError{
			pre:     pre,
			post:    post,
			wrapped: err,
		}, true
	}

	return err, true
}
// This is a simple error type that just return msg as Error().
// But that also match ipld.ErrNotFound when called with Is(err).
// That is needed to keep compatibility with code that use string.Contains(err.Error(), "blockstore: block not found")
// and code using ipld.ErrNotFound.
type blockstoreNotFoundMatchingIPLDErrNotFound struct {
	msg string
}

// String returns the same text as Error, for %v-style formatting.
func (e blockstoreNotFoundMatchingIPLDErrNotFound) String() string {
	return e.Error()
}

// Error returns the original message verbatim.
func (e blockstoreNotFoundMatchingIPLDErrNotFound) Error() string {
	return e.msg
}

// Is reports true for any ipld.ErrNotFound target, so errors.Is matching
// treats this error as a not-found.
func (e blockstoreNotFoundMatchingIPLDErrNotFound) Is(err error) bool {
	_, ok := err.(ipld.ErrNotFound)
	return ok
}
// parseBlockstoreNotFound recognizes the legacy "blockstore: block not
// found" message; when present, msg is wrapped in an error type that also
// matches ipld.ErrNotFound via errors.Is.
func parseBlockstoreNotFound(msg string) (error, bool) {
	if strings.Contains(msg, "blockstore: block not found") {
		return blockstoreNotFoundMatchingIPLDErrNotFound{msg: msg}, true
	}
	return nil, false
}

99
client/rpc/errors_test.go Normal file
View File

@ -0,0 +1,99 @@
package rpc
import (
"errors"
"fmt"
"testing"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
mbase "github.com/multiformats/go-multibase"
mh "github.com/multiformats/go-multihash"
)
// randomSha256MH is a fixed sha2-256 multihash used to build test CIDs.
var randomSha256MH = mh.Multihash{0x12, 0x20, 0x88, 0x82, 0x73, 0x37, 0x7c, 0xc1, 0xc9, 0x96, 0xad, 0xee, 0xd, 0x26, 0x84, 0x2, 0xc9, 0xc9, 0x5c, 0xf9, 0x5c, 0x4d, 0x9b, 0xc3, 0x3f, 0xfb, 0x4a, 0xd8, 0xaf, 0x28, 0x6b, 0xca, 0x1a, 0xf2}

// doParseIpldNotFoundTest checks that parsing original.Error() round-trips
// both the exact message text and the ipld.IsNotFound classification.
func doParseIpldNotFoundTest(t *testing.T, original error) {
	originalMsg := original.Error()

	rebuilt := parseErrNotFoundWithFallbackToMSG(originalMsg)

	rebuiltMsg := rebuilt.Error()

	if originalMsg != rebuiltMsg {
		t.Errorf("expected message to be %q; got %q", originalMsg, rebuiltMsg)
	}

	originalNotFound := ipld.IsNotFound(original)
	rebuiltNotFound := ipld.IsNotFound(rebuilt)
	if originalNotFound != rebuiltNotFound {
		t.Errorf("for %q expected Ipld.IsNotFound to be %t; got %t", originalMsg, originalNotFound, rebuiltNotFound)
	}
}
// TestParseIPLDNotFound exercises parseErrNotFoundWithFallbackToMSG against
// a matrix of wrapper formats (including every CID break character) crossed
// with valid, invalid and unrelated error messages.
func TestParseIPLDNotFound(t *testing.T) {
	t.Parallel()

	if err := parseErrNotFoundWithFallbackToMSG(""); err != nil {
		t.Errorf("expected empty string to give no error; got %T %q", err, err.Error())
	}

	// One "%w<break-char>" wrapper per break character in cidBreakSet
	// (all ASCII, so rune indices line up with byte indices).
	cidBreaks := make([]string, len(cidBreakSet))
	for i, v := range cidBreakSet {
		cidBreaks[i] = "%w" + string(v)
	}

	base58BTCEncoder, err := mbase.NewEncoder(mbase.Base58BTC)
	if err != nil {
		t.Fatalf("expected to find Base58BTC encoder; got error %q", err.Error())
	}

	for _, wrap := range append(cidBreaks,
		"",
		"merkledag: %w",
		"testing: %w the test",
		"%w is wrong",
	) {
		for _, err := range [...]error{
			errors.New("ipld: could not find "),
			errors.New("ipld: could not find Bad_CID"),
			errors.New("ipld: could not find " + cid.NewCidV1(cid.Raw, randomSha256MH).Encode(base58BTCEncoder)), // Test that we only accept CIDv0 and base32 CIDs
			errors.New("network connection timeout"),
			ipld.ErrNotFound{Cid: cid.Undef},
			ipld.ErrNotFound{Cid: cid.NewCidV0(randomSha256MH)},
			ipld.ErrNotFound{Cid: cid.NewCidV1(cid.Raw, randomSha256MH)},
		} {
			if wrap != "" {
				err = fmt.Errorf(wrap, err)
			}

			doParseIpldNotFoundTest(t, err)
		}
	}
}
// TestBlockstoreNotFoundMatchingIPLDErrNotFound verifies that the legacy
// blockstore message matches ipld.IsNotFound, and that wrapped/unrelated
// messages still round-trip through the parser.
func TestBlockstoreNotFoundMatchingIPLDErrNotFound(t *testing.T) {
	t.Parallel()

	if !ipld.IsNotFound(blockstoreNotFoundMatchingIPLDErrNotFound{}) {
		t.Fatalf("expected blockstoreNotFoundMatchingIPLDErrNotFound to match ipld.IsNotFound; got false")
	}

	for _, wrap := range [...]string{
		"",
		"merkledag: %w",
		"testing: %w the test",
		"%w is wrong",
	} {
		for _, err := range [...]error{
			errors.New("network connection timeout"),
			blockstoreNotFoundMatchingIPLDErrNotFound{"blockstore: block not found"},
		} {
			if wrap != "" {
				err = fmt.Errorf(wrap, err)
			}

			doParseIpldNotFoundTest(t, err)
		}
	}
}

195
client/rpc/key.go Normal file
View File

@ -0,0 +1,195 @@
package rpc
import (
"bytes"
"context"
"errors"
"github.com/ipfs/boxo/ipns"
"github.com/ipfs/boxo/path"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multibase"
)
// KeyAPI implements iface.KeyAPI over HTTP RPC.
type KeyAPI HttpApi

// key is the client-side iface.Key: a keystore name plus the peer ID it
// decodes to and the derived /ipns/ path.
type key struct {
	name string
	pid  peer.ID
	path path.Path
}

// newKey builds a key from a name and the peer-ID string returned by the
// daemon, deriving the /ipns/ path from the peer ID.
func newKey(name, pidStr string) (*key, error) {
	pid, err := peer.Decode(pidStr)
	if err != nil {
		return nil, err
	}

	path, err := path.NewPath("/ipns/" + ipns.NameFromPeer(pid).String())
	if err != nil {
		return nil, err
	}

	return &key{name: name, pid: pid, path: path}, nil
}

// Name returns the keystore name of the key.
func (k *key) Name() string {
	return k.name
}

// Path returns the /ipns/ path of the key.
func (k *key) Path() path.Path {
	return k.path
}

// ID returns the peer ID the key decodes to.
func (k *key) ID() peer.ID {
	return k.pid
}

// keyOutput mirrors the per-key JSON object returned by the key/* RPCs.
type keyOutput struct {
	Name string
	Id   string
}
// Generate creates a new key under the given name using the requested
// algorithm and size and returns the resulting key.
func (api *KeyAPI) Generate(ctx context.Context, name string, opts ...caopts.KeyGenerateOption) (iface.Key, error) {
	options, err := caopts.KeyGenerateOptions(opts...)
	if err != nil {
		return nil, err
	}

	var out keyOutput
	err = api.core().Request("key/gen", name).
		Option("type", options.Algorithm).
		Option("size", options.Size).
		Exec(ctx, &out)
	if err != nil {
		return nil, err
	}

	return newKey(out.Name, out.Id)
}
// Rename renames the key oldName to newName, optionally forcing overwrite.
// It returns the renamed key and whether an existing key was overwritten.
func (api *KeyAPI) Rename(ctx context.Context, oldName string, newName string, opts ...caopts.KeyRenameOption) (iface.Key, bool, error) {
	options, err := caopts.KeyRenameOptions(opts...)
	if err != nil {
		return nil, false, err
	}

	// Mirrors the key/rename RPC response.
	var out struct {
		Was       string
		Now       string
		Id        string
		Overwrite bool
	}
	err = api.core().Request("key/rename", oldName, newName).
		Option("force", options.Force).
		Exec(ctx, &out)
	if err != nil {
		return nil, false, err
	}

	key, err := newKey(out.Now, out.Id)
	if err != nil {
		return nil, false, err
	}

	return key, out.Overwrite, err
}
// List returns all keys known to the remote node's keystore.
func (api *KeyAPI) List(ctx context.Context) ([]iface.Key, error) {
	var out struct {
		Keys []keyOutput
	}
	if err := api.core().Request("key/list").Exec(ctx, &out); err != nil {
		return nil, err
	}

	res := make([]iface.Key, len(out.Keys))
	for i, k := range out.Keys {
		key, err := newKey(k.Name, k.Id)
		if err != nil {
			return nil, err
		}
		res[i] = key
	}

	return res, nil
}
// Self returns the node's own identity as the "self" key, using the "id"
// RPC to obtain the peer ID.
func (api *KeyAPI) Self(ctx context.Context) (iface.Key, error) {
	var id struct{ ID string }
	if err := api.core().Request("id").Exec(ctx, &id); err != nil {
		return nil, err
	}

	return newKey("self", id.ID)
}
// Remove deletes the named key from the keystore and returns the removed
// key. The RPC reports the removed keys as a list, which must contain
// exactly one entry.
func (api *KeyAPI) Remove(ctx context.Context, name string) (iface.Key, error) {
	var out struct {
		Keys []keyOutput
	}
	if err := api.core().Request("key/rm", name).Exec(ctx, &out); err != nil {
		return nil, err
	}

	if len(out.Keys) != 1 {
		return nil, errors.New("got unexpected number of keys back")
	}

	return newKey(out.Keys[0].Name, out.Keys[0].Id)
}

// core converts back to the underlying HttpApi.
func (api *KeyAPI) core() *HttpApi {
	return (*HttpApi)(api)
}
// Sign signs data with the named key and returns the key used plus the raw
// signature bytes (the RPC returns the signature multibase-encoded; it is
// decoded here).
func (api *KeyAPI) Sign(ctx context.Context, name string, data []byte) (iface.Key, []byte, error) {
	var out struct {
		Key       keyOutput
		Signature string
	}

	err := api.core().Request("key/sign").
		Option("key", name).
		FileBody(bytes.NewReader(data)).
		Exec(ctx, &out)
	if err != nil {
		return nil, nil, err
	}

	key, err := newKey(out.Key.Name, out.Key.Id)
	if err != nil {
		return nil, nil, err
	}

	_, signature, err := multibase.Decode(out.Signature)
	if err != nil {
		return nil, nil, err
	}

	return key, signature, nil
}
// Verify checks signature over data using the key identified by keyOrName,
// returning the key used and whether the signature is valid.
func (api *KeyAPI) Verify(ctx context.Context, keyOrName string, signature, data []byte) (iface.Key, bool, error) {
	var out struct {
		Key            keyOutput
		SignatureValid bool
	}

	err := api.core().Request("key/verify").
		Option("key", keyOrName).
		Option("signature", toMultibase(signature)).
		FileBody(bytes.NewReader(data)).
		Exec(ctx, &out)
	if err != nil {
		return nil, false, err
	}

	key, err := newKey(out.Key.Name, out.Key.Id)
	if err != nil {
		return nil, false, err
	}

	return key, out.SignatureValid, nil
}

134
client/rpc/name.go Normal file
View File

@ -0,0 +1,134 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"io"
"github.com/ipfs/boxo/ipns"
"github.com/ipfs/boxo/namesys"
"github.com/ipfs/boxo/path"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
)
// NameAPI implements iface.NameAPI over HTTP RPC.
type NameAPI HttpApi

// ipnsEntry mirrors the JSON response of the name/publish RPC.
type ipnsEntry struct {
	Name  string `json:"Name"`
	Value string `json:"Value"`
}
// Publish announces p under the IPNS name of the configured key and returns
// the published name. Resolution of p is disabled server-side (resolve=false).
func (api *NameAPI) Publish(ctx context.Context, p path.Path, opts ...caopts.NamePublishOption) (ipns.Name, error) {
	options, err := caopts.NamePublishOptions(opts...)
	if err != nil {
		return ipns.Name{}, err
	}

	req := api.core().Request("name/publish", p.String()).
		Option("key", options.Key).
		Option("allow-offline", options.AllowOffline).
		Option("lifetime", options.ValidTime).
		Option("resolve", false)

	// Only send a TTL when the caller set one; otherwise use the server default.
	if options.TTL != nil {
		req.Option("ttl", options.TTL)
	}

	var out ipnsEntry
	if err := req.Exec(ctx, &out); err != nil {
		return ipns.Name{}, err
	}

	return ipns.NameFromString(out.Name)
}
// Search resolves name in streaming mode, emitting intermediate results on
// the returned channel. The channel is closed when the stream ends or ctx
// is cancelled. Only depth 1 or the default depth limit is supported.
func (api *NameAPI) Search(ctx context.Context, name string, opts ...caopts.NameResolveOption) (<-chan iface.IpnsResult, error) {
	options, err := caopts.NameResolveOptions(opts...)
	if err != nil {
		return nil, err
	}

	ropts := namesys.ProcessResolveOptions(options.ResolveOpts)
	if ropts.Depth != namesys.DefaultDepthLimit && ropts.Depth != 1 {
		return nil, fmt.Errorf("Name.Resolve: depth other than 1 or %d not supported", namesys.DefaultDepthLimit)
	}

	req := api.core().Request("name/resolve", name).
		Option("nocache", !options.Cache).
		Option("recursive", ropts.Depth != 1).
		Option("dht-record-count", ropts.DhtRecordCount).
		Option("dht-timeout", ropts.DhtTimeout).
		Option("stream", true)
	resp, err := req.Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}

	res := make(chan iface.IpnsResult)

	go func() {
		defer close(res)
		defer resp.Close()

		dec := json.NewDecoder(resp.Output)

		for {
			var out struct{ Path string }
			err := dec.Decode(&out)
			if err == io.EOF {
				return
			}
			var ires iface.IpnsResult
			if err == nil {
				// NOTE(review): a path parse failure here ends the stream
				// silently (no error is delivered to the consumer) — confirm
				// this is intended.
				p, err := path.NewPath(out.Path)
				if err != nil {
					return
				}
				ires.Path = p
			}

			select {
			case res <- ires:
			case <-ctx.Done():
			}
			if err != nil {
				return
			}
		}
	}()

	return res, nil
}
// Resolve resolves name to a path in one shot (non-streaming). Only depth 1
// or the default depth limit is supported.
func (api *NameAPI) Resolve(ctx context.Context, name string, opts ...caopts.NameResolveOption) (path.Path, error) {
	options, err := caopts.NameResolveOptions(opts...)
	if err != nil {
		return nil, err
	}

	ropts := namesys.ProcessResolveOptions(options.ResolveOpts)
	if ropts.Depth != namesys.DefaultDepthLimit && ropts.Depth != 1 {
		return nil, fmt.Errorf("Name.Resolve: depth other than 1 or %d not supported", namesys.DefaultDepthLimit)
	}

	req := api.core().Request("name/resolve", name).
		Option("nocache", !options.Cache).
		Option("recursive", ropts.Depth != 1).
		Option("dht-record-count", ropts.DhtRecordCount).
		Option("dht-timeout", ropts.DhtTimeout)

	var out struct{ Path string }
	if err := req.Exec(ctx, &out); err != nil {
		return nil, err
	}

	return path.NewPath(out.Path)
}

// core converts back to the underlying HttpApi.
func (api *NameAPI) core() *HttpApi {
	return (*HttpApi)(api)
}

88
client/rpc/object.go Normal file
View File

@ -0,0 +1,88 @@
package rpc
import (
"context"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
)
// ObjectAPI implements the legacy object-patch iface.ObjectAPI over HTTP RPC.
type ObjectAPI HttpApi

// objectOut mirrors the JSON response of the object/patch RPCs.
type objectOut struct {
	Hash string
}
// AddLink adds a link named name pointing at child to the node at base,
// optionally creating intermediate directories, and returns the new root.
func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, child path.Path, opts ...caopts.ObjectAddLinkOption) (path.ImmutablePath, error) {
	options, err := caopts.ObjectAddLinkOptions(opts...)
	if err != nil {
		return path.ImmutablePath{}, err
	}

	var out objectOut
	err = api.core().Request("object/patch/add-link", base.String(), name, child.String()).
		Option("create", options.Create).
		Exec(ctx, &out)
	if err != nil {
		return path.ImmutablePath{}, err
	}

	c, err := cid.Parse(out.Hash)
	if err != nil {
		return path.ImmutablePath{}, err
	}

	return path.FromCid(c), nil
}
// RmLink removes the named link from the node at base and returns the new
// root.
func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string) (path.ImmutablePath, error) {
	var out objectOut
	err := api.core().Request("object/patch/rm-link", base.String(), link).
		Exec(ctx, &out)
	if err != nil {
		return path.ImmutablePath{}, err
	}

	c, err := cid.Parse(out.Hash)
	if err != nil {
		return path.ImmutablePath{}, err
	}

	return path.FromCid(c), nil
}
type change struct {
Type iface.ChangeType
Path string
Before cid.Cid
After cid.Cid
}
// Diff computes the changes between the objects at a and b via the
// "object/diff" endpoint.
//
// Before/After are only populated on a change when the corresponding CID in
// the reply is defined.
func (api *ObjectAPI) Diff(ctx context.Context, a path.Path, b path.Path) ([]iface.ObjectChange, error) {
	var out struct {
		Changes []change
	}
	if err := api.core().Request("object/diff", a.String(), b.String()).Exec(ctx, &out); err != nil {
		return nil, err
	}
	res := make([]iface.ObjectChange, len(out.Changes))
	for i, ch := range out.Changes {
		res[i] = iface.ObjectChange{
			Type: ch.Type,
			Path: ch.Path,
		}
		if ch.Before != cid.Undef {
			res[i].Before = path.FromCid(ch.Before)
		}
		if ch.After != cid.Undef {
			res[i].After = path.FromCid(ch.After)
		}
	}
	return res, nil
}
func (api *ObjectAPI) core() *HttpApi {
return (*HttpApi)(api)
}

48
client/rpc/path.go Normal file
View File

@ -0,0 +1,48 @@
package rpc
import (
"context"
"github.com/ipfs/boxo/path"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
)
// ResolvePath resolves p down to its underlying DAG root via the
// "dag/resolve" endpoint and returns the resolved immutable path together
// with the remainder path split into segments.
//
// IPNS paths are first resolved to their target with Name().Resolve, so the
// returned path is always immutable.
func (api *HttpApi) ResolvePath(ctx context.Context, p path.Path) (path.ImmutablePath, []string, error) {
	// Wire shape of a dag/resolve reply.
	var out struct {
		Cid     cid.Cid
		RemPath string
	}
	var err error
	if p.Namespace() == path.IPNSNamespace {
		if p, err = api.Name().Resolve(ctx, p.String()); err != nil {
			return path.ImmutablePath{}, nil, err
		}
	}
	if err := api.Request("dag/resolve", p.String()).Exec(ctx, &out); err != nil {
		return path.ImmutablePath{}, nil, err
	}
	// Rebuild the path from the resolved root CID plus the unresolved
	// remainder, keeping the (post-IPNS-resolution) namespace.
	p, err = path.NewPathFromSegments(p.Namespace(), out.Cid.String(), out.RemPath)
	if err != nil {
		return path.ImmutablePath{}, nil, err
	}
	imPath, err := path.NewImmutablePath(p)
	if err != nil {
		return path.ImmutablePath{}, nil, err
	}
	return imPath, path.StringToSegments(out.RemPath), nil
}
// ResolveNode resolves p to its DAG root and fetches that node via the
// Dag API.
func (api *HttpApi) ResolveNode(ctx context.Context, p path.Path) (ipld.Node, error) {
	resolved, _, err := api.ResolvePath(ctx, p)
	if err != nil {
		return nil, err
	}
	return api.Dag().Get(ctx, resolved.RootCid())
}

272
client/rpc/pin.go Normal file
View File

@ -0,0 +1,272 @@
package rpc
import (
"context"
"encoding/json"
"errors"
"io"
"strings"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
)
type PinAPI HttpApi
type pinRefKeyObject struct {
Type string
}
type pinRefKeyList struct {
Keys map[string]pinRefKeyObject
}
type pin struct {
path path.ImmutablePath
typ string
name string
err error
}
func (p pin) Err() error {
return p.err
}
func (p pin) Path() path.ImmutablePath {
return p.path
}
func (p pin) Name() string {
return p.name
}
func (p pin) Type() string {
return p.typ
}
func (api *PinAPI) Add(ctx context.Context, p path.Path, opts ...caopts.PinAddOption) error {
options, err := caopts.PinAddOptions(opts...)
if err != nil {
return err
}
return api.core().Request("pin/add", p.String()).
Option("recursive", options.Recursive).Exec(ctx, nil)
}
type pinLsObject struct {
Cid string
Name string
Type string
}
// Ls streams the node's pins of the requested type via the "pin/ls"
// endpoint in stream mode. Each decoded entry — or any decode/parse error,
// wrapped in a pin value — is delivered on the returned channel, which is
// closed when the stream ends, an error is delivered, or ctx is done.
//
// NOTE(review): res.Error is not checked before streaming begins; an
// HTTP-level error would surface as a decode error on the first read —
// confirm this is intended.
func (api *PinAPI) Ls(ctx context.Context, opts ...caopts.PinLsOption) (<-chan iface.Pin, error) {
	options, err := caopts.PinLsOptions(opts...)
	if err != nil {
		return nil, err
	}
	res, err := api.core().Request("pin/ls").
		Option("type", options.Type).
		Option("stream", true).
		Send(ctx)
	if err != nil {
		return nil, err
	}
	pins := make(chan iface.Pin)
	go func(ch chan<- iface.Pin) {
		defer res.Output.Close()
		defer close(ch)
		dec := json.NewDecoder(res.Output)
		var out pinLsObject
		for {
			switch err := dec.Decode(&out); err {
			case nil:
				// Decoded one entry; fall through to process it below.
			case io.EOF:
				return
			default:
				// Deliver the decode error once, then stop.
				select {
				case ch <- pin{err: err}:
					return
				case <-ctx.Done():
					return
				}
			}
			c, err := cid.Parse(out.Cid)
			if err != nil {
				select {
				case ch <- pin{err: err}:
					return
				case <-ctx.Done():
					return
				}
			}
			select {
			case ch <- pin{typ: out.Type, name: out.Name, path: path.FromCid(c)}:
			case <-ctx.Done():
				return
			}
		}
	}(pins)
	return pins, nil
}
// IsPinned returns whether or not the given cid is pinned
// and an explanation of why it's pinned (the pin type of the first key in
// the "pin/ls" reply).
func (api *PinAPI) IsPinned(ctx context.Context, p path.Path, opts ...caopts.PinIsPinnedOption) (string, bool, error) {
	options, err := caopts.PinIsPinnedOptions(opts...)
	if err != nil {
		return "", false, err
	}
	var out pinRefKeyList
	err = api.core().Request("pin/ls").
		Option("type", options.WithType).
		Option("arg", p.String()).
		Exec(ctx, &out)
	if err != nil {
		// TODO: This error-type discrimination based on sub-string matching is brittle.
		// It is addressed by this open issue: https://github.com/ipfs/go-ipfs/issues/7563
		if strings.Contains(err.Error(), "is not pinned") {
			return "", false, nil
		}
		return "", false, err
	}
	// The reply map has at most one relevant entry; return the first.
	for _, obj := range out.Keys {
		return obj.Type, true, nil
	}
	return "", false, errors.New("http api returned no error and no results")
}
// Rm removes the pin on p via the "pin/rm" endpoint, forwarding the
// Recursive option.
func (api *PinAPI) Rm(ctx context.Context, p path.Path, opts ...caopts.PinRmOption) error {
	settings, err := caopts.PinRmOptions(opts...)
	if err != nil {
		return err
	}
	req := api.core().Request("pin/rm", p.String())
	req.Option("recursive", settings.Recursive)
	return req.Exec(ctx, nil)
}
func (api *PinAPI) Update(ctx context.Context, from path.Path, to path.Path, opts ...caopts.PinUpdateOption) error {
options, err := caopts.PinUpdateOptions(opts...)
if err != nil {
return err
}
return api.core().Request("pin/update", from.String(), to.String()).
Option("unpin", options.Unpin).Exec(ctx, nil)
}
type pinVerifyRes struct {
ok bool
badNodes []iface.BadPinNode
err error
}
func (r pinVerifyRes) Ok() bool {
return r.ok
}
func (r pinVerifyRes) BadNodes() []iface.BadPinNode {
return r.badNodes
}
func (r pinVerifyRes) Err() error {
return r.err
}
type badNode struct {
err error
cid cid.Cid
}
func (n badNode) Path() path.ImmutablePath {
return path.FromCid(n.cid)
}
func (n badNode) Err() error {
return n.err
}
// Verify streams the results of "pin/verify --verbose": one PinStatus per
// verified pin, including any broken child nodes. The returned channel is
// closed when the stream ends, an error is delivered, or ctx is done.
func (api *PinAPI) Verify(ctx context.Context) (<-chan iface.PinStatus, error) {
	resp, err := api.core().Request("pin/verify").Option("verbose", true).Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	res := make(chan iface.PinStatus)
	go func() {
		defer resp.Close()
		defer close(res)
		dec := json.NewDecoder(resp.Output)
		for {
			// Wire shape of a single pin/verify result.
			var out struct {
				Cid      string
				Err      string
				Ok       bool
				BadNodes []struct {
					Cid string
					Err string
				}
			}
			if err := dec.Decode(&out); err != nil {
				if err == io.EOF {
					return
				}
				// Deliver the decode error as a result, then stop.
				select {
				case res <- pinVerifyRes{err: err}:
					return
				case <-ctx.Done():
					return
				}
			}
			if out.Err != "" {
				// Server-side error for this entry; surface it and stop.
				select {
				case res <- pinVerifyRes{err: errors.New(out.Err)}:
					return
				case <-ctx.Done():
					return
				}
			}
			badNodes := make([]iface.BadPinNode, len(out.BadNodes))
			for i, n := range out.BadNodes {
				c, err := cid.Decode(n.Cid)
				if err != nil {
					// NOTE(review): on a parse failure c is the zero CID, so
					// this bad node is reported with an undefined CID.
					badNodes[i] = badNode{cid: c, err: err}
					continue
				}
				if n.Err != "" {
					err = errors.New(n.Err)
				}
				badNodes[i] = badNode{cid: c, err: err}
			}
			select {
			case res <- pinVerifyRes{ok: out.Ok, badNodes: badNodes}:
			case <-ctx.Done():
				return
			}
		}
	}()
	return res, nil
}
func (api *PinAPI) core() *HttpApi {
return (*HttpApi)(api)
}

213
client/rpc/pubsub.go Normal file
View File

@ -0,0 +1,213 @@
package rpc
import (
"bytes"
"context"
"encoding/json"
"io"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
"github.com/libp2p/go-libp2p/core/peer"
mbase "github.com/multiformats/go-multibase"
)
type PubsubAPI HttpApi
func (api *PubsubAPI) Ls(ctx context.Context) ([]string, error) {
var out struct {
Strings []string
}
if err := api.core().Request("pubsub/ls").Exec(ctx, &out); err != nil {
return nil, err
}
topics := make([]string, len(out.Strings))
for n, mb := range out.Strings {
_, topic, err := mbase.Decode(mb)
if err != nil {
return nil, err
}
topics[n] = string(topic)
}
return topics, nil
}
func (api *PubsubAPI) Peers(ctx context.Context, opts ...caopts.PubSubPeersOption) ([]peer.ID, error) {
options, err := caopts.PubSubPeersOptions(opts...)
if err != nil {
return nil, err
}
var out struct {
Strings []string
}
var optionalTopic string
if len(options.Topic) > 0 {
optionalTopic = toMultibase([]byte(options.Topic))
}
if err := api.core().Request("pubsub/peers", optionalTopic).Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]peer.ID, len(out.Strings))
for i, sid := range out.Strings {
id, err := peer.Decode(sid)
if err != nil {
return nil, err
}
res[i] = id
}
return res, nil
}
// Publish posts message to topic via the "pubsub/pub" endpoint. The topic is
// multibase-encoded for transport and the payload travels as the request body.
func (api *PubsubAPI) Publish(ctx context.Context, topic string, message []byte) error {
	req := api.core().Request("pubsub/pub", toMultibase([]byte(topic)))
	req.FileBody(bytes.NewReader(message))
	return req.Exec(ctx, nil)
}
type pubsubSub struct {
messages chan pubsubMessage
done chan struct{}
rcloser func() error
}
type pubsubMessage struct {
JFrom string `json:"from,omitempty"`
JData string `json:"data,omitempty"`
JSeqno string `json:"seqno,omitempty"`
JTopicIDs []string `json:"topicIDs,omitempty"`
// real values after unpacking from text/multibase envelopes
from peer.ID
data []byte
seqno []byte
topics []string
err error
}
func (msg *pubsubMessage) From() peer.ID {
return msg.from
}
func (msg *pubsubMessage) Data() []byte {
return msg.data
}
func (msg *pubsubMessage) Seq() []byte {
return msg.seqno
}
// TODO: do we want to keep this interface as []string,
// or change to more correct [][]byte?
func (msg *pubsubMessage) Topics() []string {
return msg.topics
}
// Next blocks until the next pubsub message arrives, the stream ends
// (io.EOF), or ctx is done. Envelope fields (from/data/seqno/topics) are
// unpacked from their text/multibase wire encodings before the message is
// returned.
func (s *pubsubSub) Next(ctx context.Context) (iface.PubSubMessage, error) {
	select {
	case msg, ok := <-s.messages:
		if !ok {
			// Channel closed by the reader goroutine: end of stream.
			return nil, io.EOF
		}
		if msg.err != nil {
			return nil, msg.err
		}
		// unpack values from text/multibase envelopes
		var err error
		msg.from, err = peer.Decode(msg.JFrom)
		if err != nil {
			return nil, err
		}
		_, msg.data, err = mbase.Decode(msg.JData)
		if err != nil {
			return nil, err
		}
		_, msg.seqno, err = mbase.Decode(msg.JSeqno)
		if err != nil {
			return nil, err
		}
		for _, mbt := range msg.JTopicIDs {
			_, topic, err := mbase.Decode(mbt)
			if err != nil {
				return nil, err
			}
			msg.topics = append(msg.topics, string(topic))
		}
		return &msg, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// Subscribe opens a streaming "pubsub/sub" request for topic and returns a
// subscription whose messages are decoded from the response stream by a
// background goroutine.
//
// The goroutine exits on EOF, on context cancellation, on Close, or after
// delivering a decode error (see below).
func (api *PubsubAPI) Subscribe(ctx context.Context, topic string, opts ...caopts.PubSubSubscribeOption) (iface.PubSubSubscription, error) {
	/* right now we have no options (discover got deprecated)
	options, err := caopts.PubSubSubscribeOptions(opts...)
	if err != nil {
		return nil, err
	}
	*/
	resp, err := api.core().Request("pubsub/sub", toMultibase([]byte(topic))).Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	sub := &pubsubSub{
		messages: make(chan pubsubMessage),
		done:     make(chan struct{}),
		rcloser: func() error {
			return resp.Cancel()
		},
	}
	dec := json.NewDecoder(resp.Output)
	go func() {
		defer close(sub.messages)
		for {
			var msg pubsubMessage
			if err := dec.Decode(&msg); err != nil {
				if err == io.EOF {
					return
				}
				// A json.Decoder error is persistent: previously the loop
				// continued, re-decoded the same error, and re-sent it on
				// every iteration. Deliver it once, then stop the stream.
				msg.err = err
				select {
				case sub.messages <- msg:
				case <-sub.done:
				case <-ctx.Done():
				}
				return
			}
			select {
			case sub.messages <- msg:
			case <-sub.done:
				return
			case <-ctx.Done():
				return
			}
		}
	}()
	return sub, nil
}
// Close terminates the subscription: it signals the reader goroutine to
// stop (by closing done) and cancels the underlying HTTP response.
//
// NOTE(review): this is not safe for concurrent callers — two simultaneous
// calls could both observe s.done != nil and double-close the channel;
// confirm callers serialize Close.
func (s *pubsubSub) Close() error {
	if s.done != nil {
		close(s.done)
		s.done = nil
	}
	return s.rcloser()
}
func (api *PubsubAPI) core() *HttpApi {
return (*HttpApi)(api)
}
// Encodes bytes into URL-safe multibase that can be sent over HTTP RPC (URL or body).
func toMultibase(data []byte) string {
mb, _ := mbase.Encode(mbase.Base64url, data)
return mb
}

36
client/rpc/request.go Normal file
View File

@ -0,0 +1,36 @@
package rpc
import (
"context"
"io"
"strings"
)
type Request struct {
Ctx context.Context
ApiBase string
Command string
Args []string
Opts map[string]string
Body io.Reader
Headers map[string]string
}
func NewRequest(ctx context.Context, url, command string, args ...string) *Request {
if !strings.HasPrefix(url, "http") {
url = "http://" + url
}
opts := map[string]string{
"encoding": "json",
"stream-channels": "true",
}
return &Request{
Ctx: ctx,
ApiBase: url + "/api/v0",
Command: command,
Args: args,
Opts: opts,
Headers: make(map[string]string),
}
}

View File

@ -0,0 +1,148 @@
package rpc
import (
"bytes"
"context"
"fmt"
"io"
"strconv"
"strings"
"github.com/blang/semver/v4"
"github.com/ipfs/boxo/files"
)
type RequestBuilder interface {
Arguments(args ...string) RequestBuilder
BodyString(body string) RequestBuilder
BodyBytes(body []byte) RequestBuilder
Body(body io.Reader) RequestBuilder
FileBody(body io.Reader) RequestBuilder
Option(key string, value interface{}) RequestBuilder
Header(name, value string) RequestBuilder
Send(ctx context.Context) (*Response, error)
Exec(ctx context.Context, res interface{}) error
}
// encodedAbsolutePathVersion is the version from which the absolute path
// header in multipart requests is %-encoded. Before this version, it's
// sent raw.
var encodedAbsolutePathVersion = semver.MustParse("0.23.0-dev")
// requestBuilder is an IPFS commands request builder.
type requestBuilder struct {
command string
args []string
opts map[string]string
headers map[string]string
body io.Reader
buildError error
shell *HttpApi
}
// Arguments adds the arguments to the args.
func (r *requestBuilder) Arguments(args ...string) RequestBuilder {
r.args = append(r.args, args...)
return r
}
// BodyString sets the request body to the given string.
func (r *requestBuilder) BodyString(body string) RequestBuilder {
return r.Body(strings.NewReader(body))
}
// BodyBytes sets the request body to the given buffer.
func (r *requestBuilder) BodyBytes(body []byte) RequestBuilder {
return r.Body(bytes.NewReader(body))
}
// Body sets the request body to the given reader.
func (r *requestBuilder) Body(body io.Reader) RequestBuilder {
r.body = body
return r
}
// FileBody sets the request body to the given reader wrapped into a
// multipart reader (a single-entry map directory, as expected by the
// daemon's multipart handling).
//
// The remote daemon's version decides whether absolute paths in the
// multipart headers must be %-encoded (see encodedAbsolutePathVersion).
func (r *requestBuilder) FileBody(body io.Reader) RequestBuilder {
	pr, _ := files.NewReaderPathFile("/dev/stdin", io.NopCloser(body), nil)
	d := files.NewMapDirectory(map[string]files.Node{"": pr})
	version, err := r.shell.loadRemoteVersion()
	if err != nil {
		// Unfortunately, we cannot return an error here. Changing this API is also
		// not the best since we would otherwise have an inconsistent RequestBuilder.
		// We save the error and return it when calling [requestBuilder.Send].
		r.buildError = err
		return r
	}
	useEncodedAbsPaths := version.LT(encodedAbsolutePathVersion)
	r.body = files.NewMultiFileReader(d, false, useEncodedAbsPaths)
	return r
}
// Option sets the given option.
func (r *requestBuilder) Option(key string, value interface{}) RequestBuilder {
var s string
switch v := value.(type) {
case bool:
s = strconv.FormatBool(v)
case string:
s = v
case []byte:
s = string(v)
default:
// slow case.
s = fmt.Sprint(value)
}
if r.opts == nil {
r.opts = make(map[string]string, 1)
}
r.opts[key] = s
return r
}
// Header sets the given header.
func (r *requestBuilder) Header(name, value string) RequestBuilder {
if r.headers == nil {
r.headers = make(map[string]string, 1)
}
r.headers[name] = value
return r
}
// Send sends the request and return the response.
func (r *requestBuilder) Send(ctx context.Context) (*Response, error) {
if r.buildError != nil {
return nil, r.buildError
}
r.shell.applyGlobal(r)
req := NewRequest(ctx, r.shell.url, r.command, r.args...)
req.Opts = r.opts
req.Headers = r.headers
req.Body = r.body
return req.Send(&r.shell.httpcli)
}
// Exec sends the request and decodes the JSON response into res. When res
// is nil the response body is drained and closed instead; the response's
// own error (if any) takes precedence over a close error.
func (r *requestBuilder) Exec(ctx context.Context, res interface{}) error {
	httpRes, err := r.Send(ctx)
	if err != nil {
		return err
	}
	if res == nil {
		lateErr := httpRes.Close()
		if httpRes.Error != nil {
			return httpRes.Error
		}
		return lateErr
	}
	return httpRes.decode(res)
}
var _ RequestBuilder = &requestBuilder{}

169
client/rpc/response.go Normal file
View File

@ -0,0 +1,169 @@
package rpc
import (
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"os"
"github.com/ipfs/boxo/files"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
)
type Error = cmds.Error
type trailerReader struct {
resp *http.Response
}
func (r *trailerReader) Read(b []byte) (int, error) {
n, err := r.resp.Body.Read(b)
if err != nil {
if e := r.resp.Trailer.Get(cmdhttp.StreamErrHeader); e != "" {
err = errors.New(e)
}
}
return n, err
}
func (r *trailerReader) Close() error {
return r.resp.Body.Close()
}
type Response struct {
Output io.ReadCloser
Error *Error
}
func (r *Response) Close() error {
if r.Output != nil {
// drain output (response body)
_, err1 := io.Copy(io.Discard, r.Output)
err2 := r.Output.Close()
if err1 != nil {
return err1
}
return err2
}
return nil
}
// Cancel aborts running request (without draining request body).
func (r *Response) Cancel() error {
if r.Output != nil {
return r.Output.Close()
}
return nil
}
// decode reads the response body and decodes it as JSON into dec, then
// drains and closes the body. A decode error takes precedence over a close
// error; a pre-existing Response.Error is returned immediately.
func (r *Response) decode(dec interface{}) error {
	if r.Error != nil {
		return r.Error
	}
	err := json.NewDecoder(r.Output).Decode(dec)
	err2 := r.Close()
	if err != nil {
		return err
	}
	return err2
}
// Send executes the request against the given HTTP client as a POST and
// wraps the result in a *Response.
//
// On HTTP status >= 400 the body is interpreted as an error (plain text or
// JSON depending on Content-Type), Response.Error is populated, the body is
// drained and closed, and Response.Output is left nil.
func (r *Request) Send(c *http.Client) (*Response, error) {
	url := r.getURL()
	req, err := http.NewRequest("POST", url, r.Body)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(r.Ctx)
	// Add any headers that were supplied via the requestBuilder.
	for k, v := range r.Headers {
		req.Header.Add(k, v)
	}
	if fr, ok := r.Body.(*files.MultiFileReader); ok {
		// Multipart body: advertise the boundary in the headers.
		req.Header.Set("Content-Type", "multipart/form-data; boundary="+fr.Boundary())
		req.Header.Set("Content-Disposition", "form-data; name=\"files\"")
	}
	resp, err := c.Do(req)
	if err != nil {
		return nil, err
	}
	contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
	if err != nil {
		return nil, err
	}
	nresp := new(Response)
	// Wrap the body so trailer-carried stream errors surface as read errors.
	nresp.Output = &trailerReader{resp}
	if resp.StatusCode >= http.StatusBadRequest {
		e := new(Error)
		switch {
		case resp.StatusCode == http.StatusNotFound:
			e.Message = "command not found"
		case contentType == "text/plain":
			out, err := io.ReadAll(resp.Body)
			if err != nil {
				fmt.Fprintf(os.Stderr, "ipfs-shell: warning! response (%d) read error: %s\n", resp.StatusCode, err)
			}
			e.Message = string(out)
			// set special status codes.
			switch resp.StatusCode {
			case http.StatusNotFound, http.StatusBadRequest:
				e.Code = cmds.ErrClient
			case http.StatusTooManyRequests:
				e.Code = cmds.ErrRateLimited
			case http.StatusForbidden:
				e.Code = cmds.ErrForbidden
			}
		case contentType == "application/json":
			if err = json.NewDecoder(resp.Body).Decode(e); err != nil {
				fmt.Fprintf(os.Stderr, "ipfs-shell: warning! response (%d) unmarshall error: %s\n", resp.StatusCode, err)
			}
		default:
			// This is a server-side bug (probably).
			e.Code = cmds.ErrImplementation
			fmt.Fprintf(os.Stderr, "ipfs-shell: warning! unhandled response (%d) encoding: %s", resp.StatusCode, contentType)
			out, err := io.ReadAll(resp.Body)
			if err != nil {
				fmt.Fprintf(os.Stderr, "ipfs-shell: response (%d) read error: %s\n", resp.StatusCode, err)
			}
			e.Message = fmt.Sprintf("unknown ipfs-shell error encoding: %q - %q", contentType, out)
		}
		nresp.Error = e
		nresp.Output = nil
		// drain body and close
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}
	return nresp, nil
}
// getURL renders the request as a full endpoint URL: ApiBase/Command
// followed by each positional argument as a repeated "arg" query parameter
// plus one query parameter per option.
func (r *Request) getURL() string {
	query := url.Values{}
	for _, a := range r.Args {
		query.Add("arg", a)
	}
	for name, val := range r.Opts {
		query.Add(name, val)
	}
	return r.ApiBase + "/" + r.Command + "?" + query.Encode()
}

161
client/rpc/routing.go Normal file
View File

@ -0,0 +1,161 @@
package rpc
import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"

	"github.com/ipfs/boxo/path"
	"github.com/ipfs/kubo/core/coreiface/options"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/routing"
)
type RoutingAPI HttpApi
// Get fetches the routing record for key via the "routing/get" endpoint.
// Only the first query event in the response stream is decoded; its
// base64-encoded Extra field is returned as the raw record bytes.
func (api *RoutingAPI) Get(ctx context.Context, key string) ([]byte, error) {
	resp, err := api.core().Request("routing/get", key).Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	defer resp.Close()
	var out routing.QueryEvent
	dec := json.NewDecoder(resp.Output)
	if err := dec.Decode(&out); err != nil {
		return nil, err
	}
	res, err := base64.StdEncoding.DecodeString(out.Extra)
	if err != nil {
		return nil, err
	}
	return res, nil
}
func (api *RoutingAPI) Put(ctx context.Context, key string, value []byte, opts ...options.RoutingPutOption) error {
var cfg options.RoutingPutSettings
for _, o := range opts {
if err := o(&cfg); err != nil {
return err
}
}
resp, err := api.core().Request("routing/put", key).
Option("allow-offline", cfg.AllowOffline).
FileBody(bytes.NewReader(value)).
Send(ctx)
if err != nil {
return err
}
if resp.Error != nil {
return resp.Error
}
return nil
}
// FindPeer queries the routing system for p's addresses via the
// "routing/findpeer" endpoint, decoding query events until a FinalPeer
// event arrives and returning its first AddrInfo.
//
// If the stream ends without a FinalPeer event, the decoder's error
// (typically io.EOF) is returned.
func (api *RoutingAPI) FindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
	var out struct {
		Type      routing.QueryEventType
		Responses []peer.AddrInfo
	}
	resp, err := api.core().Request("routing/findpeer", p.String()).Send(ctx)
	if err != nil {
		return peer.AddrInfo{}, err
	}
	if resp.Error != nil {
		return peer.AddrInfo{}, resp.Error
	}
	defer resp.Close()
	dec := json.NewDecoder(resp.Output)
	for {
		if err := dec.Decode(&out); err != nil {
			return peer.AddrInfo{}, err
		}
		if out.Type == routing.FinalPeer {
			// Guard against a FinalPeer event with an empty Responses slice,
			// which previously panicked with an index-out-of-range.
			if len(out.Responses) == 0 {
				return peer.AddrInfo{}, errors.New("routing/findpeer: final peer event with no responses")
			}
			return out.Responses[0], nil
		}
	}
}
// FindProviders streams providers of the content at p via the
// "routing/findprovs" endpoint. The path is first resolved to its root CID.
// Provider AddrInfos are delivered on the returned channel, which is closed
// when the stream ends, a query error arrives, or ctx is done.
func (api *RoutingAPI) FindProviders(ctx context.Context, p path.Path, opts ...options.RoutingFindProvidersOption) (<-chan peer.AddrInfo, error) {
	options, err := options.RoutingFindProvidersOptions(opts...)
	if err != nil {
		return nil, err
	}
	rp, _, err := api.core().ResolvePath(ctx, p)
	if err != nil {
		return nil, err
	}
	resp, err := api.core().Request("routing/findprovs", rp.RootCid().String()).
		Option("num-providers", options.NumProviders).
		Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	res := make(chan peer.AddrInfo)
	go func() {
		defer resp.Close()
		defer close(res)
		dec := json.NewDecoder(resp.Output)
		for {
			// Wire shape of a routing query event.
			var out struct {
				Extra     string
				Type      routing.QueryEventType
				Responses []peer.AddrInfo
			}
			if err := dec.Decode(&out); err != nil {
				return // todo: handle this somehow
			}
			if out.Type == routing.QueryError {
				return // usually a 'not found' error
				// todo: handle other errors
			}
			if out.Type == routing.Provider {
				for _, pi := range out.Responses {
					select {
					case res <- pi:
					case <-ctx.Done():
						return
					}
				}
			}
		}
	}()
	return res, nil
}
func (api *RoutingAPI) Provide(ctx context.Context, p path.Path, opts ...options.RoutingProvideOption) error {
options, err := options.RoutingProvideOptions(opts...)
if err != nil {
return err
}
rp, _, err := api.core().ResolvePath(ctx, p)
if err != nil {
return err
}
return api.core().Request("routing/provide", rp.RootCid().String()).
Option("recursive", options.Recursive).
Exec(ctx, nil)
}
func (api *RoutingAPI) core() *HttpApi {
return (*HttpApi)(api)
}

187
client/rpc/swarm.go Normal file
View File

@ -0,0 +1,187 @@
package rpc
import (
"context"
"time"
iface "github.com/ipfs/kubo/core/coreiface"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/multiformats/go-multiaddr"
)
type SwarmAPI HttpApi
func (api *SwarmAPI) Connect(ctx context.Context, pi peer.AddrInfo) error {
pidma, err := multiaddr.NewComponent("p2p", pi.ID.String())
if err != nil {
return err
}
saddrs := make([]string, len(pi.Addrs))
for i, addr := range pi.Addrs {
saddrs[i] = addr.Encapsulate(pidma).String()
}
return api.core().Request("swarm/connect", saddrs...).Exec(ctx, nil)
}
func (api *SwarmAPI) Disconnect(ctx context.Context, addr multiaddr.Multiaddr) error {
return api.core().Request("swarm/disconnect", addr.String()).Exec(ctx, nil)
}
type connInfo struct {
addr multiaddr.Multiaddr
peer peer.ID
latency time.Duration
muxer string
direction network.Direction
streams []protocol.ID
}
func (c *connInfo) ID() peer.ID {
return c.peer
}
func (c *connInfo) Address() multiaddr.Multiaddr {
return c.addr
}
func (c *connInfo) Direction() network.Direction {
return c.direction
}
func (c *connInfo) Latency() (time.Duration, error) {
return c.latency, nil
}
func (c *connInfo) Streams() ([]protocol.ID, error) {
return c.streams, nil
}
// Peers lists the node's current connections via the "swarm/peers"
// endpoint (with streams and latency enabled), converting each wire entry
// into an iface.ConnectionInfo.
func (api *SwarmAPI) Peers(ctx context.Context) ([]iface.ConnectionInfo, error) {
	// Wire shape of the swarm/peers reply.
	var resp struct {
		Peers []struct {
			Addr      string
			Peer      string
			Latency   string
			Muxer     string
			Direction network.Direction
			Streams   []struct {
				Protocol string
			}
		}
	}
	err := api.core().Request("swarm/peers").
		Option("streams", true).
		Option("latency", true).
		Exec(ctx, &resp)
	if err != nil {
		return nil, err
	}
	res := make([]iface.ConnectionInfo, len(resp.Peers))
	for i, conn := range resp.Peers {
		// Parse errors are deliberately ignored: an unparsable latency
		// string yields a zero duration.
		latency, _ := time.ParseDuration(conn.Latency)
		out := &connInfo{
			latency:   latency,
			muxer:     conn.Muxer,
			direction: conn.Direction,
		}
		out.peer, err = peer.Decode(conn.Peer)
		if err != nil {
			return nil, err
		}
		out.addr, err = multiaddr.NewMultiaddr(conn.Addr)
		if err != nil {
			return nil, err
		}
		out.streams = make([]protocol.ID, len(conn.Streams))
		// Note: this i intentionally shadows the outer loop index.
		for i, p := range conn.Streams {
			out.streams[i] = protocol.ID(p.Protocol)
		}
		res[i] = out
	}
	return res, nil
}
// KnownAddrs returns all addresses the node knows, keyed by peer ID, via
// the "swarm/addrs" endpoint. Any unparsable peer ID or multiaddr aborts
// the whole call with an error.
func (api *SwarmAPI) KnownAddrs(ctx context.Context) (map[peer.ID][]multiaddr.Multiaddr, error) {
	var out struct {
		Addrs map[string][]string
	}
	if err := api.core().Request("swarm/addrs").Exec(ctx, &out); err != nil {
		return nil, err
	}
	res := map[peer.ID][]multiaddr.Multiaddr{}
	for spid, saddrs := range out.Addrs {
		addrs := make([]multiaddr.Multiaddr, len(saddrs))
		for i, addr := range saddrs {
			a, err := multiaddr.NewMultiaddr(addr)
			if err != nil {
				return nil, err
			}
			addrs[i] = a
		}
		pid, err := peer.Decode(spid)
		if err != nil {
			return nil, err
		}
		res[pid] = addrs
	}
	return res, nil
}
func (api *SwarmAPI) LocalAddrs(ctx context.Context) ([]multiaddr.Multiaddr, error) {
var out struct {
Strings []string
}
if err := api.core().Request("swarm/addrs/local").Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]multiaddr.Multiaddr, len(out.Strings))
for i, addr := range out.Strings {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
res[i] = ma
}
return res, nil
}
func (api *SwarmAPI) ListenAddrs(ctx context.Context) ([]multiaddr.Multiaddr, error) {
var out struct {
Strings []string
}
if err := api.core().Request("swarm/addrs/listen").Exec(ctx, &out); err != nil {
return nil, err
}
res := make([]multiaddr.Multiaddr, len(out.Strings))
for i, addr := range out.Strings {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
res[i] = ma
}
return res, nil
}
func (api *SwarmAPI) core() *HttpApi {
return (*HttpApi)(api)
}

243
client/rpc/unixfs.go Normal file
View File

@ -0,0 +1,243 @@
package rpc
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"time"
"github.com/ipfs/boxo/files"
unixfs "github.com/ipfs/boxo/ipld/unixfs"
unixfs_pb "github.com/ipfs/boxo/ipld/unixfs/pb"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
iface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
mh "github.com/multiformats/go-multihash"
)
type addEvent struct {
Name string
Hash string `json:",omitempty"`
Bytes int64 `json:",omitempty"`
Size string `json:",omitempty"`
}
type UnixfsAPI HttpApi
// Add imports the data in f to IPFS via the "add" endpoint, streaming the
// server's progress events and returning the path of the final root.
//
// If options.Events is non-nil, every intermediate add event is forwarded
// to it (converted to iface.AddEvent); the call aborts with ctx.Err() if
// the events channel cannot be written before the context is done.
func (api *UnixfsAPI) Add(ctx context.Context, f files.Node, opts ...caopts.UnixfsAddOption) (path.ImmutablePath, error) {
	options, _, err := caopts.UnixfsAddOptions(opts...)
	if err != nil {
		return path.ImmutablePath{}, err
	}
	mht, ok := mh.Codes[options.MhType]
	if !ok {
		return path.ImmutablePath{}, fmt.Errorf("unknown mhType %d", options.MhType)
	}
	req := api.core().Request("add").
		Option("hash", mht).
		Option("chunker", options.Chunker).
		Option("cid-version", options.CidVersion).
		Option("fscache", options.FsCache).
		Option("inline", options.Inline).
		Option("inline-limit", options.InlineLimit).
		Option("nocopy", options.NoCopy).
		Option("only-hash", options.OnlyHash).
		Option("pin", options.Pin).
		Option("silent", options.Silent).
		Option("progress", options.Progress)
	if options.RawLeavesSet {
		req.Option("raw-leaves", options.RawLeaves)
	}
	switch options.Layout {
	case caopts.BalancedLayout:
		// noop, default
	case caopts.TrickleLayout:
		req.Option("trickle", true)
	}
	d := files.NewMapDirectory(map[string]files.Node{"": f}) // unwrapped on the other side
	// The remote version decides whether multipart absolute paths are
	// %-encoded (see encodedAbsolutePathVersion).
	version, err := api.core().loadRemoteVersion()
	if err != nil {
		return path.ImmutablePath{}, err
	}
	useEncodedAbsPaths := version.LT(encodedAbsolutePathVersion)
	req.Body(files.NewMultiFileReader(d, false, useEncodedAbsPaths))
	var out addEvent
	resp, err := req.Send(ctx)
	if err != nil {
		return path.ImmutablePath{}, err
	}
	if resp.Error != nil {
		return path.ImmutablePath{}, resp.Error
	}
	defer resp.Output.Close()
	dec := json.NewDecoder(resp.Output)
	for {
		var evt addEvent
		if err := dec.Decode(&evt); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return path.ImmutablePath{}, err
		}
		// Keep the last event: it carries the final root hash.
		out = evt
		if options.Events != nil {
			ifevt := &iface.AddEvent{
				Name:  out.Name,
				Size:  out.Size,
				Bytes: out.Bytes,
			}
			if out.Hash != "" {
				c, err := cid.Parse(out.Hash)
				if err != nil {
					return path.ImmutablePath{}, err
				}
				ifevt.Path = path.FromCid(c)
			}
			select {
			case options.Events <- ifevt:
			case <-ctx.Done():
				return path.ImmutablePath{}, ctx.Err()
			}
		}
	}
	c, err := cid.Parse(out.Hash)
	if err != nil {
		return path.ImmutablePath{}, err
	}
	return path.FromCid(c), nil
}
type lsLink struct {
Name, Hash string
Size uint64
Type unixfs_pb.Data_DataType
Target string
Mode os.FileMode
ModTime time.Time
}
type lsObject struct {
Hash string
Links []lsLink
}
type lsOutput struct {
Objects []lsObject
}
// Ls streams the directory entries of p via the "ls" endpoint in stream
// mode. Each wire entry (exactly one object with exactly one link) is
// converted to an iface.DirEntry; any decode or shape error is delivered as
// a DirEntry with Err set, after which the channel is closed.
func (api *UnixfsAPI) Ls(ctx context.Context, p path.Path, opts ...caopts.UnixfsLsOption) (<-chan iface.DirEntry, error) {
	options, err := caopts.UnixfsLsOptions(opts...)
	if err != nil {
		return nil, err
	}
	resp, err := api.core().Request("ls", p.String()).
		Option("resolve-type", options.ResolveChildren).
		Option("size", options.ResolveChildren).
		Option("stream", true).
		Send(ctx)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	dec := json.NewDecoder(resp.Output)
	out := make(chan iface.DirEntry)
	go func() {
		defer resp.Close()
		defer close(out)
		for {
			var link lsOutput
			if err := dec.Decode(&link); err != nil {
				if err == io.EOF {
					return
				}
				select {
				case out <- iface.DirEntry{Err: err}:
				case <-ctx.Done():
				}
				return
			}
			// In stream mode each message carries exactly one object with
			// exactly one link; anything else is a protocol violation.
			if len(link.Objects) != 1 {
				select {
				case out <- iface.DirEntry{Err: errors.New("unexpected Objects len")}:
				case <-ctx.Done():
				}
				return
			}
			if len(link.Objects[0].Links) != 1 {
				select {
				case out <- iface.DirEntry{Err: errors.New("unexpected Links len")}:
				case <-ctx.Done():
				}
				return
			}
			l0 := link.Objects[0].Links[0]
			c, err := cid.Decode(l0.Hash)
			if err != nil {
				select {
				case out <- iface.DirEntry{Err: err}:
				case <-ctx.Done():
				}
				return
			}
			// Map the unixfs wire type onto the coreiface file type;
			// unknown types fall through as the zero FileType.
			var ftype iface.FileType
			switch l0.Type {
			case unixfs.TRaw, unixfs.TFile:
				ftype = iface.TFile
			case unixfs.THAMTShard, unixfs.TDirectory, unixfs.TMetadata:
				ftype = iface.TDirectory
			case unixfs.TSymlink:
				ftype = iface.TSymlink
			}
			select {
			case out <- iface.DirEntry{
				Name:    l0.Name,
				Cid:     c,
				Size:    l0.Size,
				Type:    ftype,
				Target:  l0.Target,
				Mode:    l0.Mode,
				ModTime: l0.ModTime,
			}:
			case <-ctx.Done():
			}
		}
	}()
	return out, nil
}
func (api *UnixfsAPI) core() *HttpApi {
return (*HttpApi)(api)
}

View File

@ -1,6 +1,7 @@
# ipfs commandline tool
# ipfs command line tool
This is the [ipfs](http://ipfs.io) commandline tool. It contains a full ipfs node.
This is a [command line tool for interacting with Kubo](https://docs.ipfs.tech/install/command-line/),
an [IPFS](https://ipfs.tech) implementation. It contains a full IPFS node.
## Install

View File

@ -1,4 +1,4 @@
package main
package kubo
import (
"context"
@ -8,18 +8,18 @@ import (
"os"
"path/filepath"
files "github.com/ipfs/go-ipfs-files"
coreiface "github.com/ipfs/interface-go-ipfs-core"
"github.com/ipfs/interface-go-ipfs-core/options"
ipath "github.com/ipfs/interface-go-ipfs-core/path"
"github.com/ipfs/boxo/files"
"github.com/ipfs/boxo/path"
"github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/coreapi"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/core/coreiface/options"
"github.com/ipfs/kubo/repo/fsrepo/migrations"
"github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher"
"github.com/libp2p/go-libp2p/core/peer"
)
// addMigrations adds any migration downloaded by the fetcher to the IPFS node
// addMigrations adds any migration downloaded by the fetcher to the IPFS node.
func addMigrations(ctx context.Context, node *core.IpfsNode, fetcher migrations.Fetcher, pin bool) error {
var fetchers []migrations.Fetcher
if mf, ok := fetcher.(*migrations.MultiFetcher); ok {
@ -63,7 +63,7 @@ func addMigrations(ctx context.Context, node *core.IpfsNode, fetcher migrations.
return nil
}
// addMigrationFiles adds the files at paths to IPFS, optionally pinning them
// addMigrationFiles adds the files at paths to IPFS, optionally pinning them.
func addMigrationFiles(ctx context.Context, node *core.IpfsNode, paths []string, pin bool) error {
if len(paths) == 0 {
return nil
@ -98,7 +98,7 @@ func addMigrationFiles(ctx context.Context, node *core.IpfsNode, paths []string,
// addMigrationPaths adds the files at paths to IPFS, optionally pinning
// them. This is done after connecting to the peer.
func addMigrationPaths(ctx context.Context, node *core.IpfsNode, peerInfo peer.AddrInfo, paths []ipath.Path, pin bool) error {
func addMigrationPaths(ctx context.Context, node *core.IpfsNode, peerInfo peer.AddrInfo, paths []path.Path, pin bool) error {
if len(paths) == 0 {
return errors.New("nothing downloaded by ipfs fetcher")
}
@ -142,7 +142,7 @@ func addMigrationPaths(ctx context.Context, node *core.IpfsNode, peerInfo peer.A
return nil
}
func ipfsGet(ctx context.Context, ufs coreiface.UnixfsAPI, ipfsPath ipath.Path) error {
func ipfsGet(ctx context.Context, ufs coreiface.UnixfsAPI, ipfsPath path.Path) error {
nd, err := ufs.Get(ctx, ipfsPath)
if err != nil {
return err

View File

@ -1,20 +1,26 @@
package main
package kubo
import (
"context"
"errors"
_ "expvar"
"fmt"
"math"
"net"
"net/http"
_ "net/http/pprof"
"os"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"time"
multierror "github.com/hashicorp/go-multierror"
cmds "github.com/ipfs/go-ipfs-cmds"
mprome "github.com/ipfs/go-metrics-prometheus"
version "github.com/ipfs/kubo"
utilmain "github.com/ipfs/kubo/cmd/ipfs/util"
oldcmds "github.com/ipfs/kubo/commands"
@ -24,19 +30,19 @@ import (
commands "github.com/ipfs/kubo/core/commands"
"github.com/ipfs/kubo/core/coreapi"
corehttp "github.com/ipfs/kubo/core/corehttp"
options "github.com/ipfs/kubo/core/coreiface/options"
corerepo "github.com/ipfs/kubo/core/corerepo"
libp2p "github.com/ipfs/kubo/core/node/libp2p"
nodeMount "github.com/ipfs/kubo/fuse/node"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/kubo/repo/fsrepo/migrations"
"github.com/ipfs/kubo/repo/fsrepo/migrations/ipfsfetcher"
pnet "github.com/libp2p/go-libp2p/core/pnet"
sockets "github.com/libp2p/go-socket-activation"
cmds "github.com/ipfs/go-ipfs-cmds"
mprome "github.com/ipfs/go-metrics-prometheus"
options "github.com/ipfs/interface-go-ipfs-core/options"
goprocess "github.com/jbenet/goprocess"
p2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
pnet "github.com/libp2p/go-libp2p/core/pnet"
"github.com/libp2p/go-libp2p/core/protocol"
p2phttp "github.com/libp2p/go-libp2p/p2p/http"
sockets "github.com/libp2p/go-socket-activation"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
prometheus "github.com/prometheus/client_golang/prometheus"
@ -44,34 +50,34 @@ import (
)
const (
adjustFDLimitKwd = "manage-fdlimit"
enableGCKwd = "enable-gc"
initOptionKwd = "init"
initConfigOptionKwd = "init-config"
initProfileOptionKwd = "init-profile"
ipfsMountKwd = "mount-ipfs"
ipnsMountKwd = "mount-ipns"
migrateKwd = "migrate"
mountKwd = "mount"
offlineKwd = "offline" // global option
routingOptionKwd = "routing"
routingOptionSupernodeKwd = "supernode"
routingOptionDHTClientKwd = "dhtclient"
routingOptionDHTKwd = "dht"
routingOptionDHTServerKwd = "dhtserver"
routingOptionNoneKwd = "none"
routingOptionCustomKwd = "custom"
routingOptionDefaultKwd = "default"
routingOptionAutoKwd = "auto"
unencryptTransportKwd = "disable-transport-encryption"
unrestrictedAPIAccessKwd = "unrestricted-api"
writableKwd = "writable"
enablePubSubKwd = "enable-pubsub-experiment"
enableIPNSPubSubKwd = "enable-namesys-pubsub"
enableMultiplexKwd = "enable-mplex-experiment"
agentVersionSuffix = "agent-version-suffix"
adjustFDLimitKwd = "manage-fdlimit"
enableGCKwd = "enable-gc"
initOptionKwd = "init"
initConfigOptionKwd = "init-config"
initProfileOptionKwd = "init-profile"
ipfsMountKwd = "mount-ipfs"
ipnsMountKwd = "mount-ipns"
migrateKwd = "migrate"
mountKwd = "mount"
offlineKwd = "offline" // global option
routingOptionKwd = "routing"
routingOptionSupernodeKwd = "supernode"
routingOptionDHTClientKwd = "dhtclient"
routingOptionDHTKwd = "dht"
routingOptionDHTServerKwd = "dhtserver"
routingOptionNoneKwd = "none"
routingOptionCustomKwd = "custom"
routingOptionDefaultKwd = "default"
routingOptionAutoKwd = "auto"
routingOptionAutoClientKwd = "autoclient"
unencryptTransportKwd = "disable-transport-encryption"
unrestrictedAPIAccessKwd = "unrestricted-api"
enablePubSubKwd = "enable-pubsub-experiment"
enableIPNSPubSubKwd = "enable-namesys-pubsub"
enableMultiplexKwd = "enable-mplex-experiment"
agentVersionSuffix = "agent-version-suffix"
// apiAddrKwd = "address-api"
// swarmAddrKwd = "address-swarm"
// swarmAddrKwd = "address-swarm".
)
var daemonCmd = &cmds.Command{
@ -85,7 +91,7 @@ running, calls to 'ipfs' commands will be sent over the network to
the daemon.
`,
LongDescription: `
The daemon will start listening on ports on the network, which are
The Kubo daemon will start listening on ports on the network, which are
documented in (and can be modified through) 'ipfs config Addresses'.
For example, to change the 'Gateway' port:
@ -105,11 +111,16 @@ other computers in the network, use 0.0.0.0 as the ip address:
Be careful if you expose the RPC API. It is a security risk, as anyone could
control your node remotely. If you need to control the node remotely,
make sure to protect the port as you would other services or database
(firewall, authenticated proxy, etc).
(firewall, authenticated proxy, etc), or at least set API.Authorizations.
If you do not want to open any ports for RPC, and only want to use
kubo CLI client, it is possible to expose the RPC over Unix socket:
ipfs config Addresses.API /unix/var/run/kubo.socket
HTTP Headers
ipfs supports passing arbitrary headers to the RPC API and Gateway. You can
Kubo supports passing arbitrary headers to the RPC API and Gateway. You can
do this by setting headers on the API.HTTPHeaders and Gateway.HTTPHeaders
keys:
@ -120,7 +131,7 @@ Note that the value of the keys is an _array_ of strings. This is because
headers can have more than one value, and it is convenient to pass through
to other libraries.
CORS Headers (for API)
CORS Headers (for RPC API)
You can setup CORS headers the same way:
@ -137,7 +148,7 @@ second signal.
IPFS_PATH environment variable
ipfs uses a repository in the local file system. By default, the repo is
Kubo uses a repository in the local file system. By default, the repo is
located at ~/.ipfs. To change the repo location, set the $IPFS_PATH
environment variable:
@ -145,7 +156,7 @@ environment variable:
DEPRECATION NOTICE
Previously, ipfs used an environment variable as seen below:
Previously, Kubo used an environment variable as seen below:
export API_ORIGIN="http://localhost:8888/"
@ -156,23 +167,22 @@ Headers.
},
Options: []cmds.Option{
cmds.BoolOption(initOptionKwd, "Initialize ipfs with default settings if not already initialized"),
cmds.BoolOption(initOptionKwd, "Initialize Kubo with default settings if not already initialized"),
cmds.StringOption(initConfigOptionKwd, "Path to existing configuration file to be loaded during --init"),
cmds.StringOption(initProfileOptionKwd, "Configuration profiles to apply for --init. See ipfs init --help for more"),
cmds.StringOption(routingOptionKwd, "Overrides the routing option").WithDefault(routingOptionDefaultKwd),
cmds.BoolOption(mountKwd, "Mounts IPFS to the filesystem using FUSE (experimental)"),
cmds.BoolOption(writableKwd, "Enable writing objects (with POST, PUT and DELETE)"),
cmds.StringOption(ipfsMountKwd, "Path to the mountpoint for IPFS (if using --mount). Defaults to config setting."),
cmds.StringOption(ipnsMountKwd, "Path to the mountpoint for IPNS (if using --mount). Defaults to config setting."),
cmds.BoolOption(unrestrictedAPIAccessKwd, "Allow API access to unlisted hashes"),
cmds.BoolOption(unrestrictedAPIAccessKwd, "Allow RPC API access to unlisted hashes"),
cmds.BoolOption(unencryptTransportKwd, "Disable transport encryption (for debugging protocols)"),
cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection"),
cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").WithDefault(true),
cmds.BoolOption(migrateKwd, "If true, assume yes at the migrate prompt. If false, assume no."),
cmds.BoolOption(enablePubSubKwd, "Enable experimental pubsub feature. Overrides Pubsub.Enabled config."),
cmds.BoolOption(enablePubSubKwd, "DEPRECATED"),
cmds.BoolOption(enableIPNSPubSubKwd, "Enable IPNS over pubsub. Implicitly enables pubsub, overrides Ipns.UsePubsub config."),
cmds.BoolOption(enableMultiplexKwd, "DEPRECATED"),
cmds.StringOption(agentVersionSuffix, "Optional suffix to the AgentVersion presented by `ipfs id` and also advertised through BitSwap."),
cmds.StringOption(agentVersionSuffix, "Optional suffix to the AgentVersion presented by `ipfs id` and exposed via libp2p identify protocol."),
// TODO: add way to override addresses. tricky part: updating the config if also --init.
// cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"),
@ -370,6 +380,8 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
return err
}
fmt.Printf("PeerID: %s\n", cfg.Identity.PeerID)
if !psSet {
pubsub = cfg.Pubsub.Enabled.WithDefault(false)
}
@ -387,7 +399,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
"pubsub": pubsub,
"ipnsps": ipnsps,
},
//TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
// TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
}
routingOption, _ := req.Options[routingOptionKwd].(string)
@ -411,11 +423,9 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
case routingOptionSupernodeKwd:
return errors.New("supernode routing was never fully implemented and has been removed")
case routingOptionDefaultKwd, routingOptionAutoKwd:
ncfg.Routing = libp2p.ConstructDefaultRouting(
cfg.Identity.PeerID,
cfg.Addresses.Swarm,
cfg.Identity.PrivKey,
)
ncfg.Routing = libp2p.ConstructDefaultRouting(cfg, libp2p.DHTOption)
case routingOptionAutoClientKwd:
ncfg.Routing = libp2p.ConstructDefaultRouting(cfg, libp2p.DHTClientOption)
case routingOptionDHTClientKwd:
ncfg.Routing = libp2p.DHTClientOption
case routingOptionDHTKwd:
@ -425,20 +435,25 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
case routingOptionNoneKwd:
ncfg.Routing = libp2p.NilRouterOption
case routingOptionCustomKwd:
if cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient) {
return fmt.Errorf("Routing.AcceleratedDHTClient option is set even tho Routing.Type is custom, using custom .AcceleratedDHTClient needs to be set on DHT routers individually")
}
ncfg.Routing = libp2p.ConstructDelegatedRouting(
cfg.Routing.Routers,
cfg.Routing.Methods,
cfg.Identity.PeerID,
cfg.Addresses.Swarm,
cfg.Addresses,
cfg.Identity.PrivKey,
)
default:
return fmt.Errorf("unrecognized routing option: %s", routingOption)
}
agentVersionSuffixString, _ := req.Options[agentVersionSuffix].(string)
if agentVersionSuffixString != "" {
version.SetUserAgentSuffix(agentVersionSuffixString)
// Set optional agent version suffix
versionSuffixFromCli, _ := req.Options[agentVersionSuffix].(string)
versionSuffix := cfg.Version.AgentSuffix.WithDefault(versionSuffixFromCli)
if versionSuffix != "" {
version.SetUserAgentSuffix(versionSuffix)
}
node, err := core.NewNode(req.Context, ncfg)
@ -457,7 +472,23 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
log.Fatal("Private network does not work with Routing.Type=auto. Update your config to Routing.Type=dht (or none, and do manual peering)")
}
printSwarmAddrs(node)
printLibp2pPorts(node)
if node.PrivateKey.Type() == p2pcrypto.RSA {
fmt.Print(`
Warning: You are using an RSA Peer ID, which was replaced by Ed25519
as the default recommended in Kubo since September 2020. Signing with
RSA Peer IDs is more CPU-intensive than with other key types.
It is recommended that you change your public key type to ed25519
by using the following command:
ipfs key rotate -o rsa-key-backup -t ed25519
After changing your key type, restart your node for the changes to
take effect.
`)
}
defer func() {
// We wait for the node to close first, as the node has children
@ -532,10 +563,16 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
return err
}
// add trustless gateway over libp2p
p2pGwErrc, err := serveTrustlessGatewayOverLibp2p(cctx)
if err != nil {
return err
}
// Add ipfs version info to prometheus metrics
var ipfsInfoMetric = promauto.NewGaugeVec(prometheus.GaugeOpts{
ipfsInfoMetric := promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "ipfs_info",
Help: "IPFS version information.",
Help: "Kubo IPFS version information.",
}, []string{"version", "commit"})
// Setting to 1 lets us multiply it with other stats to add the version labels
@ -549,7 +586,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
prometheus.MustRegister(&corehttp.IpfsNodeCollector{Node: node})
// start MFS pinning thread
startPinMFS(daemonConfigPollInterval, cctx, &ipfsPinMFSNode{node})
startPinMFS(cctx, daemonConfigPollInterval, &ipfsPinMFSNode{node})
// The daemon is *finally* ready.
fmt.Printf("Daemon is ready\n")
@ -563,12 +600,30 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)")
}()
// Give the user heads up if daemon running in online mode has no peers after 1 minute
if !offline {
// Warn users who were victims of 'lowprofile' footgun (https://github.com/ipfs/kubo/pull/10524)
if cfg.Experimental.StrategicProviding {
fmt.Print(`
Reprovide system is disabled due to 'Experimental.StrategicProviding=true'
Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
If this is not intentional, call 'ipfs config profile apply announce-on'
`)
} else if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
fmt.Print(`
Reprovider system is disabled due to 'Reprovider.Interval=0'
Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h'
`)
}
// Give the user heads up if daemon running in online mode has no peers after 1 minute
time.AfterFunc(1*time.Minute, func() {
cfg, err := cctx.GetConfig()
if err != nil {
log.Errorf("failed to access config: %s", err)
return
}
if len(cfg.Bootstrap) == 0 && len(cfg.Peering.Peers) == 0 {
// Skip peer check if Bootstrap and Peering lists are empty
@ -579,16 +634,26 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
ipfs, err := coreapi.NewCoreAPI(node)
if err != nil {
log.Errorf("failed to access CoreAPI: %v", err)
return
}
peers, err := ipfs.Swarm().Peers(cctx.Context())
if err != nil {
log.Errorf("failed to read swarm peers: %v", err)
return
}
if len(peers) == 0 {
log.Error("failed to bootstrap (no peers found): consider updating Bootstrap or Peering section of your config")
} else {
// After 1 minute we should have enough peers
// to run informed version check
startVersionChecker(
cctx.Context(),
node,
cfg.Version.SwarmCheckEnabled.WithDefault(true),
cfg.Version.SwarmCheckPercentThreshold.WithDefault(config.DefaultSwarmCheckPercentThreshold),
)
}
})
}
// Hard deprecation notice if someone still uses IPFS_REUSEPORT
@ -599,7 +664,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
// collect long-running errors and block for shutdown
// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
var errs error
for err := range merge(apiErrc, gwErrc, gcErrc) {
for err := range merge(apiErrc, gwErrc, gcErrc, p2pGwErrc) {
if err != nil {
errs = multierror.Append(errs, err)
}
@ -608,7 +673,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
return errs
}
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests.
func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
cfg, err := cctx.GetConfig()
if err != nil {
@ -651,13 +716,25 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
listeners = append(listeners, apiLis)
}
if len(cfg.API.Authorizations) > 0 && len(listeners) > 0 {
fmt.Printf("RPC API access is limited by the rules defined in API.Authorizations\n")
}
for _, listener := range listeners {
// we might have listened to /tcp/0 - let's see what we are listing on
fmt.Printf("API server listening on %s\n", listener.Multiaddr())
// Browsers require TCP.
fmt.Printf("RPC API server listening on %s\n", listener.Multiaddr())
// Browsers require TCP with explicit host.
switch listener.Addr().Network() {
case "tcp", "tcp4", "tcp6":
fmt.Printf("WebUI: http://%s/webui\n", listener.Addr())
rpc := listener.Addr().String()
// replace catch-all with explicit localhost URL that works in browsers
// https://github.com/ipfs/kubo/issues/10515
if strings.Contains(rpc, "0.0.0.0:") {
rpc = strings.Replace(rpc, "0.0.0.0:", "127.0.0.1:", 1)
} else if strings.Contains(rpc, "[::]:") {
rpc = strings.Replace(rpc, "[::]:", "[::1]:", 1)
}
fmt.Printf("WebUI: http://%s/webui\n", rpc)
}
}
@ -666,12 +743,12 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
// only the webui objects are allowed.
// if you know what you're doing, go ahead and pass --unrestricted-api.
unrestricted, _ := req.Options[unrestrictedAPIAccessKwd].(bool)
gatewayOpt := corehttp.GatewayOption(false, corehttp.WebUIPaths...)
gatewayOpt := corehttp.GatewayOption(corehttp.WebUIPaths...)
if unrestricted {
gatewayOpt = corehttp.GatewayOption(true, "/ipfs", "/ipns")
gatewayOpt = corehttp.GatewayOption("/ipfs", "/ipns")
}
var opts = []corehttp.ServeOption{
opts := []corehttp.ServeOption{
corehttp.MetricsCollectionOption("api"),
corehttp.MetricsOpenCensusCollectionOption(),
corehttp.MetricsOpenCensusDefaultPrometheusRegistry(),
@ -698,8 +775,11 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err)
}
if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil {
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err)
if len(listeners) > 0 {
// Only add an api file if the API is running.
if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil {
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err)
}
}
errc := make(chan error)
@ -733,8 +813,8 @@ func rewriteMaddrToUseLocalhostIfItsAny(maddr ma.Multiaddr) ma.Multiaddr {
}
}
// printSwarmAddrs prints the addresses of the host
func printSwarmAddrs(node *core.IpfsNode) {
// printLibp2pPorts prints which ports are opened to facilitate swarm connectivity.
func printLibp2pPorts(node *core.IpfsNode) {
if !node.IsOnline {
fmt.Println("Swarm not listening, running in offline mode.")
return
@ -744,39 +824,58 @@ func printSwarmAddrs(node *core.IpfsNode) {
if err != nil {
log.Errorf("failed to read listening addresses: %s", err)
}
lisAddrs := make([]string, len(ifaceAddrs))
for i, addr := range ifaceAddrs {
lisAddrs[i] = addr.String()
}
sort.Strings(lisAddrs)
for _, addr := range lisAddrs {
fmt.Printf("Swarm listening on %s\n", addr)
// Multiple libp2p transports can use same port.
// Deduplicate all listeners and collect unique IP:port (udp|tcp) combinations
// which is useful information for operator deploying Kubo in TCP/IP infra.
addrMap := make(map[string]map[string]struct{})
re := regexp.MustCompile(`^/(?:ip[46]|dns(?:[46])?)/([^/]+)/(tcp|udp)/(\d+)(/.*)?$`)
for _, addr := range ifaceAddrs {
matches := re.FindStringSubmatch(addr.String())
if matches != nil {
hostname := matches[1]
protocol := strings.ToUpper(matches[2])
port := matches[3]
var host string
if matches[0][:4] == "/ip6" {
host = fmt.Sprintf("[%s]:%s", hostname, port)
} else {
host = fmt.Sprintf("%s:%s", hostname, port)
}
if _, ok := addrMap[host]; !ok {
addrMap[host] = make(map[string]struct{})
}
addrMap[host][protocol] = struct{}{}
}
}
nodePhostAddrs := node.PeerHost.Addrs()
addrs := make([]string, len(nodePhostAddrs))
for i, addr := range nodePhostAddrs {
addrs[i] = addr.String()
}
sort.Strings(addrs)
for _, addr := range addrs {
fmt.Printf("Swarm announcing %s\n", addr)
// Produce a sorted host:port list
hosts := make([]string, 0, len(addrMap))
for host := range addrMap {
hosts = append(hosts, host)
}
sort.Strings(hosts)
// Print listeners
for _, host := range hosts {
protocolsSet := addrMap[host]
protocols := make([]string, 0, len(protocolsSet))
for protocol := range protocolsSet {
protocols = append(protocols, protocol)
}
sort.Strings(protocols)
fmt.Printf("Swarm listening on %s (%s)\n", host, strings.Join(protocols, "+"))
}
fmt.Printf("Run 'ipfs id' to inspect announced and discovered multiaddrs of this node.\n")
}
// serveHTTPGateway collects options, creates listener, prints status message and starts serving requests
// serveHTTPGateway collects options, creates listener, prints status message and starts serving requests.
func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
cfg, err := cctx.GetConfig()
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: GetConfig() failed: %s", err)
}
writable, writableOptionFound := req.Options[writableKwd].(bool)
if !writableOptionFound {
writable = cfg.Gateway.Writable
}
listeners, err := sockets.TakeListeners("io.ipfs.gateway")
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: socket activation failed: %s", err)
@ -807,37 +906,37 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
}
// we might have listened to /tcp/0 - let's see what we are listing on
gwType := "readonly"
if writable {
gwType = "writable"
for _, listener := range listeners {
fmt.Printf("Gateway server listening on %s\n", listener.Multiaddr())
}
for _, listener := range listeners {
fmt.Printf("Gateway (%s) server listening on %s\n", gwType, listener.Multiaddr())
if cfg.Gateway.ExposeRoutingAPI.WithDefault(config.DefaultExposeRoutingAPI) {
for _, listener := range listeners {
fmt.Printf("Routing V1 API exposed at http://%s/routing/v1\n", listener.Addr())
}
}
cmdctx := *cctx
cmdctx.Gateway = true
var opts = []corehttp.ServeOption{
opts := []corehttp.ServeOption{
corehttp.MetricsCollectionOption("gateway"),
corehttp.HostnameOption(),
corehttp.GatewayOption(writable, "/ipfs", "/ipns"),
corehttp.GatewayOption("/ipfs", "/ipns"),
corehttp.VersionOption(),
corehttp.CheckVersionOption(),
corehttp.CommandsROOption(cmdctx),
}
if cfg.Experimental.P2pHttpProxy {
opts = append(opts, corehttp.P2PProxyOption())
}
if len(cfg.Gateway.RootRedirect) > 0 {
opts = append(opts, corehttp.RedirectOption("", cfg.Gateway.RootRedirect))
if cfg.Gateway.ExposeRoutingAPI.WithDefault(config.DefaultExposeRoutingAPI) {
opts = append(opts, corehttp.RoutingOption())
}
if len(cfg.Gateway.PathPrefixes) > 0 {
log.Fatal("Support for custom Gateway.PathPrefixes was removed: https://github.com/ipfs/go-ipfs/issues/7702")
if len(cfg.Gateway.RootRedirect) > 0 {
opts = append(opts, corehttp.RedirectOption("", cfg.Gateway.RootRedirect))
}
node, err := cctx.ConstructNode()
@ -873,7 +972,58 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
return errc, nil
}
// collects options and opens the fuse mountpoint
const gatewayProtocolID protocol.ID = "/ipfs/gateway" // FIXME: specify https://github.com/ipfs/specs/issues/433
func serveTrustlessGatewayOverLibp2p(cctx *oldcmds.Context) (<-chan error, error) {
node, err := cctx.ConstructNode()
if err != nil {
return nil, fmt.Errorf("serveHTTPGatewayOverLibp2p: ConstructNode() failed: %s", err)
}
cfg, err := node.Repo.Config()
if err != nil {
return nil, fmt.Errorf("could not read config: %w", err)
}
if !cfg.Experimental.GatewayOverLibp2p {
errCh := make(chan error)
close(errCh)
return errCh, nil
}
opts := []corehttp.ServeOption{
corehttp.MetricsCollectionOption("libp2p-gateway"),
corehttp.Libp2pGatewayOption(),
corehttp.VersionOption(),
}
handler, err := corehttp.MakeHandler(node, nil, opts...)
if err != nil {
return nil, err
}
h := p2phttp.Host{
StreamHost: node.PeerHost,
}
h.WellKnownHandler.AddProtocolMeta(gatewayProtocolID, p2phttp.ProtocolMeta{Path: "/"})
h.ServeMux = http.NewServeMux()
h.ServeMux.Handle("/", handler)
errc := make(chan error, 1)
go func() {
defer close(errc)
errc <- h.Serve()
}()
go func() {
<-node.Process.Closing()
h.Close()
}()
return errc, nil
}
// collects options and opens the fuse mountpoint.
func mountFuse(req *cmds.Request, cctx *oldcmds.Context) error {
cfg, err := cctx.GetConfig()
if err != nil {
@ -952,7 +1102,11 @@ func YesNoPrompt(prompt string) bool {
var s string
for i := 0; i < 3; i++ {
fmt.Printf("%s ", prompt)
fmt.Scanf("%s", &s)
_, err := fmt.Scanf("%s", &s)
if err != nil {
fmt.Printf("Invalid input: %v. Please try again.\n", err)
continue
}
switch s {
case "y", "Y":
return true
@ -977,3 +1131,41 @@ func printVersion() {
fmt.Printf("System version: %s\n", runtime.GOARCH+"/"+runtime.GOOS)
fmt.Printf("Golang version: %s\n", runtime.Version())
}
func startVersionChecker(ctx context.Context, nd *core.IpfsNode, enabled bool, percentThreshold int64) {
if !enabled {
return
}
ticker := time.NewTicker(time.Hour)
defer ticker.Stop()
go func() {
for {
o, err := commands.DetectNewKuboVersion(nd, percentThreshold)
if err != nil {
// The version check is best-effort, and may fail in custom
// configurations that do not run standard WAN DHT. If it
// errors here, no point in spamming logs: log once and exit.
log.Errorw("initial version check failed, will not be run again", "error", err)
return
}
if o.UpdateAvailable {
newerPercent := fmt.Sprintf("%.0f%%", math.Round(float64(o.WithGreaterVersion)/float64(o.PeersSampled)*100))
log.Errorf(`
A NEW VERSION OF KUBO DETECTED
This Kubo node is running an outdated version (%s).
%s of the sampled Kubo peers are running a higher version.
Visit https://github.com/ipfs/kubo/releases or https://dist.ipfs.tech/#kubo and update to version %s or later.`,
o.RunningVersion, newerPercent, o.GreatestVersion)
}
select {
case <-ctx.Done():
return
case <-nd.Process.Closing():
return
case <-ticker.C:
continue
}
}
}()
}

View File

@ -1,7 +1,7 @@
//go:build linux
// +build linux
package main
package kubo
import (
daemon "github.com/coreos/go-systemd/v22/daemon"

View File

@ -1,7 +1,7 @@
//go:build !linux
// +build !linux
package main
package kubo
func notifyReady() {}

View File

@ -1,4 +1,4 @@
package main
package kubo
import (
"net/http"

View File

@ -1,4 +1,4 @@
package main
package kubo
import (
"context"
@ -25,7 +25,8 @@ func makeResolver(t *testing.T, n uint8) *madns.Resolver {
backend := &madns.MockResolver{
IP: map[string][]net.IPAddr{
"example.com": results,
}}
},
}
resolver, err := madns.NewResolver(madns.WithDefaultResolver(backend))
if err != nil {

View File

@ -1,4 +1,4 @@
package main
package kubo
import (
"context"
@ -10,24 +10,25 @@ import (
"path/filepath"
"strings"
path "github.com/ipfs/go-path"
unixfs "github.com/ipfs/go-unixfs"
unixfs "github.com/ipfs/boxo/ipld/unixfs"
"github.com/ipfs/boxo/path"
assets "github.com/ipfs/kubo/assets"
oldcmds "github.com/ipfs/kubo/commands"
core "github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/commands"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/boxo/files"
cmds "github.com/ipfs/go-ipfs-cmds"
files "github.com/ipfs/go-ipfs-files"
options "github.com/ipfs/interface-go-ipfs-core/options"
config "github.com/ipfs/kubo/config"
options "github.com/ipfs/kubo/core/coreiface/options"
)
const (
algorithmDefault = options.Ed25519Key
algorithmOptionName = "algorithm"
bitsOptionName = "bits"
emptyRepoDefault = true
emptyRepoOptionName = "empty-repo"
profileOptionName = "profile"
)
@ -61,7 +62,7 @@ environment variable:
Options: []cmds.Option{
cmds.StringOption(algorithmOptionName, "a", "Cryptographic algorithm to use for key generation.").WithDefault(algorithmDefault),
cmds.IntOption(bitsOptionName, "b", "Number of bits to use in the generated RSA private key."),
cmds.BoolOption(emptyRepoOptionName, "e", "Don't add and pin help files to the local storage."),
cmds.BoolOption(emptyRepoOptionName, "e", "Don't add and pin help files to the local storage.").WithDefault(emptyRepoDefault),
cmds.StringOption(profileOptionName, "p", "Apply profile settings to config. Multiple profiles can be separated by ','"),
// TODO need to decide whether to expose the override as a file or a
@ -193,7 +194,7 @@ func checkWritable(dir string) error {
if os.IsNotExist(err) {
// dir doesn't exist, check that we can create it
return os.Mkdir(dir, 0775)
return os.Mkdir(dir, 0o775)
}
if os.IsPermission(err) {
@ -251,7 +252,7 @@ func initializeIpnsKeyspace(repoRoot string) error {
// pin recursively because this might already be pinned
// and doing a direct pin would throw an error in that case
err = nd.Pinning.Pin(ctx, emptyDir, true)
err = nd.Pinning.Pin(ctx, emptyDir, true, "")
if err != nil {
return err
}

View File

@ -1,4 +1,4 @@
package main
package kubo
import (
commands "github.com/ipfs/kubo/core/commands"
@ -14,7 +14,7 @@ var Root = &cmds.Command{
Helptext: commands.Root.Helptext,
}
// commandsClientCmd is the "ipfs commands" command for local cli
// commandsClientCmd is the "ipfs commands" command for local cli.
var commandsClientCmd = commands.CommandsCmd(Root)
// Commands in localCommands should always be run locally (even if daemon is running).

View File

@ -1,23 +1,24 @@
package main
package kubo
import (
"context"
"fmt"
"os"
"time"
"github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
pinclient "github.com/ipfs/boxo/pinning/remote/client"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
pinclient "github.com/ipfs/go-pinning-service-http-client"
logging "github.com/ipfs/go-log/v2"
config "github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
)
// mfslog is the logger for remote mfs pinning
// mfslog is the logger for remote mfs pinning.
var mfslog = logging.Logger("remotepinning/mfs")
type lastPin struct {
@ -31,7 +32,20 @@ func (x lastPin) IsValid() bool {
return x != lastPin{}
}
const daemonConfigPollInterval = time.Minute / 2
var daemonConfigPollInterval = time.Minute / 2
func init() {
// this environment variable is solely for testing, use at your own risk
if pollDurStr := os.Getenv("MFS_PIN_POLL_INTERVAL"); pollDurStr != "" {
d, err := time.ParseDuration(pollDurStr)
if err != nil {
mfslog.Error("error parsing MFS_PIN_POLL_INTERVAL, using default:", err)
return
}
daemonConfigPollInterval = d
}
}
const defaultRepinInterval = 5 * time.Minute
type pinMFSContext interface {
@ -61,56 +75,28 @@ func (x *ipfsPinMFSNode) PeerHost() host.Host {
return x.node.PeerHost
}
func startPinMFS(configPollInterval time.Duration, cctx pinMFSContext, node pinMFSNode) {
errCh := make(chan error)
go pinMFSOnChange(configPollInterval, cctx, node, errCh)
go func() {
for {
select {
case err, isOpen := <-errCh:
if !isOpen {
return
}
mfslog.Errorf("%v", err)
case <-cctx.Context().Done():
return
}
}
}()
func startPinMFS(cctx pinMFSContext, configPollInterval time.Duration, node pinMFSNode) {
go pinMFSOnChange(cctx, configPollInterval, node)
}
func pinMFSOnChange(configPollInterval time.Duration, cctx pinMFSContext, node pinMFSNode, errCh chan<- error) {
defer close(errCh)
var tmo *time.Timer
defer func() {
if tmo != nil {
tmo.Stop()
}
}()
func pinMFSOnChange(cctx pinMFSContext, configPollInterval time.Duration, node pinMFSNode) {
tmo := time.NewTimer(configPollInterval)
defer tmo.Stop()
lastPins := map[string]lastPin{}
for {
// polling sleep
if tmo == nil {
tmo = time.NewTimer(configPollInterval)
} else {
tmo.Reset(configPollInterval)
}
select {
case <-cctx.Context().Done():
return
case <-tmo.C:
tmo.Reset(configPollInterval)
}
// reread the config, which may have changed in the meantime
cfg, err := cctx.GetConfig()
if err != nil {
select {
case errCh <- fmt.Errorf("pinning reading config (%v)", err):
case <-cctx.Context().Done():
return
}
mfslog.Errorf("pinning reading config (%v)", err)
continue
}
mfslog.Debugf("pinning loop is awake, %d remote services", len(cfg.Pinning.RemoteServices))
@ -118,30 +104,29 @@ func pinMFSOnChange(configPollInterval time.Duration, cctx pinMFSContext, node p
// get the most recent MFS root cid
rootNode, err := node.RootNode()
if err != nil {
select {
case errCh <- fmt.Errorf("pinning reading MFS root (%v)", err):
case <-cctx.Context().Done():
return
}
mfslog.Errorf("pinning reading MFS root (%v)", err)
continue
}
rootCid := rootNode.Cid()
// pin to all remote services in parallel
pinAllMFS(cctx.Context(), node, cfg, rootCid, lastPins, errCh)
pinAllMFS(cctx.Context(), node, cfg, rootNode.Cid(), lastPins)
}
}
// pinAllMFS pins on all remote services in parallel to overcome DoS attacks.
func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, rootCid cid.Cid, lastPins map[string]lastPin, errCh chan<- error) {
ch := make(chan lastPin, len(cfg.Pinning.RemoteServices))
for svcName_, svcConfig_ := range cfg.Pinning.RemoteServices {
func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, rootCid cid.Cid, lastPins map[string]lastPin) {
ch := make(chan lastPin)
var started int
for svcName, svcConfig := range cfg.Pinning.RemoteServices {
if ctx.Err() != nil {
break
}
// skip services where MFS is not enabled
svcName, svcConfig := svcName_, svcConfig_
mfslog.Debugf("pinning MFS root considering service %q", svcName)
if !svcConfig.Policies.MFS.Enable {
mfslog.Debugf("pinning service %q is not enabled", svcName)
ch <- lastPin{}
continue
}
// read mfs pin interval for this service
@ -152,11 +137,7 @@ func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, rootCid
var err error
repinInterval, err = time.ParseDuration(svcConfig.Policies.MFS.RepinInterval)
if err != nil {
select {
case errCh <- fmt.Errorf("remote pinning service %q has invalid MFS.RepinInterval (%v)", svcName, err):
case <-ctx.Done():
}
ch <- lastPin{}
mfslog.Errorf("remote pinning service %q has invalid MFS.RepinInterval (%v)", svcName, err)
continue
}
}
@ -169,38 +150,30 @@ func pinAllMFS(ctx context.Context, node pinMFSNode, cfg *config.Config, rootCid
} else {
mfslog.Debugf("pinning MFS root to %q: skipped due to MFS.RepinInterval=%s (remaining: %s)", svcName, repinInterval.String(), (repinInterval - time.Since(last.Time)).String())
}
ch <- lastPin{}
continue
}
}
mfslog.Debugf("pinning MFS root %q to %q", rootCid, svcName)
go func() {
if r, err := pinMFS(ctx, node, rootCid, svcName, svcConfig); err != nil {
select {
case errCh <- fmt.Errorf("pinning MFS root %q to %q (%v)", rootCid, svcName, err):
case <-ctx.Done():
}
ch <- lastPin{}
} else {
ch <- r
go func(svcName string, svcConfig config.RemotePinningService) {
r, err := pinMFS(ctx, node, rootCid, svcName, svcConfig)
if err != nil {
mfslog.Errorf("pinning MFS root %q to %q (%v)", rootCid, svcName, err)
}
}()
ch <- r
}(svcName, svcConfig)
started++
}
for i := 0; i < len(cfg.Pinning.RemoteServices); i++ {
// Collect results from all started goroutines.
for i := 0; i < started; i++ {
if x := <-ch; x.IsValid() {
lastPins[x.ServiceName] = x
}
}
}
func pinMFS(
ctx context.Context,
node pinMFSNode,
cid cid.Cid,
svcName string,
svcConfig config.RemotePinningService,
) (lastPin, error) {
func pinMFS(ctx context.Context, node pinMFSNode, cid cid.Cid, svcName string, svcConfig config.RemotePinningService) (lastPin, error) {
c := pinclient.NewClient(svcConfig.API.Endpoint, svcConfig.API.Key)
pinName := svcConfig.Policies.MFS.PinName
@ -230,43 +203,46 @@ func pinMFS(
}
for range lsPinCh { // in case the prior loop exits early
}
if err := <-lsErrCh; err != nil {
err := <-lsErrCh
if err != nil {
return lastPin{}, fmt.Errorf("error while listing remote pins: %v", err)
}
// CID of the current MFS root is already being pinned, nothing to do
if pinning {
mfslog.Debugf("pinning MFS to %q: pin for %q exists since %s, skipping", svcName, cid, pinTime.String())
return lastPin{Time: pinTime, ServiceName: svcName, ServiceConfig: svcConfig, CID: cid}, nil
}
if !pinning {
// Prepare Pin.name
addOpts := []pinclient.AddOption{pinclient.PinOpts.WithName(pinName)}
// Prepare Pin.name
addOpts := []pinclient.AddOption{pinclient.PinOpts.WithName(pinName)}
// Prepare Pin.origins
// Add own multiaddrs to the 'origins' array, so Pinning Service can
// use that as a hint and connect back to us (if possible)
if node.PeerHost() != nil {
addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(node.PeerHost()))
if err != nil {
return lastPin{}, err
// Prepare Pin.origins
// Add own multiaddrs to the 'origins' array, so Pinning Service can
// use that as a hint and connect back to us (if possible)
if node.PeerHost() != nil {
addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(node.PeerHost()))
if err != nil {
return lastPin{}, err
}
addOpts = append(addOpts, pinclient.PinOpts.WithOrigins(addrs...))
}
addOpts = append(addOpts, pinclient.PinOpts.WithOrigins(addrs...))
}
// Create or replace pin for MFS root
if existingRequestID != "" {
mfslog.Debugf("pinning to %q: replacing existing MFS root pin with %q", svcName, cid)
_, err := c.Replace(ctx, existingRequestID, cid, addOpts...)
if err != nil {
return lastPin{}, err
// Create or replace pin for MFS root
if existingRequestID != "" {
mfslog.Debugf("pinning to %q: replacing existing MFS root pin with %q", svcName, cid)
if _, err = c.Replace(ctx, existingRequestID, cid, addOpts...); err != nil {
return lastPin{}, err
}
} else {
mfslog.Debugf("pinning to %q: creating a new MFS root pin for %q", svcName, cid)
if _, err = c.Add(ctx, cid, addOpts...); err != nil {
return lastPin{}, err
}
}
} else {
mfslog.Debugf("pinning to %q: creating a new MFS root pin for %q", svcName, cid)
_, err := c.Add(ctx, cid, addOpts...)
if err != nil {
return lastPin{}, err
}
mfslog.Debugf("pinning MFS to %q: pin for %q exists since %s, skipping", svcName, cid, pinTime.String())
}
return lastPin{Time: pinTime, ServiceName: svcName, ServiceConfig: svcConfig, CID: cid}, nil
return lastPin{
Time: pinTime,
ServiceName: svcName,
ServiceConfig: svcConfig,
CID: cid,
}, nil
}

View File

@ -1,14 +1,19 @@
package main
package kubo
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"strings"
"testing"
"time"
merkledag "github.com/ipfs/boxo/ipld/merkledag"
ipld "github.com/ipfs/go-ipld-format"
merkledag "github.com/ipfs/go-merkledag"
logging "github.com/ipfs/go-log/v2"
config "github.com/ipfs/kubo/config"
"github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
@ -60,25 +65,37 @@ func isErrorSimilar(e1, e2 error) bool {
}
func TestPinMFSConfigError(t *testing.T) {
ctx := &testPinMFSContext{
ctx: context.Background(),
ctx, cancel := context.WithTimeout(context.Background(), 2*testConfigPollInterval)
defer cancel()
cctx := &testPinMFSContext{
ctx: ctx,
cfg: nil,
err: fmt.Errorf("couldn't read config"),
}
node := &testPinMFSNode{}
errCh := make(chan error)
go pinMFSOnChange(testConfigPollInterval, ctx, node, errCh)
if !isErrorSimilar(<-errCh, ctx.err) {
t.Errorf("error did not propagate")
logReader := logging.NewPipeReader()
go func() {
pinMFSOnChange(cctx, testConfigPollInterval, node)
logReader.Close()
}()
level, msg := readLogLine(t, logReader)
if level != "error" {
t.Error("expected error to be logged")
}
if !isErrorSimilar(<-errCh, ctx.err) {
if !isErrorSimilar(errors.New(msg), cctx.err) {
t.Errorf("error did not propagate")
}
}
func TestPinMFSRootNodeError(t *testing.T) {
ctx := &testPinMFSContext{
ctx: context.Background(),
ctx, cancel := context.WithTimeout(context.Background(), 2*testConfigPollInterval)
defer cancel()
cctx := &testPinMFSContext{
ctx: ctx,
cfg: &config.Config{
Pinning: config.Pinning{},
},
@ -87,12 +104,16 @@ func TestPinMFSRootNodeError(t *testing.T) {
node := &testPinMFSNode{
err: fmt.Errorf("cannot create root node"),
}
errCh := make(chan error)
go pinMFSOnChange(testConfigPollInterval, ctx, node, errCh)
if !isErrorSimilar(<-errCh, node.err) {
t.Errorf("error did not propagate")
logReader := logging.NewPipeReader()
go func() {
pinMFSOnChange(cctx, testConfigPollInterval, node)
logReader.Close()
}()
level, msg := readLogLine(t, logReader)
if level != "error" {
t.Error("expected error to be logged")
}
if !isErrorSimilar(<-errCh, node.err) {
if !isErrorSimilar(errors.New(msg), node.err) {
t.Errorf("error did not propagate")
}
}
@ -155,7 +176,8 @@ func TestPinMFSService(t *testing.T) {
}
func testPinMFSServiceWithError(t *testing.T, cfg *config.Config, expectedErrorPrefix string) {
goctx, cancel := context.WithCancel(context.Background())
goctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx := &testPinMFSContext{
ctx: goctx,
cfg: cfg,
@ -164,16 +186,36 @@ func testPinMFSServiceWithError(t *testing.T, cfg *config.Config, expectedErrorP
node := &testPinMFSNode{
err: nil,
}
errCh := make(chan error)
go pinMFSOnChange(testConfigPollInterval, ctx, node, errCh)
defer cancel()
// first pass through the pinning loop
err := <-errCh
if !strings.Contains((err).Error(), expectedErrorPrefix) {
t.Errorf("expecting error containing %q", expectedErrorPrefix)
logReader := logging.NewPipeReader()
go func() {
pinMFSOnChange(ctx, testConfigPollInterval, node)
logReader.Close()
}()
level, msg := readLogLine(t, logReader)
if level != "error" {
t.Error("expected error to be logged")
}
// second pass through the pinning loop
if !strings.Contains((err).Error(), expectedErrorPrefix) {
if !strings.Contains(msg, expectedErrorPrefix) {
t.Errorf("expecting error containing %q", expectedErrorPrefix)
}
}
func readLogLine(t *testing.T, logReader io.Reader) (string, string) {
t.Helper()
r := bufio.NewReader(logReader)
data, err := r.ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
logInfo := struct {
Level string `json:"level"`
Msg string `json:"msg"`
}{}
err = json.Unmarshal(data, &logInfo)
if err != nil {
t.Fatal(err)
}
return logInfo.Level, logInfo.Msg
}

497
cmd/ipfs/kubo/start.go Normal file
View File

@ -0,0 +1,497 @@
// cmd/ipfs/kubo implements the primary CLI binary for kubo
package kubo
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"runtime/pprof"
"strings"
"time"
"github.com/blang/semver/v4"
"github.com/google/uuid"
u "github.com/ipfs/boxo/util"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/go-ipfs-cmds/cli"
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
logging "github.com/ipfs/go-log"
ipfs "github.com/ipfs/kubo"
"github.com/ipfs/kubo/client/rpc/auth"
"github.com/ipfs/kubo/cmd/ipfs/util"
oldcmds "github.com/ipfs/kubo/commands"
config "github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
corecmds "github.com/ipfs/kubo/core/commands"
"github.com/ipfs/kubo/core/corehttp"
"github.com/ipfs/kubo/plugin/loader"
"github.com/ipfs/kubo/repo"
"github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/kubo/tracing"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/contrib/propagators/autoprop"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
)
// log is the command logger.
var (
log = logging.Logger("cmd/ipfs")
tracer trace.Tracer
)
// declared as a var for testing purposes.
var dnsResolver = madns.DefaultResolver
const (
EnvEnableProfiling = "IPFS_PROF"
cpuProfile = "ipfs.cpuprof"
heapProfile = "ipfs.memprof"
)
type PluginPreloader func(*loader.PluginLoader) error
func loadPlugins(repoPath string, preload PluginPreloader) (*loader.PluginLoader, error) {
plugins, err := loader.NewPluginLoader(repoPath)
if err != nil {
return nil, fmt.Errorf("error loading plugins: %s", err)
}
if preload != nil {
if err := preload(plugins); err != nil {
return nil, fmt.Errorf("error loading plugins (preload): %s", err)
}
}
if err := plugins.Initialize(); err != nil {
return nil, fmt.Errorf("error initializing plugins: %s", err)
}
if err := plugins.Inject(); err != nil {
return nil, fmt.Errorf("error initializing plugins: %s", err)
}
return plugins, nil
}
func printErr(err error) int {
fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
return 1
}
func newUUID(key string) logging.Metadata {
ids := "#UUID-ERROR#"
if id, err := uuid.NewRandom(); err == nil {
ids = id.String()
}
return logging.Metadata{
key: ids,
}
}
func BuildDefaultEnv(ctx context.Context, req *cmds.Request) (cmds.Environment, error) {
return BuildEnv(nil)(ctx, req)
}
// BuildEnv creates an environment to be used with the kubo CLI. Note: the plugin preloader should only call functions
// associated with preloaded plugins (i.e. Load).
func BuildEnv(pl PluginPreloader) func(ctx context.Context, req *cmds.Request) (cmds.Environment, error) {
return func(ctx context.Context, req *cmds.Request) (cmds.Environment, error) {
checkDebug(req)
repoPath, err := getRepoPath(req)
if err != nil {
return nil, err
}
log.Debugf("config path is %s", repoPath)
plugins, err := loadPlugins(repoPath, pl)
if err != nil {
return nil, err
}
// this sets up the function that will initialize the node
// this is so that we can construct the node lazily.
return &oldcmds.Context{
ConfigRoot: repoPath,
ReqLog: &oldcmds.ReqLog{},
Plugins: plugins,
ConstructNode: func() (n *core.IpfsNode, err error) {
if req == nil {
return nil, errors.New("constructing node without a request")
}
r, err := fsrepo.Open(repoPath)
if err != nil { // repo is owned by the node
return nil, err
}
// ok everything is good. set it on the invocation (for ownership)
// and return it.
n, err = core.NewNode(ctx, &core.BuildCfg{
Repo: r,
})
if err != nil {
return nil, err
}
return n, nil
},
}, nil
}
}
// Start roadmap:
// - parse the commandline to get a cmdInvocation
// - if user requests help, print it and exit.
// - run the command invocation
// - output the response
// - if anything fails, print error, maybe with help.
func Start(buildEnv func(ctx context.Context, req *cmds.Request) (cmds.Environment, error)) (exitCode int) {
ctx := logging.ContextWithLoggable(context.Background(), newUUID("session"))
tp, err := tracing.NewTracerProvider(ctx)
if err != nil {
return printErr(err)
}
defer func() {
if err := tp.Shutdown(ctx); err != nil {
exitCode = printErr(err)
}
}()
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(autoprop.NewTextMapPropagator())
tracer = tp.Tracer("Kubo-cli")
stopFunc, err := profileIfEnabled()
if err != nil {
return printErr(err)
}
defer stopFunc() // to be executed as late as possible
intrh, ctx := util.SetupInterruptHandler(ctx)
defer intrh.Close()
// Handle `ipfs version` or `ipfs help`
if len(os.Args) > 1 {
// Handle `ipfs --version'
if os.Args[1] == "--version" {
os.Args[1] = "version"
}
// Handle `ipfs help` and `ipfs help <sub-command>`
if os.Args[1] == "help" {
if len(os.Args) > 2 {
os.Args = append(os.Args[:1], os.Args[2:]...)
// Handle `ipfs help --help`
// append `--help`,when the command is not `ipfs help --help`
if os.Args[1] != "--help" {
os.Args = append(os.Args, "--help")
}
} else {
os.Args[1] = "--help"
}
}
} else if insideGUI() { // if no args were passed, and we're in a GUI environment
// launch the daemon instead of launching a ghost window
os.Args = append(os.Args, "daemon", "--init")
}
// output depends on executable name passed in os.Args
// so we need to make sure it's stable
os.Args[0] = "ipfs"
err = cli.Run(ctx, Root, os.Args, os.Stdin, os.Stdout, os.Stderr, buildEnv, makeExecutor)
if err != nil {
return 1
}
// everything went better than expected :)
return 0
}
func insideGUI() bool {
return util.InsideGUI()
}
func checkDebug(req *cmds.Request) {
// check if user wants to debug. option OR env var.
debug, _ := req.Options["debug"].(bool)
if debug || os.Getenv("IPFS_LOGGING") == "debug" {
u.Debug = true
logging.SetDebugLogging()
}
if u.GetenvBool("DEBUG") {
u.Debug = true
}
}
func apiAddrOption(req *cmds.Request) (ma.Multiaddr, error) {
apiAddrStr, apiSpecified := req.Options[corecmds.ApiOption].(string)
if !apiSpecified {
return nil, nil
}
return ma.NewMultiaddr(apiAddrStr)
}
// encodedAbsolutePathVersion is the version from which the absolute path header in
// multipart requests is %-encoded. Before this version, its sent raw.
var encodedAbsolutePathVersion = semver.MustParse("0.23.0-dev")
func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
exe := tracingWrappedExecutor{cmds.NewExecutor(req.Root)}
cctx := env.(*oldcmds.Context)
// Check if the command is disabled.
if req.Command.NoLocal && req.Command.NoRemote {
return nil, fmt.Errorf("command disabled: %v", req.Path)
}
// Can we just run this locally?
if !req.Command.NoLocal {
if doesNotUseRepo, ok := corecmds.GetDoesNotUseRepo(req.Command.Extra); doesNotUseRepo && ok {
return exe, nil
}
}
// Get the API option from the commandline.
apiAddr, err := apiAddrOption(req)
if err != nil {
return nil, err
}
// Require that the command be run on the daemon when the API flag is
// passed (unless we're trying to _run_ the daemon).
daemonRequested := apiAddr != nil && req.Command != daemonCmd
// Run this on the client if required.
if req.Command.NoRemote {
if daemonRequested {
// User requested that the command be run on the daemon but we can't.
// NOTE: We drop this check for the `ipfs daemon` command.
return nil, errors.New("api flag specified but command cannot be run on the daemon")
}
return exe, nil
}
// Finally, look in the repo for an API file.
if apiAddr == nil {
var err error
apiAddr, err = fsrepo.APIAddr(cctx.ConfigRoot)
switch err {
case nil, repo.ErrApiNotRunning:
default:
return nil, err
}
}
// Still no api specified? Run it on the client or fail.
if apiAddr == nil {
if req.Command.NoLocal {
return nil, fmt.Errorf("command must be run on the daemon: %v", req.Path)
}
return exe, nil
}
// Resolve the API addr.
//
// Do not replace apiAddr with the resolved addr so that the requested
// hostname is kept for use in the request's HTTP header.
_, err = resolveAddr(req.Context, apiAddr)
if err != nil {
return nil, err
}
network, host, err := manet.DialArgs(apiAddr)
if err != nil {
return nil, err
}
// Construct the executor.
opts := []cmdhttp.ClientOpt{
cmdhttp.ClientWithAPIPrefix(corehttp.APIPath),
}
// Fallback on a local executor if we (a) have a repo and (b) aren't
// forcing a daemon.
if !daemonRequested && fsrepo.IsInitialized(cctx.ConfigRoot) {
opts = append(opts, cmdhttp.ClientWithFallback(exe))
}
var tpt http.RoundTripper
switch network {
case "tcp", "tcp4", "tcp6":
tpt = http.DefaultTransport
case "unix":
path := host
host = "unix"
tpt = &http.Transport{
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", path)
},
}
default:
return nil, fmt.Errorf("unsupported API address: %s", apiAddr)
}
apiAuth, specified := req.Options[corecmds.ApiAuthOption].(string)
if specified {
authorization := config.ConvertAuthSecret(apiAuth)
tpt = auth.NewAuthorizedRoundTripper(authorization, tpt)
}
httpClient := &http.Client{
Transport: otelhttp.NewTransport(tpt),
}
opts = append(opts, cmdhttp.ClientWithHTTPClient(httpClient))
// Fetch remove version, as some feature compatibility might change depending on it.
remoteVersion, err := getRemoteVersion(tracingWrappedExecutor{cmdhttp.NewClient(host, opts...)})
if err != nil {
return nil, err
}
opts = append(opts, cmdhttp.ClientWithRawAbsPath(remoteVersion.LT(encodedAbsolutePathVersion)))
return tracingWrappedExecutor{cmdhttp.NewClient(host, opts...)}, nil
}
type tracingWrappedExecutor struct {
exec cmds.Executor
}
func (twe tracingWrappedExecutor) Execute(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
ctx, span := tracer.Start(req.Context, "cmds."+strings.Join(req.Path, "."), trace.WithAttributes(attribute.StringSlice("Arguments", req.Arguments)))
defer span.End()
req.Context = ctx
err := twe.exec.Execute(req, re, env)
if err != nil {
span.SetStatus(codes.Error, err.Error())
}
return err
}
func getRepoPath(req *cmds.Request) (string, error) {
repoOpt, found := req.Options[corecmds.RepoDirOption].(string)
if found && repoOpt != "" {
return repoOpt, nil
}
repoPath, err := fsrepo.BestKnownPath()
if err != nil {
return "", err
}
return repoPath, nil
}
// startProfiling begins CPU profiling and returns a `stop` function to be
// executed as late as possible. The stop function captures the memprofile.
func startProfiling() (func(), error) {
// start CPU profiling as early as possible
ofi, err := os.Create(cpuProfile)
if err != nil {
return nil, err
}
err = pprof.StartCPUProfile(ofi)
if err != nil {
ofi.Close()
return nil, err
}
go func() {
for range time.NewTicker(time.Second * 30).C {
err := writeHeapProfileToFile()
if err != nil {
log.Error(err)
}
}
}()
stopProfiling := func() {
pprof.StopCPUProfile()
ofi.Close() // captured by the closure
}
return stopProfiling, nil
}
func writeHeapProfileToFile() error {
mprof, err := os.Create(heapProfile)
if err != nil {
return err
}
defer mprof.Close() // _after_ writing the heap profile
return pprof.WriteHeapProfile(mprof)
}
func profileIfEnabled() (func(), error) {
// FIXME this is a temporary hack so profiling of asynchronous operations
// works as intended.
if os.Getenv(EnvEnableProfiling) != "" {
stopProfilingFunc, err := startProfiling() // TODO maybe change this to its own option... profiling makes it slower.
if err != nil {
return nil, err
}
return stopProfilingFunc, nil
}
return func() {}, nil
}
func resolveAddr(ctx context.Context, addr ma.Multiaddr) (ma.Multiaddr, error) {
ctx, cancelFunc := context.WithTimeout(ctx, 10*time.Second)
defer cancelFunc()
addrs, err := dnsResolver.Resolve(ctx, addr)
if err != nil {
return nil, err
}
if len(addrs) == 0 {
return nil, errors.New("non-resolvable API endpoint")
}
return addrs[0], nil
}
type nopWriter struct {
io.Writer
}
func (nw nopWriter) Close() error {
return nil
}
func getRemoteVersion(exe cmds.Executor) (*semver.Version, error) {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*30))
defer cancel()
req, err := cmds.NewRequest(ctx, []string{"version"}, nil, nil, nil, Root)
if err != nil {
return nil, err
}
var buf bytes.Buffer
re, err := cmds.NewWriterResponseEmitter(nopWriter{&buf}, req)
if err != nil {
return nil, err
}
err = exe.Execute(req, re, nil)
if err != nil {
return nil, err
}
var out ipfs.VersionInfo
dec := json.NewDecoder(&buf)
if err := dec.Decode(&out); err != nil {
return nil, err
}
return semver.New(out.Version)
}

View File

@ -1,394 +1,11 @@
// cmd/ipfs implements the primary CLI binary for ipfs
package main
import (
"context"
"errors"
"fmt"
"math/rand"
"net"
"net/http"
"os"
"runtime/pprof"
"time"
"github.com/ipfs/kubo/cmd/ipfs/util"
oldcmds "github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/core"
corecmds "github.com/ipfs/kubo/core/commands"
"github.com/ipfs/kubo/core/corehttp"
"github.com/ipfs/kubo/plugin/loader"
"github.com/ipfs/kubo/repo"
"github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/kubo/tracing"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/go-ipfs-cmds/cli"
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
u "github.com/ipfs/go-ipfs-util"
logging "github.com/ipfs/go-log"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/google/uuid"
"go.opentelemetry.io/otel"
"github.com/ipfs/kubo/cmd/ipfs/kubo"
)
// log is the command logger
var log = logging.Logger("cmd/ipfs")
// declared as a var for testing purposes
var dnsResolver = madns.DefaultResolver
const (
EnvEnableProfiling = "IPFS_PROF"
cpuProfile = "ipfs.cpuprof"
heapProfile = "ipfs.memprof"
)
func loadPlugins(repoPath string) (*loader.PluginLoader, error) {
plugins, err := loader.NewPluginLoader(repoPath)
if err != nil {
return nil, fmt.Errorf("error loading plugins: %s", err)
}
if err := plugins.Initialize(); err != nil {
return nil, fmt.Errorf("error initializing plugins: %s", err)
}
if err := plugins.Inject(); err != nil {
return nil, fmt.Errorf("error initializing plugins: %s", err)
}
return plugins, nil
}
// main roadmap:
// - parse the commandline to get a cmdInvocation
// - if user requests help, print it and exit.
// - run the command invocation
// - output the response
// - if anything fails, print error, maybe with help
func main() {
os.Exit(mainRet())
}
func printErr(err error) int {
fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
return 1
}
func newUUID(key string) logging.Metadata {
ids := "#UUID-ERROR#"
if id, err := uuid.NewRandom(); err == nil {
ids = id.String()
}
return logging.Metadata{
key: ids,
}
}
func mainRet() (exitCode int) {
rand.Seed(time.Now().UnixNano())
ctx := logging.ContextWithLoggable(context.Background(), newUUID("session"))
var err error
tp, err := tracing.NewTracerProvider(ctx)
if err != nil {
return printErr(err)
}
defer func() {
if err := tp.Shutdown(ctx); err != nil {
exitCode = printErr(err)
}
}()
otel.SetTracerProvider(tp)
stopFunc, err := profileIfEnabled()
if err != nil {
return printErr(err)
}
defer stopFunc() // to be executed as late as possible
intrh, ctx := util.SetupInterruptHandler(ctx)
defer intrh.Close()
// Handle `ipfs version` or `ipfs help`
if len(os.Args) > 1 {
// Handle `ipfs --version'
if os.Args[1] == "--version" {
os.Args[1] = "version"
}
// Handle `ipfs help` and `ipfs help <sub-command>`
if os.Args[1] == "help" {
if len(os.Args) > 2 {
os.Args = append(os.Args[:1], os.Args[2:]...)
// Handle `ipfs help --help`
// append `--help`,when the command is not `ipfs help --help`
if os.Args[1] != "--help" {
os.Args = append(os.Args, "--help")
}
} else {
os.Args[1] = "--help"
}
}
} else if insideGUI() { // if no args were passed, and we're in a GUI environment
// launch the daemon instead of launching a ghost window
os.Args = append(os.Args, "daemon", "--init")
}
// output depends on executable name passed in os.Args
// so we need to make sure it's stable
os.Args[0] = "ipfs"
buildEnv := func(ctx context.Context, req *cmds.Request) (cmds.Environment, error) {
checkDebug(req)
repoPath, err := getRepoPath(req)
if err != nil {
return nil, err
}
log.Debugf("config path is %s", repoPath)
plugins, err := loadPlugins(repoPath)
if err != nil {
return nil, err
}
// this sets up the function that will initialize the node
// this is so that we can construct the node lazily.
return &oldcmds.Context{
ConfigRoot: repoPath,
ReqLog: &oldcmds.ReqLog{},
Plugins: plugins,
ConstructNode: func() (n *core.IpfsNode, err error) {
if req == nil {
return nil, errors.New("constructing node without a request")
}
r, err := fsrepo.Open(repoPath)
if err != nil { // repo is owned by the node
return nil, err
}
// ok everything is good. set it on the invocation (for ownership)
// and return it.
n, err = core.NewNode(ctx, &core.BuildCfg{
Repo: r,
})
if err != nil {
return nil, err
}
return n, nil
},
}, nil
}
err = cli.Run(ctx, Root, os.Args, os.Stdin, os.Stdout, os.Stderr, buildEnv, makeExecutor)
if err != nil {
return 1
}
// everything went better than expected :)
return 0
}
func insideGUI() bool {
return util.InsideGUI()
}
func checkDebug(req *cmds.Request) {
// check if user wants to debug. option OR env var.
debug, _ := req.Options["debug"].(bool)
if debug || os.Getenv("IPFS_LOGGING") == "debug" {
u.Debug = true
logging.SetDebugLogging()
}
if u.GetenvBool("DEBUG") {
u.Debug = true
}
}
func apiAddrOption(req *cmds.Request) (ma.Multiaddr, error) {
apiAddrStr, apiSpecified := req.Options[corecmds.ApiOption].(string)
if !apiSpecified {
return nil, nil
}
return ma.NewMultiaddr(apiAddrStr)
}
func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
exe := cmds.NewExecutor(req.Root)
cctx := env.(*oldcmds.Context)
// Check if the command is disabled.
if req.Command.NoLocal && req.Command.NoRemote {
return nil, fmt.Errorf("command disabled: %v", req.Path)
}
// Can we just run this locally?
if !req.Command.NoLocal {
if doesNotUseRepo, ok := corecmds.GetDoesNotUseRepo(req.Command.Extra); doesNotUseRepo && ok {
return exe, nil
}
}
// Get the API option from the commandline.
apiAddr, err := apiAddrOption(req)
if err != nil {
return nil, err
}
// Require that the command be run on the daemon when the API flag is
// passed (unless we're trying to _run_ the daemon).
daemonRequested := apiAddr != nil && req.Command != daemonCmd
// Run this on the client if required.
if req.Command.NoRemote {
if daemonRequested {
// User requested that the command be run on the daemon but we can't.
// NOTE: We drop this check for the `ipfs daemon` command.
return nil, errors.New("api flag specified but command cannot be run on the daemon")
}
return exe, nil
}
// Finally, look in the repo for an API file.
if apiAddr == nil {
var err error
apiAddr, err = fsrepo.APIAddr(cctx.ConfigRoot)
switch err {
case nil, repo.ErrApiNotRunning:
default:
return nil, err
}
}
// Still no api specified? Run it on the client or fail.
if apiAddr == nil {
if req.Command.NoLocal {
return nil, fmt.Errorf("command must be run on the daemon: %v", req.Path)
}
return exe, nil
}
// Resolve the API addr.
apiAddr, err = resolveAddr(req.Context, apiAddr)
if err != nil {
return nil, err
}
network, host, err := manet.DialArgs(apiAddr)
if err != nil {
return nil, err
}
// Construct the executor.
opts := []cmdhttp.ClientOpt{
cmdhttp.ClientWithAPIPrefix(corehttp.APIPath),
}
// Fallback on a local executor if we (a) have a repo and (b) aren't
// forcing a daemon.
if !daemonRequested && fsrepo.IsInitialized(cctx.ConfigRoot) {
opts = append(opts, cmdhttp.ClientWithFallback(exe))
}
switch network {
case "tcp", "tcp4", "tcp6":
case "unix":
path := host
host = "unix"
opts = append(opts, cmdhttp.ClientWithHTTPClient(&http.Client{
Transport: &http.Transport{
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", path)
},
},
}))
default:
return nil, fmt.Errorf("unsupported API address: %s", apiAddr)
}
return cmdhttp.NewClient(host, opts...), nil
}
func getRepoPath(req *cmds.Request) (string, error) {
repoOpt, found := req.Options[corecmds.RepoDirOption].(string)
if found && repoOpt != "" {
return repoOpt, nil
}
repoPath, err := fsrepo.BestKnownPath()
if err != nil {
return "", err
}
return repoPath, nil
}
// startProfiling begins CPU profiling and returns a `stop` function to be
// executed as late as possible. The stop function captures the memprofile.
func startProfiling() (func(), error) {
// start CPU profiling as early as possible
ofi, err := os.Create(cpuProfile)
if err != nil {
return nil, err
}
err = pprof.StartCPUProfile(ofi)
if err != nil {
ofi.Close()
return nil, err
}
go func() {
for range time.NewTicker(time.Second * 30).C {
err := writeHeapProfileToFile()
if err != nil {
log.Error(err)
}
}
}()
stopProfiling := func() {
pprof.StopCPUProfile()
ofi.Close() // captured by the closure
}
return stopProfiling, nil
}
func writeHeapProfileToFile() error {
mprof, err := os.Create(heapProfile)
if err != nil {
return err
}
defer mprof.Close() // _after_ writing the heap profile
return pprof.WriteHeapProfile(mprof)
}
// profileIfEnabled starts profiling when the EnvEnableProfiling environment
// variable is set, returning the corresponding stop function. When profiling
// is disabled it returns a no-op, so callers can defer the result
// unconditionally.
// FIXME this is a temporary hack so profiling of asynchronous operations
// works as intended.
func profileIfEnabled() (func(), error) {
	if os.Getenv(EnvEnableProfiling) == "" {
		return func() {}, nil
	}
	// TODO maybe change this to its own option... profiling makes it slower.
	return startProfiling()
}
// resolveAddr resolves a (possibly DNS-based) multiaddr into a concrete
// address, bounded by a 10-second timeout. It returns the first resolved
// address, or an error when resolution fails or yields nothing.
func resolveAddr(ctx context.Context, addr ma.Multiaddr) (ma.Multiaddr, error) {
	ctx, cancelFunc := context.WithTimeout(ctx, 10*time.Second)
	defer cancelFunc()
	addrs, err := dnsResolver.Resolve(ctx, addr)
	if err != nil {
		return nil, err
	}
	if len(addrs) == 0 {
		return nil, errors.New("non-resolvable API endpoint")
	}
	// NOTE: removed an unreachable `os.Exit(kubo.Start(...))` statement that
	// followed the return — dead code that belonged to a different file.
	return addrs[0], nil
}

View File

@ -1,30 +1,33 @@
//go:build testrunmain
// +build testrunmain
package main
package main_test
import (
"flag"
"fmt"
"os"
"testing"
"github.com/ipfs/kubo/cmd/ipfs/kubo"
)
// this abuses go so much that I felt dirty writing this code
// but it is the only way to do it without writing custom compiler that would
// be a clone of go-build with go-test
// be a clone of go-build with go-test.
func TestRunMain(t *testing.T) {
args := flag.Args()
os.Args = append([]string{os.Args[0]}, args...)
ret := mainRet()
ret := kubo.Start(kubo.BuildDefaultEnv)
p := os.Getenv("IPFS_COVER_RET_FILE")
if len(p) != 0 {
os.WriteFile(p, []byte(fmt.Sprintf("%d\n", ret)), 0777)
os.WriteFile(p, []byte(fmt.Sprintf("%d\n", ret)), 0o777)
}
// close outputs so go testing doesn't print anything
null, _ := os.Open(os.DevNull)
null, _ := os.OpenFile(os.DevNull, os.O_RDWR, 0755)
os.Stderr = null
os.Stdout = null
}

View File

@ -14,19 +14,19 @@ var log = logging.Logger("ulimit")
var (
supportsFDManagement = false
// getlimit returns the soft and hard limits of file descriptors counts
// getlimit returns the soft and hard limits of file descriptors counts.
getLimit func() (uint64, uint64, error)
// set limit sets the soft and hard limits of file descriptors counts
// set limit sets the soft and hard limits of file descriptors counts.
setLimit func(uint64, uint64) error
)
// minimum file descriptor limit before we complain
// minimum file descriptor limit before we complain.
const minFds = 2048
// default max file descriptor limit.
const maxFds = 8192
// userMaxFDs returns the value of IPFS_FD_MAX
// userMaxFDs returns the value of IPFS_FD_MAX.
func userMaxFDs() uint64 {
// check if the IPFS_FD_MAX is set up and if it does
// not have a valid fds number notify the user
@ -42,7 +42,7 @@ func userMaxFDs() uint64 {
}
// ManageFdLimit raise the current max file descriptor count
// of the process based on the IPFS_FD_MAX value
// of the process based on the IPFS_FD_MAX value.
func ManageFdLimit() (changed bool, newLimit uint64, err error) {
if !supportsFDManagement {
return false, 0, nil
@ -82,7 +82,7 @@ func ManageFdLimit() (changed bool, newLimit uint64, err error) {
// set the soft value
err = setLimit(targetLimit, hard)
if err != nil {
err = fmt.Errorf("error setting ulimit without hard limit: %s", err)
err = fmt.Errorf("error setting ulimit without hard limit: %w", err)
break
}
newLimit = targetLimit
@ -107,7 +107,7 @@ func ManageFdLimit() (changed bool, newLimit uint64, err error) {
break
}
default:
err = fmt.Errorf("error setting: ulimit: %s", err)
err = fmt.Errorf("error setting: ulimit: %w", err)
}
return newLimit > 0, newLimit, err

View File

@ -16,17 +16,19 @@ import (
core "github.com/ipfs/kubo/core"
coreapi "github.com/ipfs/kubo/core/coreapi"
corehttp "github.com/ipfs/kubo/core/corehttp"
"github.com/ipfs/kubo/misc/fsutil"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
fsnotify "github.com/fsnotify/fsnotify"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipfs/boxo/files"
process "github.com/jbenet/goprocess"
homedir "github.com/mitchellh/go-homedir"
)
var http = flag.Bool("http", false, "expose IPFS HTTP API")
var repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
var watchPath = flag.String("path", ".", "the path to watch")
var (
http = flag.Bool("http", false, "expose IPFS HTTP API")
repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
watchPath = flag.String("path", ".", "the path to watch")
)
func main() {
flag.Parse()
@ -52,11 +54,10 @@ func main() {
}
func run(ipfsPath, watchPath string) error {
proc := process.WithParent(process.Background())
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
ipfsPath, err := homedir.Expand(ipfsPath)
ipfsPath, err := fsutil.ExpandHome(ipfsPath)
if err != nil {
return err
}
@ -93,8 +94,8 @@ func run(ipfsPath, watchPath string) error {
if *http {
addr := "/ip4/127.0.0.1/tcp/5001"
var opts = []corehttp.ServeOption{
corehttp.GatewayOption(true, "/ipfs", "/ipns"),
opts := []corehttp.ServeOption{
corehttp.GatewayOption("/ipfs", "/ipns"),
corehttp.WebUIOption,
corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
}

View File

@ -1,9 +1,9 @@
codecov:
ci:
- "ci/circle-ci"
- "!travis-ci.org"
- "!ci.ipfs.team:8111"
- "!ci.ipfs.team"
- "github.com"
notify:
require_ci_to_pass: no
after_n_builds: 2

View File

@ -12,14 +12,14 @@ import (
cmds "github.com/ipfs/go-ipfs-cmds"
logging "github.com/ipfs/go-log"
coreiface "github.com/ipfs/interface-go-ipfs-core"
options "github.com/ipfs/interface-go-ipfs-core/options"
config "github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
options "github.com/ipfs/kubo/core/coreiface/options"
)
var log = logging.Logger("command")
// Context represents request context
// Context represents request context.
type Context struct {
ConfigRoot string
ReqLog *ReqLog
@ -54,7 +54,7 @@ func (c *Context) GetNode() (*core.IpfsNode, error) {
}
// GetAPI returns CoreAPI instance backed by ipfs node.
// It may construct the node with the provided function
// It may construct the node with the provided function.
func (c *Context) GetAPI() (coreiface.CoreAPI, error) {
if c.api == nil {
n, err := c.GetNode()

View File

@ -5,7 +5,7 @@ import (
"time"
)
// ReqLogEntry is an entry in the request log
// ReqLogEntry is an entry in the request log.
type ReqLogEntry struct {
StartTime time.Time
EndTime time.Time
@ -18,14 +18,14 @@ type ReqLogEntry struct {
log *ReqLog
}
// Copy returns a copy of the ReqLogEntry
// Copy returns a copy of the ReqLogEntry.
func (r *ReqLogEntry) Copy() *ReqLogEntry {
out := *r
out.log = nil
return &out
}
// ReqLog is a log of requests
// ReqLog is a log of requests.
type ReqLog struct {
Requests []*ReqLogEntry
nextID int
@ -33,7 +33,7 @@ type ReqLog struct {
keep time.Duration
}
// AddEntry adds an entry to the log
// AddEntry adds an entry to the log.
func (rl *ReqLog) AddEntry(rle *ReqLogEntry) {
rl.lock.Lock()
defer rl.lock.Unlock()
@ -47,7 +47,7 @@ func (rl *ReqLog) AddEntry(rle *ReqLogEntry) {
}
}
// ClearInactive removes stale entries
// ClearInactive removes stale entries.
func (rl *ReqLog) ClearInactive() {
rl.lock.Lock()
defer rl.lock.Unlock()
@ -79,14 +79,14 @@ func (rl *ReqLog) cleanup() {
rl.Requests = rl.Requests[:i]
}
// SetKeepTime sets a duration after which an entry will be considered inactive
// SetKeepTime sets a duration after which an entry will be considered inactive.
func (rl *ReqLog) SetKeepTime(t time.Duration) {
rl.lock.Lock()
defer rl.lock.Unlock()
rl.keep = t
}
// Report generates a copy of all the entries in the requestlog
// Report generates a copy of all the entries in the requestlog.
func (rl *ReqLog) Report() []*ReqLogEntry {
rl.lock.Lock()
defer rl.lock.Unlock()
@ -99,7 +99,7 @@ func (rl *ReqLog) Report() []*ReqLogEntry {
return out
}
// Finish marks an entry in the log as finished
// Finish marks an entry in the log as finished.
func (rl *ReqLog) Finish(rle *ReqLogEntry) {
rl.lock.Lock()
defer rl.lock.Unlock()

View File

@ -1,5 +1,63 @@
package config
type API struct {
HTTPHeaders map[string][]string // HTTP headers to return with the API.
import (
"encoding/base64"
"strings"
)
const (
APITag = "API"
AuthorizationTag = "Authorizations"
)
// RPCAuthScope describes a single RPC API authorization: the secret a client
// must present and the path prefixes that secret unlocks.
type RPCAuthScope struct {
	// AuthSecret is the secret that will be compared to the HTTP
	// "Authorization" header. A secret is in the format "type:value".
	// Check the documentation for supported types.
	AuthSecret string
	// AllowedPaths is an explicit list of RPC path prefixes to allow.
	// By default, none are allowed. ["/api/v0"] exposes all RPCs.
	AllowedPaths []string
}
type API struct {
// HTTPHeaders are the HTTP headers to return with the API.
HTTPHeaders map[string][]string
// Authorization is a map of authorizations used to authenticate in the API.
// If the map is empty, then the RPC API is exposed to everyone. Check the
// documentation for more details.
Authorizations map[string]*RPCAuthScope `json:",omitempty"`
}
// ConvertAuthSecret converts the given secret in the format "type:value" into an
// HTTP Authorization header value. It can handle 'bearer' and 'basic' as type.
// If type exists and is not known, an empty string is returned. If type does not
// exist, 'bearer' type is assumed.
// ConvertAuthSecret converts the given secret in the format "type:value" into
// an HTTP Authorization header value. It can handle 'bearer' and 'basic' as
// type. If type exists and is not known, an empty string is returned. If type
// does not exist, 'bearer' type is assumed.
func ConvertAuthSecret(secret string) string {
	if secret == "" {
		return secret
	}
	parts := strings.SplitN(secret, ":", 2)
	if len(parts) < 2 {
		// No prefix: assume bearer token.
		return "Bearer " + secret
	}
	value := parts[1]
	switch parts[0] {
	case "basic":
		if !strings.Contains(value, ":") {
			// Assume already base64 encoded.
			return "Basic " + value
		}
		// Assume basic:user:password
		return "Basic " + base64.StdEncoding.EncodeToString([]byte(value))
	case "bearer":
		return "Bearer " + value
	default:
		// Unknown. Type is present, but we can't handle it.
		return ""
	}
}

22
config/api_test.go Normal file
View File

@ -0,0 +1,22 @@
package config
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestConvertAuthSecret exercises the "type:value" → Authorization-header
// conversion table: empty input passes through, a bare token defaults to
// Bearer, and basic secrets are base64-encoded only when given as user:pass.
func TestConvertAuthSecret(t *testing.T) {
	for _, testCase := range []struct {
		input  string
		output string
	}{
		{"", ""},
		{"someToken", "Bearer someToken"},
		{"bearer:someToken", "Bearer someToken"},
		{"basic:user:pass", "Basic dXNlcjpwYXNz"},
		{"basic:dXNlcjpwYXNz", "Basic dXNlcjpwYXNz"},
	} {
		assert.Equal(t, testCase.output, ConvertAuthSecret(testCase.input))
	}
}

View File

@ -20,6 +20,9 @@ const (
// AutoNATServiceDisabled indicates that the user has disabled the
// AutoNATService.
AutoNATServiceDisabled
// AutoNATServiceEnabledV1Only forces use of V1 and disables V2
// (used for testing)
AutoNATServiceEnabledV1Only
)
func (m *AutoNATServiceMode) UnmarshalText(text []byte) error {
@ -30,6 +33,8 @@ func (m *AutoNATServiceMode) UnmarshalText(text []byte) error {
*m = AutoNATServiceEnabled
case "disabled":
*m = AutoNATServiceDisabled
case "legacy-v1":
*m = AutoNATServiceEnabledV1Only
default:
return fmt.Errorf("unknown autonat mode: %s", string(text))
}
@ -44,6 +49,8 @@ func (m AutoNATServiceMode) MarshalText() ([]byte, error) {
return []byte("enabled"), nil
case AutoNATServiceDisabled:
return []byte("disabled"), nil
case AutoNATServiceEnabledV1Only:
return []byte("legacy-v1"), nil
default:
return nil, fmt.Errorf("unknown autonat mode: %d", m)
}
@ -64,7 +71,7 @@ type AutoNATConfig struct {
Throttle *AutoNATThrottleConfig `json:",omitempty"`
}
// AutoNATThrottleConfig configures the throttle limites
// AutoNATThrottleConfig configures the throttle limites.
type AutoNATThrottleConfig struct {
// GlobalLimit and PeerLimit sets the global and per-peer dialback
// limits. The AutoNAT service will only perform the specified number of

30
config/autotls.go Normal file
View File

@ -0,0 +1,30 @@
package config
import p2pforge "github.com/ipshipyard/p2p-forge/client"
// AutoTLS includes optional configuration of p2p-forge client of service
// for obtaining a domain and TLS certificate to improve connectivity for web
// browser clients. More: https://github.com/ipshipyard/p2p-forge#readme
type AutoTLS struct {
	// Enabled enables the p2p-forge feature (disabled by default; see
	// DefaultAutoTLSEnabled).
	Enabled Flag `json:",omitempty"`
	// DomainSuffix is an optional override of the parent domain that will
	// be used.
	DomainSuffix *OptionalString `json:",omitempty"`
	// RegistrationEndpoint is an optional override of the HTTP API that
	// acts as the ACME DNS-01 Challenge broker.
	RegistrationEndpoint *OptionalString `json:",omitempty"`
	// RegistrationToken is an optional Authorization token, used with
	// private/test instances of p2p-forge.
	RegistrationToken *OptionalString `json:",omitempty"`
	// CAEndpoint is an optional override of the CA ACME API used by the
	// p2p-forge system.
	CAEndpoint *OptionalString `json:",omitempty"`
}
const (
DefaultAutoTLSEnabled = false // experimental, opt-in for now (https://github.com/ipfs/kubo/pull/10521)
DefaultDomainSuffix = p2pforge.DefaultForgeDomain
DefaultRegistrationEndpoint = p2pforge.DefaultForgeEndpoint
DefaultCAEndpoint = p2pforge.DefaultCAEndpoint
)

View File

@ -19,8 +19,8 @@ var DefaultBootstrapAddresses = []string{
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
"/ip4/104.131.131.82/udp/4001/quic/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
"/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
}
// ErrInvalidPeerAddr signals an address is not a valid peer address.
@ -36,7 +36,7 @@ func (c *Config) BootstrapPeers() ([]peer.AddrInfo, error) {
func DefaultBootstrapPeers() ([]peer.AddrInfo, error) {
ps, err := ParseBootstrapPeers(DefaultBootstrapAddresses)
if err != nil {
return nil, fmt.Errorf(`failed to parse hardcoded bootstrap peers: %s
return nil, fmt.Errorf(`failed to parse hardcoded bootstrap peers: %w
This is a problem with the ipfs codebase. Please report it to the dev team`, err)
}
return ps, nil

View File

@ -9,7 +9,7 @@ import (
"path/filepath"
"strings"
"github.com/mitchellh/go-homedir"
"github.com/ipfs/kubo/misc/fsutil"
)
// Config is used to load ipfs config files.
@ -26,6 +26,7 @@ type Config struct {
API API // local node's API settings
Swarm SwarmConfig
AutoNAT AutoNATConfig
AutoTLS AutoTLS
Pubsub PubsubConfig
Peering Peering
DNS DNS
@ -36,27 +37,29 @@ type Config struct {
Experimental Experiments
Plugins Plugins
Pinning Pinning
Import Import
Version Version
Internal Internal // experimental/unstable options
}
const (
// DefaultPathName is the default config dir name
// DefaultPathName is the default config dir name.
DefaultPathName = ".ipfs"
// DefaultPathRoot is the path to the default config dir location.
DefaultPathRoot = "~/" + DefaultPathName
// DefaultConfigFile is the filename of the configuration file
// DefaultConfigFile is the filename of the configuration file.
DefaultConfigFile = "config"
// EnvDir is the environment variable used to change the path root.
EnvDir = "IPFS_PATH"
)
// PathRoot returns the default configuration root directory
// PathRoot returns the default configuration root directory.
func PathRoot() (string, error) {
dir := os.Getenv(EnvDir)
var err error
if len(dir) == 0 {
dir, err = homedir.Expand(DefaultPathRoot)
dir, err = fsutil.ExpandHome(DefaultPathRoot)
}
return dir, err
}
@ -83,7 +86,7 @@ func Path(configroot, extension string) (string, error) {
// - If the user-provided configuration file path is only a file name, use the
// configuration root directory, otherwise use only the user-provided path
// and ignore the configuration root.
func Filename(configroot string, userConfigFile string) (string, error) {
func Filename(configroot, userConfigFile string) (string, error) {
if userConfigFile == "" {
return Path(configroot, DefaultConfigFile)
}
@ -95,7 +98,7 @@ func Filename(configroot string, userConfigFile string) (string, error) {
return userConfigFile, nil
}
// HumanOutput gets a config value ready for printing
// HumanOutput gets a config value ready for printing.
func HumanOutput(value interface{}) ([]byte, error) {
s, ok := value.(string)
if ok {
@ -104,7 +107,7 @@ func HumanOutput(value interface{}) ([]byte, error) {
return Marshal(value)
}
// Marshal configuration with JSON
// Marshal configuration with JSON.
func Marshal(value interface{}) ([]byte, error) {
// need to prettyprint, hence MarshalIndent, instead of Encoder
return json.MarshalIndent(value, "", " ")
@ -117,7 +120,7 @@ func FromMap(v map[string]interface{}) (*Config, error) {
}
var conf Config
if err := json.NewDecoder(buf).Decode(&conf); err != nil {
return nil, fmt.Errorf("failure to decode config: %s", err)
return nil, fmt.Errorf("failure to decode config: %w", err)
}
return &conf, nil
}
@ -129,7 +132,7 @@ func ToMap(conf *Config) (map[string]interface{}, error) {
}
var m map[string]interface{}
if err := json.NewDecoder(buf).Decode(&m); err != nil {
return nil, fmt.Errorf("failure to decode config: %s", err)
return nil, fmt.Errorf("failure to decode config: %w", err)
}
return m, nil
}
@ -140,11 +143,11 @@ func (c *Config) Clone() (*Config, error) {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(c); err != nil {
return nil, fmt.Errorf("failure to encode config: %s", err)
return nil, fmt.Errorf("failure to encode config: %w", err)
}
if err := json.NewDecoder(&buf).Decode(&newConfig); err != nil {
return nil, fmt.Errorf("failure to decode config: %s", err)
return nil, fmt.Errorf("failure to decode config: %w", err)
}
return &newConfig, nil

View File

@ -26,7 +26,7 @@ type Datastore struct {
}
// DataStorePath returns the default data store path given a configuration root
// (set an empty string to have the default configuration root)
// (set an empty string to have the default configuration root).
func DataStorePath(configroot string) (string, error) {
return Path(configroot, DefaultDataStoreDirectory)
}

View File

@ -6,8 +6,4 @@ type Discovery struct {
type MDNS struct {
Enabled bool
// DEPRECATED: the time between discovery rounds is no longer configurable
// See: https://github.com/ipfs/go-ipfs/pull/9048#discussion_r906814717
Interval *OptionalInteger `json:",omitempty"`
}

View File

@ -1,6 +1,6 @@
package config
// DNS specifies DNS resolution rules using custom resolvers
// DNS specifies DNS resolution rules using custom resolvers.
type DNS struct {
// Resolvers is a map of FQDNs to URLs for custom DNS resolution.
// URLs starting with `https://` indicate DoH endpoints.

View File

@ -1,12 +1,16 @@
package config
type Experiments struct {
FilestoreEnabled bool
UrlstoreEnabled bool
ShardingEnabled bool `json:",omitempty"` // deprecated by autosharding: https://github.com/ipfs/kubo/pull/8527
GraphsyncEnabled bool
Libp2pStreamMounting bool
P2pHttpProxy bool //nolint
StrategicProviding bool
AcceleratedDHTClient bool
FilestoreEnabled bool
UrlstoreEnabled bool
ShardingEnabled bool `json:",omitempty"` // deprecated by autosharding: https://github.com/ipfs/kubo/pull/8527
Libp2pStreamMounting bool
P2pHttpProxy bool //nolint
StrategicProviding bool
OptimisticProvide bool
OptimisticProvideJobsPoolSize int
GatewayOverLibp2p bool `json:",omitempty"`
GraphsyncEnabled graphsyncEnabled `json:",omitempty"`
AcceleratedDHTClient experimentalAcceleratedDHTClient `json:",omitempty"`
}

Some files were not shown because too many files have changed in this diff Show More