Merge branch 'master' into galargh-patch-1

This commit is contained in:
Piotr Galar 2023-03-02 11:31:04 +01:00 committed by GitHub
commit 0cea5be411
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
58 changed files with 805 additions and 498 deletions

View File

@ -14,6 +14,7 @@ jobs:
prepare:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 5
env:
TEST_NO_DOCKER: 1
TEST_NO_FUSE: 1
@ -40,6 +41,7 @@ jobs:
ipfs-interop:
needs: [prepare]
runs-on: ubuntu-latest
timeout-minutes: 20
defaults:
run:
shell: bash
@ -77,6 +79,7 @@ jobs:
go-ipfs-api:
needs: [prepare]
runs-on: ubuntu-latest
timeout-minutes: 5
env:
TEST_NO_DOCKER: 1
TEST_NO_FUSE: 1
@ -116,6 +119,7 @@ jobs:
go-ipfs-http-client:
needs: [prepare]
runs-on: ubuntu-latest
timeout-minutes: 5
env:
TEST_NO_DOCKER: 1
TEST_NO_FUSE: 1
@ -148,6 +152,7 @@ jobs:
ipfs-webui:
needs: [prepare]
runs-on: ubuntu-latest
timeout-minutes: 20
env:
NO_SANDBOX: true
LIBP2P_TCP_REUSEPORT: false

View File

@ -16,6 +16,7 @@ jobs:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
name: Analyze
runs-on: ubuntu-latest
timeout-minutes: 20
strategy:
fail-fast: false

View File

@ -11,6 +11,7 @@ jobs:
docker-build:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 10
env:
IMAGE_NAME: ipfs/kubo
WIP_IMAGE_TAG: wip

View File

@ -14,6 +14,7 @@ jobs:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
timeout-minutes: 90
env:
IMAGE_NAME: ipfs/kubo
LEGACY_IMAGE_NAME: ipfs/go-ipfs

View File

@ -14,6 +14,7 @@ jobs:
gobuild:
needs: [runner]
runs-on: ${{ fromJSON(needs.runner.outputs.config).labels }}
timeout-minutes: 20
env:
TEST_NO_DOCKER: 1
TEST_VERBOSE: 1

View File

@ -11,6 +11,7 @@ jobs:
unit:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 10
name: All
steps:
- uses: actions/checkout@v2

View File

@ -11,6 +11,7 @@ jobs:
golint:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 10
env:
TEST_NO_DOCKER: 1
TEST_NO_FUSE: 1

View File

@ -11,6 +11,7 @@ jobs:
gotest:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 20
env:
TEST_NO_DOCKER: 1
TEST_NO_FUSE: 1

View File

@ -10,6 +10,7 @@ on:
jobs:
choose:
runs-on: ubuntu-latest
timeout-minutes: 1
outputs:
config: ${{ steps.config.outputs.result }}
steps:

View File

@ -14,6 +14,7 @@ jobs:
sharness:
needs: [runner]
runs-on: ${{ fromJSON(needs.runner.outputs.config).labels }}
timeout-minutes: 20
defaults:
run:
shell: bash
@ -55,9 +56,7 @@ jobs:
make -O -j "$PARALLEL" \
test_sharness \
coverage/sharness_tests.coverprofile \
test/sharness/test-results/sharness.xml \
test/sharness/test-results/sharness.html \
test/sharness/test-results/sharness-html
test/sharness/test-results/sharness.xml
working-directory: kubo
env:
TEST_NO_DOCKER: 0
@ -87,6 +86,13 @@ jobs:
echo "# Summary" >> $GITHUB_STEP_SUMMARY
echo >> $GITHUB_STEP_SUMMARY
cat kubo/test/sharness/test-results/summary.txt >> $GITHUB_STEP_SUMMARY
- name: Generate one-page HTML report
uses: pl-strflt/junit-xml-to-html@v1
if: failure() || success()
with:
mode: no-frames
input: kubo/test/sharness/test-results/sharness.xml
output: kubo/test/sharness/test-results/sharness.html
- name: Upload one-page HTML report to S3
id: one-page
uses: pl-strflt/tf-aws-gh-runner/.github/actions/upload-artifact@main
@ -100,6 +106,13 @@ jobs:
with:
name: sharness.html
path: kubo/test/sharness/test-results/sharness.html
- name: Generate full HTML report
uses: pl-strflt/junit-xml-to-html@v1
if: failure() || success()
with:
mode: frames
input: kubo/test/sharness/test-results/sharness.xml
output: kubo/test/sharness/test-results/sharness-html
- name: Upload full HTML report to S3
id: full
uses: pl-strflt/tf-aws-gh-runner/.github/actions/upload-artifact@main

View File

@ -13,6 +13,7 @@ jobs:
sync-github-and-dist-ipfs-tech:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: "ubuntu-latest"
timeout-minutes: 5
steps:
- uses: ipfs/download-ipfs-distribution-action@v1
- uses: ipfs/start-ipfs-daemon-action@v1

View File

@ -11,7 +11,7 @@ import (
cidutil "github.com/ipfs/go-cidutil"
cmds "github.com/ipfs/go-ipfs-cmds"
bitswap "github.com/ipfs/go-libipfs/bitswap"
decision "github.com/ipfs/go-libipfs/bitswap/decision"
"github.com/ipfs/go-libipfs/bitswap/server"
peer "github.com/libp2p/go-libp2p/core/peer"
)
@ -179,7 +179,7 @@ prints the ledger associated with a given peer.
Arguments: []cmds.Argument{
cmds.StringArg("peer", true, false, "The PeerID (B58) of the ledger to inspect."),
},
Type: decision.Receipt{},
Type: server.Receipt{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
@ -203,7 +203,7 @@ prints the ledger associated with a given peer.
return cmds.EmitOnce(res, bs.LedgerForPeer(partner))
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *decision.Receipt) error {
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *server.Receipt) error {
fmt.Fprintf(w, "Ledger for %s\n"+
"Debt ratio:\t%f\n"+
"Exchanges:\t%d\n"+

View File

@ -1542,9 +1542,9 @@ another node, even if this other node is on a different network. This may
trigger netscan alerts on some hosting providers or cause strain in some setups.
The `server` configuration profile fills up this list with sensible defaults,
preventing dials to all non-routable IP addresses (e.g., `192.168.0.0/16`) but
you should always check settings against your own network and/or hosting
provider.
preventing dials to all non-routable IP addresses (e.g., `/ip4/192.168.0.0/ipcidr/16`,
which is the multiaddress representation of `192.168.0.0/16`) but you should always
check settings against your own network and/or hosting provider.
Default: `[]`

View File

@ -7,10 +7,10 @@ go 1.18
replace github.com/ipfs/kubo => ./../../..
require (
github.com/ipfs/go-libipfs v0.6.1-0.20230222011044-7b201415a176
github.com/ipfs/go-libipfs v0.6.1-0.20230228004237-36918f45f260
github.com/ipfs/interface-go-ipfs-core v0.11.0
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.26.0
github.com/libp2p/go-libp2p v0.26.2
github.com/multiformats/go-multiaddr v0.8.0
)
@ -101,7 +101,7 @@ require (
github.com/ipfs/go-metrics-interface v0.0.1 // indirect
github.com/ipfs/go-mfs v0.2.1 // indirect
github.com/ipfs/go-namesys v0.7.0 // indirect
github.com/ipfs/go-path v0.3.0 // indirect
github.com/ipfs/go-path v0.3.1 // indirect
github.com/ipfs/go-peertaskqueue v0.8.1 // indirect
github.com/ipfs/go-unixfs v0.4.3 // indirect
github.com/ipfs/go-unixfsnode v1.5.2 // indirect
@ -119,7 +119,7 @@ require (
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-doh-resolver v0.4.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.21.1 // indirect
github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.9.0 // indirect
@ -167,7 +167,7 @@ require (
github.com/quic-go/qtls-go1-19 v0.2.1 // indirect
github.com/quic-go/qtls-go1-20 v0.1.1 // indirect
github.com/quic-go/quic-go v0.33.0 // indirect
github.com/quic-go/webtransport-go v0.5.1 // indirect
github.com/quic-go/webtransport-go v0.5.2 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/samber/lo v1.36.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect

View File

@ -569,8 +569,8 @@ github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2
github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg=
github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A=
github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24=
github.com/ipfs/go-libipfs v0.6.1-0.20230222011044-7b201415a176 h1:QJvWxLRKucrAo7W2vLz5FA2iLKwW6WkHWn8AL8kXAUU=
github.com/ipfs/go-libipfs v0.6.1-0.20230222011044-7b201415a176/go.mod h1:UjjDIuehp2GzlNP0HEr5I9GfFT7zWgst+YfpUEIThtw=
github.com/ipfs/go-libipfs v0.6.1-0.20230228004237-36918f45f260 h1:QRLcCoITO9ZQo2pvjmrfngqKhUKjPopBva3MVH62LT8=
github.com/ipfs/go-libipfs v0.6.1-0.20230228004237-36918f45f260/go.mod h1:3OoEQs95UkqFEf65SbRDpiMwuzI+C/jTsYQaHfBbJXI=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk=
github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
@ -598,8 +598,8 @@ github.com/ipfs/go-mfs v0.2.1/go.mod h1:Woj80iuw4ajDnIP6+seRaoHpPsc9hmL0pk/nDNDW
github.com/ipfs/go-namesys v0.7.0 h1:xqosk71GIVRkFDtF2UNRcXn4LdNeo7tzuy8feHD6NbU=
github.com/ipfs/go-namesys v0.7.0/go.mod h1:KYSZBVZG3VJC34EfqqJPG7T48aWgxseoMPAPA5gLyyQ=
github.com/ipfs/go-path v0.2.1/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I=
github.com/ipfs/go-path v0.3.0 h1:tkjga3MtpXyM5v+3EbRvOHEoo+frwi4oumw5K+KYWyA=
github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I=
github.com/ipfs/go-path v0.3.1 h1:wkeaCWE/NTuuPGlEkLTsED5UkzfKYZpxaFFPgk8ZVLE=
github.com/ipfs/go-path v0.3.1/go.mod h1:eNLsxJEEMxn/CDzUJ6wuNl+6No6tEUhOZcPKsZsYX0E=
github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU=
github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg=
@ -730,10 +730,10 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
github.com/libp2p/go-libp2p v0.26.0 h1:0FE0bP9/G9YADjruqoFvf1snBBFvrdh1MmTuEeUkl2E=
github.com/libp2p/go-libp2p v0.26.0/go.mod h1:R8N+XhwPDPLNb4TKboKJKnDeg9vPw8+zlC6g793dTGw=
github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw=
github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI=
github.com/libp2p/go-libp2p v0.26.2 h1:eHEoW/696FP7/6DxOvcrKfTD6Bi0DExxiMSZUJxswA0=
github.com/libp2p/go-libp2p v0.26.2/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8=
github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE=
github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI=
@ -1177,8 +1177,8 @@ github.com/quic-go/qtls-go1-20 v0.1.1 h1:KbChDlg82d3IHqaj2bn6GfKRj84Per2VGf5XV3w
github.com/quic-go/qtls-go1-20 v0.1.1/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow=
github.com/quic-go/webtransport-go v0.5.1/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk=
github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=

8
go.mod
View File

@ -45,7 +45,7 @@ require (
github.com/ipfs/go-ipld-git v0.1.1
github.com/ipfs/go-ipld-legacy v0.1.1
github.com/ipfs/go-ipns v0.3.0
github.com/ipfs/go-libipfs v0.6.1-0.20230222011044-7b201415a176
github.com/ipfs/go-libipfs v0.6.1-0.20230228004237-36918f45f260
github.com/ipfs/go-log v1.0.5
github.com/ipfs/go-log/v2 v2.5.1
github.com/ipfs/go-merkledag v0.9.0
@ -53,7 +53,7 @@ require (
github.com/ipfs/go-metrics-prometheus v0.0.2
github.com/ipfs/go-mfs v0.2.1
github.com/ipfs/go-namesys v0.7.0
github.com/ipfs/go-path v0.3.0
github.com/ipfs/go-path v0.3.1
github.com/ipfs/go-pinning-service-http-client v0.1.2
github.com/ipfs/go-unixfs v0.4.3
github.com/ipfs/go-unixfsnode v1.5.2
@ -67,7 +67,7 @@ require (
github.com/jbenet/go-temp-err-catcher v0.1.0
github.com/jbenet/goprocess v0.1.4
github.com/libp2p/go-doh-resolver v0.4.0
github.com/libp2p/go-libp2p v0.26.0
github.com/libp2p/go-libp2p v0.26.2
github.com/libp2p/go-libp2p-http v0.4.0
github.com/libp2p/go-libp2p-kad-dht v0.21.1
github.com/libp2p/go-libp2p-kbucket v0.5.0
@ -210,7 +210,7 @@ require (
github.com/quic-go/qtls-go1-19 v0.2.1 // indirect
github.com/quic-go/qtls-go1-20 v0.1.1 // indirect
github.com/quic-go/quic-go v0.33.0 // indirect
github.com/quic-go/webtransport-go v0.5.1 // indirect
github.com/quic-go/webtransport-go v0.5.2 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/samber/lo v1.36.0 // indirect

16
go.sum
View File

@ -591,8 +591,8 @@ github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2
github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg=
github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A=
github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24=
github.com/ipfs/go-libipfs v0.6.1-0.20230222011044-7b201415a176 h1:QJvWxLRKucrAo7W2vLz5FA2iLKwW6WkHWn8AL8kXAUU=
github.com/ipfs/go-libipfs v0.6.1-0.20230222011044-7b201415a176/go.mod h1:UjjDIuehp2GzlNP0HEr5I9GfFT7zWgst+YfpUEIThtw=
github.com/ipfs/go-libipfs v0.6.1-0.20230228004237-36918f45f260 h1:QRLcCoITO9ZQo2pvjmrfngqKhUKjPopBva3MVH62LT8=
github.com/ipfs/go-libipfs v0.6.1-0.20230228004237-36918f45f260/go.mod h1:3OoEQs95UkqFEf65SbRDpiMwuzI+C/jTsYQaHfBbJXI=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk=
github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
@ -622,8 +622,8 @@ github.com/ipfs/go-mfs v0.2.1/go.mod h1:Woj80iuw4ajDnIP6+seRaoHpPsc9hmL0pk/nDNDW
github.com/ipfs/go-namesys v0.7.0 h1:xqosk71GIVRkFDtF2UNRcXn4LdNeo7tzuy8feHD6NbU=
github.com/ipfs/go-namesys v0.7.0/go.mod h1:KYSZBVZG3VJC34EfqqJPG7T48aWgxseoMPAPA5gLyyQ=
github.com/ipfs/go-path v0.2.1/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I=
github.com/ipfs/go-path v0.3.0 h1:tkjga3MtpXyM5v+3EbRvOHEoo+frwi4oumw5K+KYWyA=
github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I=
github.com/ipfs/go-path v0.3.1 h1:wkeaCWE/NTuuPGlEkLTsED5UkzfKYZpxaFFPgk8ZVLE=
github.com/ipfs/go-path v0.3.1/go.mod h1:eNLsxJEEMxn/CDzUJ6wuNl+6No6tEUhOZcPKsZsYX0E=
github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU=
github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg=
@ -760,8 +760,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
github.com/libp2p/go-libp2p v0.26.0 h1:0FE0bP9/G9YADjruqoFvf1snBBFvrdh1MmTuEeUkl2E=
github.com/libp2p/go-libp2p v0.26.0/go.mod h1:R8N+XhwPDPLNb4TKboKJKnDeg9vPw8+zlC6g793dTGw=
github.com/libp2p/go-libp2p v0.26.2 h1:eHEoW/696FP7/6DxOvcrKfTD6Bi0DExxiMSZUJxswA0=
github.com/libp2p/go-libp2p v0.26.2/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8=
github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw=
github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI=
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
@ -1227,8 +1227,8 @@ github.com/quic-go/qtls-go1-20 v0.1.1 h1:KbChDlg82d3IHqaj2bn6GfKRj84Per2VGf5XV3w
github.com/quic-go/qtls-go1-20 v0.1.1/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow=
github.com/quic-go/webtransport-go v0.5.1/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk=
github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=

View File

@ -126,10 +126,10 @@ func TestGateway(t *testing.T) {
assert.Equal(t, 404, resp.StatusCode)
})
t.Run("GET IPFS invalid CID returns 400 (Bad Request)", func(t *testing.T) {
t.Run("GET IPFS invalid CID returns 500 (Internal Server Error)", func(t *testing.T) {
t.Parallel()
resp := client.Get("/ipfs/QmInvalid/pleaseDontAddMe")
assert.Equal(t, 400, resp.StatusCode)
assert.Equal(t, 500, resp.StatusCode)
})
t.Run("GET IPFS inlined zero-length data object returns ok code (200)", func(t *testing.T) {
@ -166,10 +166,10 @@ func TestGateway(t *testing.T) {
t.Parallel()
node.IPFS("name", "publish", "--allow-offline", cid)
t.Run("GET invalid IPNS root returns 400 (Bad Request)", func(t *testing.T) {
t.Run("GET invalid IPNS root returns 500 (Internal Server Error)", func(t *testing.T) {
t.Parallel()
resp := client.Get("/ipns/QmInvalid/pleaseDontAddMe")
assert.Equal(t, 400, resp.StatusCode)
assert.Equal(t, 500, resp.StatusCode)
})
t.Run("GET IPNS path succeeds", func(t *testing.T) {
@ -198,7 +198,7 @@ func TestGateway(t *testing.T) {
t.Run("GET invalid IPFS path errors", func(t *testing.T) {
t.Parallel()
assert.Equal(t, 400, client.Get("/ipfs/12345").StatusCode)
assert.Equal(t, 500, client.Get("/ipfs/12345").StatusCode)
})
t.Run("GET invalid path errors", func(t *testing.T) {

View File

@ -1,6 +1,7 @@
package harness
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@ -62,6 +63,18 @@ func BuildNode(ipfsBin, baseDir string, id int) *Node {
}
}
// WriteBytes writes b to a file named filename inside the node's
// directory, creating or truncating it. Any error creating or writing
// the file causes a panic (harness code is panic-on-error by design).
func (n *Node) WriteBytes(filename string, b []byte) {
	target, err := os.Create(filepath.Join(n.Dir, filename))
	if err != nil {
		panic(err)
	}
	defer target.Close()
	if _, err := io.Copy(target, bytes.NewReader(b)); err != nil {
		panic(err)
	}
}
func (n *Node) ReadConfig() *config.Config {
cfg, err := serial.Load(filepath.Join(n.Dir, "config"))
if err != nil {

View File

@ -38,6 +38,10 @@ type RunResult struct {
Cmd *exec.Cmd
}
// ExitCode returns the exit code of the finished command.
// NOTE(review): os.ProcessState.ExitCode reports -1 if the process has
// not exited or was killed by a signal — callers assume the command has
// already completed; confirm r.Cmd has been waited on before calling.
func (r *RunResult) ExitCode() int {
return r.Cmd.ProcessState.ExitCode()
}
func environToMap(environ []string) map[string]string {
m := map[string]string{}
for _, e := range environ {

377
test/cli/rcmgr_test.go Normal file
View File

@ -0,0 +1,377 @@
package cli
import (
"encoding/json"
"testing"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/node/libp2p"
"github.com/ipfs/kubo/test/cli/harness"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRcmgr exercises kubo's go-libp2p resource-manager integration
// through the CLI: `ipfs swarm limit` / `ipfs swarm stats`, per-scope
// limit files, config-based limits, the connection allowlist, and the
// daemon's startup validation of ConnMgr vs. ResourceMgr limits.
func TestRcmgr(t *testing.T) {
t.Parallel()
// With the resource manager disabled, both limit and stats commands
// must fail with a "missing ResourceMgr" error.
t.Run("Resource manager disabled", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ResourceMgr.Enabled = config.False
})
node.StartDaemon()
t.Run("swarm limit should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "limit", "system")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.Lines()[0], "missing ResourceMgr")
})
t.Run("swarm stats should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "stats", "all")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.Lines()[0], "missing ResourceMgr")
})
})
// NOTE(review): this subtest is byte-for-byte identical to
// "Resource manager disabled" above — it disables the ResourceMgr
// rather than starting the daemon offline. Presumably it should start
// the daemon with --offline (and default ResourceMgr config); confirm
// the intended scenario.
t.Run("Node in offline mode", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ResourceMgr.Enabled = config.False
})
node.StartDaemon()
t.Run("swarm limit should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "limit", "system")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.Lines()[0], "missing ResourceMgr")
})
t.Run("swarm stats should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "stats", "all")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.Lines()[0], "missing ResourceMgr")
})
})
// NOTE(review): unlike its siblings, this subtest never calls
// t.Parallel() — confirm whether running it serially is intentional.
// A high ConnMgr HighWater should scale the computed default limits up.
t.Run("Very high connmgr highwater", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
})
node.StartDaemon()
res := node.RunIPFS("swarm", "limit", "system", "--enc=json")
require.Equal(t, 0, res.ExitCode())
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.GreaterOrEqual(t, limits.ConnsInbound, 2000)
assert.GreaterOrEqual(t, limits.StreamsInbound, 2000)
})
// Sanity checks on the computed defaults and the reset/stats surface.
t.Run("default configuration", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(1000)
})
node.StartDaemon()
t.Run("conns and streams are above 800 for default connmgr settings", func(t *testing.T) {
res := node.RunIPFS("swarm", "limit", "system", "--enc=json")
require.Equal(t, 0, res.ExitCode())
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.GreaterOrEqual(t, limits.ConnsInbound, 800)
assert.GreaterOrEqual(t, limits.StreamsInbound, 800)
})
t.Run("limits|stats should succeed", func(t *testing.T) {
res := node.RunIPFS("swarm", "limit", "all")
assert.Equal(t, 0, res.ExitCode())
limits := map[string]rcmgr.ResourceLimits{}
err := json.Unmarshal(res.Stdout.Bytes(), &limits)
require.NoError(t, err)
// All System/Transient limits should be strictly positive by default.
assert.Greater(t, limits["System"].Memory, int64(0))
assert.Greater(t, limits["System"].FD, 0)
assert.Greater(t, limits["System"].Conns, 0)
assert.Greater(t, limits["System"].ConnsInbound, 0)
assert.Greater(t, limits["System"].ConnsOutbound, 0)
assert.Greater(t, limits["System"].Streams, 0)
assert.Greater(t, limits["System"].StreamsInbound, 0)
assert.Greater(t, limits["System"].StreamsOutbound, 0)
assert.Greater(t, limits["Transient"].Memory, int64(0))
})
t.Run("resetting limits should produce the same default limits", func(t *testing.T) {
resetRes := node.RunIPFS("swarm", "limit", "system", "--reset", "--enc=json")
require.Equal(t, 0, resetRes.ExitCode())
limitRes := node.RunIPFS("swarm", "limit", "system", "--enc=json")
require.Equal(t, 0, limitRes.ExitCode())
assert.Equal(t, resetRes.Stdout.Bytes(), limitRes.Stdout.Bytes())
})
t.Run("swarm stats system with filter should fail", func(t *testing.T) {
res := node.RunIPFS("swarm", "stats", "system", "--min-used-limit-perc=99")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.Lines()[0], `Error: "min-used-limit-perc" can only be used when scope is "all"`)
})
t.Run("swarm limit reset on map values should work", func(t *testing.T) {
resetRes := node.RunIPFS("swarm", "limit", "peer:12D3KooWL7i1T9VSPeF8AgQApbyM51GNKZsYPvNvL347aMDmvNzG", "--reset", "--enc=json")
require.Equal(t, 0, resetRes.ExitCode())
limitRes := node.RunIPFS("swarm", "limit", "peer:12D3KooWL7i1T9VSPeF8AgQApbyM51GNKZsYPvNvL347aMDmvNzG", "--enc=json")
require.Equal(t, 0, limitRes.ExitCode())
assert.Equal(t, resetRes.Stdout.Bytes(), limitRes.Stdout.Bytes())
})
t.Run("scope is required using reset flags", func(t *testing.T) {
res := node.RunIPFS("swarm", "limit", "--reset")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.Lines()[0], `Error: argument "scope" is required`)
})
// A freshly started idle daemon is expected to report zero usage.
t.Run("swarm stats works", func(t *testing.T) {
res := node.RunIPFS("swarm", "stats", "all", "--enc=json")
require.Equal(t, 0, res.ExitCode())
stats := libp2p.NetStatOut{}
err := json.Unmarshal(res.Stdout.Bytes(), &stats)
require.NoError(t, err)
// every scope has the same fields, so we only inspect system
assert.Equal(t, rcmgr.LimitVal64(0), stats.System.Memory)
assert.Equal(t, rcmgr.LimitVal(0), stats.System.FD)
assert.Equal(t, rcmgr.LimitVal(0), stats.System.Conns)
assert.Equal(t, rcmgr.LimitVal(0), stats.System.ConnsInbound)
assert.Equal(t, rcmgr.LimitVal(0), stats.System.ConnsOutbound)
assert.Equal(t, rcmgr.LimitVal(0), stats.System.Streams)
assert.Equal(t, rcmgr.LimitVal(0), stats.System.StreamsInbound)
assert.Equal(t, rcmgr.LimitVal(0), stats.System.StreamsOutbound)
assert.Equal(t, rcmgr.LimitVal64(0), stats.Transient.Memory)
})
})
// Limits set via `ipfs config` before startup should take effect on start.
t.Run("set system conns limit while daemon is not running", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
res := node.RunIPFS("config", "--json", "Swarm.ResourceMgr.Limits.System.Conns", "99999")
require.Equal(t, 0, res.ExitCode())
t.Run("set an invalid limit which should result in a failure", func(t *testing.T) {
res := node.RunIPFS("config", "--json", "Swarm.ResourceMgr.Limits.System.Conns", "asdf")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "failed to unmarshal")
})
node.StartDaemon()
t.Run("new system conns limit is applied", func(t *testing.T) {
res := node.RunIPFS("swarm", "limit", "system", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.Equal(t, limits.Conns, rcmgr.LimitVal(99999))
})
})
// Limits applied via file while running should persist to config and
// be reflected by `swarm limit`.
t.Run("set the system memory limit while the daemon is running", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init().StartDaemon()
updateLimitsWithFile(t, node, "system", func(limits *rcmgr.ResourceLimits) {
limits.Memory = 99998
})
assert.Equal(t, rcmgr.LimitVal64(99998), node.ReadConfig().Swarm.ResourceMgr.Limits.System.Memory)
res := node.RunIPFS("swarm", "limit", "system", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.Equal(t, rcmgr.LimitVal64(99998), limits.Memory)
})
t.Run("smoke test transient scope", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init().StartDaemon()
updateLimitsWithFile(t, node, "transient", func(limits *rcmgr.ResourceLimits) {
limits.Memory = 88888
})
res := node.RunIPFS("swarm", "limit", "transient", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.Equal(t, rcmgr.LimitVal64(88888), limits.Memory)
})
t.Run("smoke test service scope", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init().StartDaemon()
updateLimitsWithFile(t, node, "svc:foo", func(limits *rcmgr.ResourceLimits) {
limits.Memory = 77777
})
res := node.RunIPFS("swarm", "limit", "svc:foo", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.Equal(t, rcmgr.LimitVal64(77777), limits.Memory)
})
t.Run("smoke test protocol scope", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init().StartDaemon()
updateLimitsWithFile(t, node, "proto:foo", func(limits *rcmgr.ResourceLimits) {
limits.Memory = 66666
})
res := node.RunIPFS("swarm", "limit", "proto:foo", "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.Equal(t, rcmgr.LimitVal64(66666), limits.Memory)
})
t.Run("smoke test peer scope", func(t *testing.T) {
validPeerID := "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"
node := harness.NewT(t).NewNode().Init().StartDaemon()
updateLimitsWithFile(t, node, "peer:"+validPeerID, func(limits *rcmgr.ResourceLimits) {
limits.Memory = 66666
})
res := node.RunIPFS("swarm", "limit", "peer:"+validPeerID, "--enc=json")
limits := unmarshalLimits(t, res.Stdout.Bytes())
assert.Equal(t, rcmgr.LimitVal64(66666), limits.Memory)
// NOTE(review): t.Parallel() mid-body pauses this subtest here (after
// the serial setup above) and resumes it in the parallel phase; the
// nested subtests below then run after resumption. Unusual placement —
// confirm it is intentional rather than a misplaced line.
t.Parallel()
t.Run("getting limit for invalid peer ID fails", func(t *testing.T) {
res := node.RunIPFS("swarm", "limit", "peer:foo")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "invalid peer ID")
})
t.Run("setting limit for invalid peer ID fails", func(t *testing.T) {
filename := "invalid-peer-id.json"
node.WriteBytes(filename, []byte(`{"Memory":"99"}`))
res := node.RunIPFS("swarm", "limit", "peer:foo", filename)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "invalid peer ID")
})
})
// NOTE(review): this subtest has an empty name — give it a descriptive
// one (e.g. "allowlist and blocked connections") so failures are
// attributable in test output.
t.Run("", func(t *testing.T) {
nodes := harness.NewT(t).NewNodes(3).Init()
node0, node1, node2 := nodes[0], nodes[1], nodes[2]
// peerID0, peerID1, peerID2 := node0.PeerID(), node1.PeerID(), node2.PeerID()
peerID1, peerID2 := node1.PeerID().String(), node2.PeerID().String()
// Allowlist node2 for node0 via a catch-all multiaddr + its peer ID.
node0.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ResourceMgr.Enabled = config.True
cfg.Swarm.ResourceMgr.Allowlist = []string{"/ip4/0.0.0.0/ipcidr/0/p2p/" + peerID2}
})
nodes.StartDaemons()
// change system limits on node 0
updateLimitsWithFile(t, node0, "system", func(limits *rcmgr.ResourceLimits) {
limits.Conns = rcmgr.BlockAllLimit
limits.ConnsInbound = rcmgr.BlockAllLimit
limits.ConnsOutbound = rcmgr.BlockAllLimit
})
t.Parallel()
t.Run("node 0 should fail to connect to node 1", func(t *testing.T) {
res := node0.Runner.Run(harness.RunRequest{
Path: node0.IPFSBin,
Args: []string{"swarm", "connect", node1.SwarmAddrs()[0].String()},
})
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "failed to find any peer in table")
})
t.Run("node 0 should connect to node 2 since it is allowlisted", func(t *testing.T) {
res := node0.Runner.Run(harness.RunRequest{
Path: node0.IPFSBin,
Args: []string{"swarm", "connect", node2.SwarmAddrs()[0].String()},
})
assert.Equal(t, 0, res.ExitCode())
})
t.Run("node 0 should fail to ping node 1", func(t *testing.T) {
res := node0.RunIPFS("ping", "-n2", peerID1)
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "Error: ping failed")
})
t.Run("node 0 should be able to ping node 2", func(t *testing.T) {
res := node0.RunIPFS("ping", "-n2", peerID2)
assert.Equal(t, 0, res.ExitCode())
})
})
// Startup validation: the daemon must refuse to start when ConnMgr's
// HighWater is not safely below the configured inbound resource limits.
t.Run("daemon should refuse to start if connmgr.highwater < resources inbound", func(t *testing.T) {
t.Parallel()
t.Run("system conns", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ResourceMgr.Limits = &rcmgr.PartialLimitConfig{}
cfg.Swarm.ResourceMgr.Limits.System.Conns = 128
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(128)
cfg.Swarm.ConnMgr.LowWater = config.NewOptionalInteger(64)
})
res := node.RunIPFS("daemon")
assert.Equal(t, 1, res.ExitCode())
})
t.Run("system conns inbound", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ResourceMgr.Limits = &rcmgr.PartialLimitConfig{}
cfg.Swarm.ResourceMgr.Limits.System.ConnsInbound = 128
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(128)
cfg.Swarm.ConnMgr.LowWater = config.NewOptionalInteger(64)
})
res := node.RunIPFS("daemon")
assert.Equal(t, 1, res.ExitCode())
})
t.Run("system streams", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ResourceMgr.Limits = &rcmgr.PartialLimitConfig{}
cfg.Swarm.ResourceMgr.Limits.System.Streams = 128
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(128)
cfg.Swarm.ConnMgr.LowWater = config.NewOptionalInteger(64)
})
res := node.RunIPFS("daemon")
assert.Equal(t, 1, res.ExitCode())
})
t.Run("system streams inbound", func(t *testing.T) {
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Swarm.ResourceMgr.Limits = &rcmgr.PartialLimitConfig{}
cfg.Swarm.ResourceMgr.Limits.System.StreamsInbound = 128
cfg.Swarm.ConnMgr.HighWater = config.NewOptionalInteger(128)
cfg.Swarm.ConnMgr.LowWater = config.NewOptionalInteger(64)
})
res := node.RunIPFS("daemon")
assert.Equal(t, 1, res.ExitCode())
})
})
}
// updateLimitsWithFile round-trips the limits of one resource-manager scope
// through a JSON file: it reads the current limits for the scope named by
// `limit` via `ipfs swarm limit`, lets the caller mutate them through f,
// writes the result to "<limit>.json" in the node's directory, and applies
// that file back with `ipfs swarm limit <scope> <file>`, asserting the CLI
// accepts it.
func updateLimitsWithFile(t *testing.T, node *harness.Node, limit string, f func(*rcmgr.ResourceLimits)) {
	current := node.RunIPFS("swarm", "limit", limit)
	limits := unmarshalLimits(t, current.Stdout.Bytes())

	// Apply the caller's mutation, then serialize the edited limits.
	f(limits)
	encoded, err := json.Marshal(limits)
	require.NoError(t, err)

	// Feed the edited limits back through the CLI as a file argument.
	filename := limit + ".json"
	node.WriteBytes(filename, encoded)
	applied := node.RunIPFS("swarm", "limit", limit, filename)
	assert.Equal(t, 0, applied.ExitCode())
}
// unmarshalLimits decodes a JSON-encoded limit document (as printed by
// `ipfs swarm limit`) into an rcmgr.ResourceLimits value, failing the test
// immediately if the bytes are not valid JSON for that shape.
func unmarshalLimits(t *testing.T, b []byte) *rcmgr.ResourceLimits {
	var limits rcmgr.ResourceLimits
	require.NoError(t, json.Unmarshal(b, &limits))
	return &limits
}

View File

@ -47,21 +47,6 @@ $(d)/test-results/sharness.xml: $(T_$(d))
@(cd $(@D)/.. && ./lib/test-aggregate-junit-reports.sh)
.PHONY: $(d)/test-results/sharness.xml
$(d)/download-saxon:
@echo "*** $@ ***"
@(cd $(@D) && ./lib/download-saxon.sh)
.PHONY: $(d)/download-saxon
$(d)/test-results/sharness-html: $(d)/test-results/sharness.xml $(d)/download-saxon
@echo "*** $@ ***"
@(cd $(@D)/.. && ./lib/test-generate-junit-html.sh frames)
.PHONY: $(d)/test-results/sharness-html
$(d)/test-results/sharness.html: $(d)/test-results/sharness.xml $(d)/download-saxon
@echo "*** $@ ***"
@(cd $(@D)/.. && ./lib/test-generate-junit-html.sh no-frames)
.PHONY: $(d)/test-results/sharness.html
$(d)/clean-test-results:
rm -rf $(@D)/test-results
.PHONY: $(d)/clean-test-results

View File

@ -1,39 +0,0 @@
#!/bin/bash
# Download (and cache) the Saxon-HE jar plus the two XSL stylesheets used to
# render sharness JUnit XML reports as HTML. Each entry pins an exact commit
# URL together with the expected md5 of the artifact, formatted as
# "url=<URL>;md5=<MD5>".
dependencies=(
"url=https://raw.githubusercontent.com/pl-strflt/Saxon-HE/3e039cdbccf4efb9643736f34c839a3bae3402ae/11/Java/SaxonHE11-4J.zip;md5=8a4783d307c32c898f8995b8f337fd6b"
"url=https://raw.githubusercontent.com/pl-strflt/ant/c781f7d79b92cc55530245d9554682a47f46851e/src/etc/junit-frames-saxon.xsl;md5=6eb013566903a91e4959413f6ff144d0"
"url=https://raw.githubusercontent.com/pl-strflt/ant/c781f7d79b92cc55530245d9554682a47f46851e/src/etc/junit-noframes-saxon.xsl;md5=8d54882d5f9d32a7743ec675cc2e30ac"
)
# All downloaded artifacts are cached under this directory.
dependenciesdir="lib/dependencies"
mkdir -p "$dependenciesdir"
# get_md5 prints the md5 digest of the file given as $1.
get_md5() {
md5sum "$1" | cut -d ' ' -f 1
}
for dependency in "${dependencies[@]}"; do
# Split the "url=...;md5=..." entry into its two fields.
url="$(echo "$dependency" | cut -d ';' -f 1 | cut -d '=' -f 2)"
md5="$(echo "$dependency" | cut -d ';' -f 2 | cut -d '=' -f 2)"
filename="$(basename "$url")"
# Skip the download when a cached copy already matches the pinned md5.
if test -f "$dependenciesdir/$filename" && test "$(get_md5 "$dependenciesdir/$filename")" = "$md5"; then
echo "Using cached $filename"
else
echo "Downloading $filename"
curl -L --max-redirs 5 --retry 5 --no-progress-meter --output "$dependenciesdir/$filename" "$url"
# Verify the pinned checksum; abort on mismatch so a corrupted or
# tampered download is never used.
actual_md5="$(get_md5 "$dependenciesdir/$filename")"
if test "$actual_md5" != "$md5"; then
echo "Downloaded $filename has wrong md5sum ('$actual_md5' != '$md5')"
exit 1
fi
# Zip archives are unpacked into a directory named after the archive
# (filename minus its extension), replacing any previous unpack.
dirname=${filename%.*}
extension=${filename#$dirname.}
if test "$extension" = "zip"; then
echo "Removing old $dependenciesdir/$dirname"
rm -rf "$dependenciesdir/$dirname"
echo "Unzipping $dependenciesdir/$filename"
unzip "$dependenciesdir/$filename" -d "$dependenciesdir/$dirname"
fi
fi
done

View File

@ -1,20 +0,0 @@
#!/bin/bash
# Render test-results/sharness.xml to HTML with Saxon-HE.
# Usage: test-generate-junit-html.sh [frames|no-frames]

saxon_jar="lib/dependencies/SaxonHE11-4J/saxon-he-11.4.jar"
report_xml="test-results/sharness.xml"

if [ "$1" = "frames" ]; then
# Multi-file, frame-based report under test-results/sharness-html/.
java -jar "$saxon_jar" \
  -s:"$report_xml" \
  -xsl:lib/dependencies/junit-frames-saxon.xsl \
  output.dir=$(pwd)/test-results/sharness-html
elif [ "$1" = "no-frames" ]; then
# Single self-contained HTML page.
java -jar "$saxon_jar" \
  -s:"$report_xml" \
  -xsl:lib/dependencies/junit-noframes-saxon.xsl \
  -o:test-results/sharness.html
else
echo "Usage: $0 [frames|no-frames]"
exit 1
fi

View File

@ -9,26 +9,21 @@ test_description="Test symlink support on the HTTP gateway"
test_init_ipfs
test_launch_ipfs_daemon
test_expect_success "Create a test directory with symlinks" '
mkdir testfiles &&
echo "content" > testfiles/foo &&
ln -s foo testfiles/bar &&
test_cmp testfiles/foo testfiles/bar
'
test_expect_success "Add the test directory" '
HASH=$(ipfs add -Qr testfiles)
# Import test case
# See the static fixtures in ./t0113-gateway-symlink/
test_expect_success "Add the test directory with symlinks" '
ipfs dag import ../t0113-gateway-symlink/testfiles.car
'
ROOT_DIR_CID=QmWvY6FaqFMS89YAQ9NAPjVP4WZKA1qbHbicc9HeSKQTgt # ./testfiles/
test_expect_success "Test the directory listing" '
curl "$GWAY_ADDR/ipfs/$HASH/" > list_response &&
curl "$GWAY_ADDR/ipfs/$ROOT_DIR_CID/" > list_response &&
test_should_contain ">foo<" list_response &&
test_should_contain ">bar<" list_response
'
test_expect_success "Test the symlink" '
curl "$GWAY_ADDR/ipfs/$HASH/bar" > bar_actual &&
curl "$GWAY_ADDR/ipfs/$ROOT_DIR_CID/bar" > bar_actual &&
echo -n "foo" > bar_expected &&
test_cmp bar_expected bar_actual
'

View File

@ -0,0 +1,17 @@
# Dataset description/sources
- testfiles.car
- raw CARv1
generated with:
```sh
# using ipfs version 0.18.1
mkdir testfiles &&
echo "content" > testfiles/foo &&
ln -s foo testfiles/bar &&
ROOT_DIR_CID=$(ipfs add -Qr testfiles) &&
ipfs dag export $ROOT_DIR_CID > testfiles.car
# ROOT_DIR_CID=QmWvY6FaqFMS89YAQ9NAPjVP4WZKA1qbHbicc9HeSKQTgt
```

Binary file not shown.

View File

@ -91,27 +91,19 @@ test_expect_success "ipfs init" '
test_launch_ipfs_daemon_without_network
# Import test case
# See the static fixtures in ./t0114-gateway-subdomains/
test_expect_success "Add the test fixtures" '
ipfs dag import ../t0114-gateway-subdomains/fixtures.car
'
CID_VAL="hello"
CIDv1=bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am
CIDv0=QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN
# CIDv0to1 is necessary because raw-leaves are enabled by default during
# "ipfs add" with CIDv1 and disabled with CIDv0
test_expect_success "Add test text file" '
CID_VAL="hello"
CIDv1=$(echo $CID_VAL | ipfs add --cid-version 1 -Q)
CIDv0=$(echo $CID_VAL | ipfs add --cid-version 0 -Q)
CIDv0to1=$(echo "$CIDv0" | ipfs cid base32)
echo CIDv0to1=${CIDv0to1}
'
# Directory tree crafted to test for edge cases like "/ipfs/ipfs/ipns/bar"
test_expect_success "Add the test directory" '
mkdir -p testdirlisting/ipfs/ipns &&
echo "hello" > testdirlisting/hello &&
echo "text-file-content" > testdirlisting/ipfs/ipns/bar &&
mkdir -p testdirlisting/api &&
mkdir -p testdirlisting/ipfs &&
echo "I am a txt file" > testdirlisting/api/file.txt &&
echo "I am a txt file" > testdirlisting/ipfs/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 testdirlisting)
'
CIDv0to1=bafybeiffndsajwhk3lwjewwdxqntmjm4b5wxaaanokonsggenkbw6slwk4
CIDv1_TOO_LONG=bafkrgqhhyivzstcz3hhswshfjgy6ertgmnqeleynhwt4dlfsthi4hn7zgh4uvlsb5xncykzapi3ocd4lzogukir6ksdy6wzrnz6ohnv4aglcs
DIR_CID=bafybeiht6dtwk3les7vqm6ibpvz6qpohidvlshsfyr7l5mpysdw2vmbbhe # ./testdirlisting
test_expect_success "Publish test text file to IPNS using RSA keys" '
RSA_KEY=$(ipfs key gen --ipns-base=b58mh --type=rsa --size=2048 test_key_rsa | head -n1 | tr -d "\n")
@ -600,8 +592,6 @@ test_expect_success \
IPNS_KEY="test_key_ed25519"
IPNS_ED25519_B58MH=$(ipfs key list -l --ipns-base b58mh | grep $IPNS_KEY | cut -d" " -f1 | tr -d "\n")
IPNS_ED25519_B36CID=$(ipfs key list -l --ipns-base base36 | grep $IPNS_KEY | cut -d" " -f1 | tr -d "\n")
# sha512 will be over 63char limit, even when represented in Base36
CIDv1_TOO_LONG=$(echo $CID_VAL | ipfs add --cid-version 1 --hash sha2-512 -Q)
# local: *.localhost
test_localhost_gateway_response_should_contain \

View File

@ -0,0 +1,54 @@
# Dataset description/sources
- fixtures.car
- raw CARv1
generated with:
```sh
# using ipfs version 0.18.1
# CIDv0to1 is necessary because raw-leaves are enabled by default during
# "ipfs add" with CIDv1 and disabled with CIDv0
CID_VAL="hello"
CIDv1=$(echo $CID_VAL | ipfs add --cid-version 1 -Q)
CIDv0=$(echo $CID_VAL | ipfs add --cid-version 0 -Q)
CIDv0to1=$(echo "$CIDv0" | ipfs cid base32)
# sha512 will be over 63char limit, even when represented in Base36
CIDv1_TOO_LONG=$(echo $CID_VAL | ipfs add --cid-version 1 --hash sha2-512 -Q)
echo CIDv1=${CIDv1}
echo CIDv0=${CIDv0}
echo CIDv0to1=${CIDv0to1}
echo CIDv1_TOO_LONG=${CIDv1_TOO_LONG}
# Directory tree crafted to test for edge cases like "/ipfs/ipfs/ipns/bar"
mkdir -p testdirlisting/ipfs/ipns &&
echo "hello" > testdirlisting/hello &&
echo "text-file-content" > testdirlisting/ipfs/ipns/bar &&
mkdir -p testdirlisting/api &&
mkdir -p testdirlisting/ipfs &&
echo "I am a txt file" > testdirlisting/api/file.txt &&
echo "I am a txt file" > testdirlisting/ipfs/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 testdirlisting)
echo DIR_CID=${DIR_CID}
ipfs files mkdir /t0114/
ipfs files cp /ipfs/${CIDv1} /t0114/
ipfs files cp /ipfs/${CIDv0} /t0114/
ipfs files cp /ipfs/${CIDv0to1} /t0114/
ipfs files cp /ipfs/${DIR_CID} /t0114/
ipfs files cp /ipfs/${CIDv1_TOO_LONG} /t0114/
ROOT=`ipfs files stat /t0114/ --hash`
ipfs dag export ${ROOT} > ./fixtures.car
# CID_VAL="hello"
# CIDv1=bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am
# CIDv0=QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN
# CIDv0to1=bafybeiffndsajwhk3lwjewwdxqntmjm4b5wxaaanokonsggenkbw6slwk4
# CIDv1_TOO_LONG=bafkrgqhhyivzstcz3hhswshfjgy6ertgmnqeleynhwt4dlfsthi4hn7zgh4uvlsb5xncykzapi3ocd4lzogukir6ksdy6wzrnz6ohnv4aglcs
# DIR_CID=bafybeiht6dtwk3les7vqm6ibpvz6qpohidvlshsfyr7l5mpysdw2vmbbhe # ./testdirlisting
```

Binary file not shown.

View File

@ -18,20 +18,14 @@ test_expect_success "ipfs init" '
test_launch_ipfs_daemon_without_network
# Import test case
# See the static fixtures in ./t0115-gateway-dir-listing/
test_expect_success "Add the test directory" '
mkdir -p rootDir/ipfs &&
mkdir -p rootDir/ipns &&
mkdir -p rootDir/api &&
mkdir -p rootDir/ą/ę &&
echo "I am a txt file on path with utf8" > rootDir/ą/ę/file-źł.txt &&
echo "I am a txt file in confusing /api dir" > rootDir/api/file.txt &&
echo "I am a txt file in confusing /ipfs dir" > rootDir/ipfs/file.txt &&
echo "I am a txt file in confusing /ipns dir" > rootDir/ipns/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 rootDir) &&
FILE_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Hash) &&
FILE_SIZE=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Size)
echo "$FILE_CID / $FILE_SIZE"
ipfs dag import ../t0115-gateway-dir-listing/fixtures.car
'
DIR_CID=bafybeig6ka5mlwkl4subqhaiatalkcleo4jgnr3hqwvpmsqfca27cijp3i # ./rootDir/
FILE_CID=bafkreialihlqnf5uwo4byh4n3cmwlntwqzxxs2fg5vanqdi3d7tb2l5xkm # ./rootDir/ą/ę/file-źł.txt
FILE_SIZE=34
## ============================================================================
## Test dir listing on path gateway (eg. 127.0.0.1:8080/ipfs/)

View File

@ -0,0 +1,32 @@
# Dataset description/sources
- fixtures.car
- raw CARv1
generated with:
```sh
# using ipfs version 0.18.1
mkdir -p rootDir/ipfs &&
mkdir -p rootDir/ipns &&
mkdir -p rootDir/api &&
mkdir -p rootDir/ą/ę &&
echo "I am a txt file on path with utf8" > rootDir/ą/ę/file-źł.txt &&
echo "I am a txt file in confusing /api dir" > rootDir/api/file.txt &&
echo "I am a txt file in confusing /ipfs dir" > rootDir/ipfs/file.txt &&
echo "I am a txt file in confusing /ipns dir" > rootDir/ipns/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 rootDir) &&
FILE_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Hash) &&
FILE_SIZE=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Size)
echo "$FILE_CID / $FILE_SIZE"
echo DIR_CID=${DIR_CID}
echo FILE_CID=${FILE_CID}
echo FILE_SIZE=${FILE_SIZE}
ipfs dag export ${DIR_CID} > ./fixtures.car
# DIR_CID=bafybeig6ka5mlwkl4subqhaiatalkcleo4jgnr3hqwvpmsqfca27cijp3i # ./rootDir/
# FILE_CID=bafkreialihlqnf5uwo4byh4n3cmwlntwqzxxs2fg5vanqdi3d7tb2l5xkm # ./rootDir/ą/ę/file-źł.txt
# FILE_SIZE=34
```

Binary file not shown.

View File

@ -25,15 +25,16 @@ test_launch_ipfs_daemon_without_network
# Caching of things like raw blocks, CARs, dag-json and dag-cbor
# is tested in their respective suites.
# Import test case
# See the static fixtures in ./t0116-gateway-cache/
test_expect_success "Add the test directory" '
mkdir -p root2/root3/root4 &&
echo "hello" > root2/root3/root4/index.html &&
ROOT1_CID=$(ipfs add -Qrw --cid-version 1 root2)
ROOT2_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2 | cut -d "/" -f3)
ROOT3_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2/root3 | cut -d "/" -f3)
ROOT4_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2/root3/root4 | cut -d "/" -f3)
FILE_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2/root3/root4/index.html | cut -d "/" -f3)
ipfs dag import ../t0116-gateway-cache/fixtures.car
'
ROOT1_CID=bafybeib3ffl2teiqdncv3mkz4r23b5ctrwkzrrhctdbne6iboayxuxk5ui # ./
ROOT2_CID=bafybeih2w7hjocxjg6g2ku25hvmd53zj7og4txpby3vsusfefw5rrg5sii # ./root2
ROOT3_CID=bafybeiawdvhmjcz65x5egzx4iukxc72hg4woks6v6fvgyupiyt3oczk5ja # ./root2/root3
ROOT4_CID=bafybeifq2rzpqnqrsdupncmkmhs3ckxxjhuvdcbvydkgvch3ms24k5lo7q # ./root2/root3/root4
FILE_CID=bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am # ./root2/root3/root4/index.html
test_expect_success "Prepare IPNS unixfs content path for testing" '
TEST_IPNS_ID=$(ipfs key gen --ipns-base=base36 --type=ed25519 cache_test_key | head -n1 | tr -d "\n")

View File

@ -0,0 +1,31 @@
# Dataset description/sources
- fixtures.car
- raw CARv1
generated with:
```sh
# using ipfs version 0.18.1
mkdir -p root2/root3/root4 &&
echo "hello" > root2/root3/root4/index.html &&
ROOT1_CID=$(ipfs add -Qrw --cid-version 1 root2)
ROOT2_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2 | cut -d "/" -f3)
ROOT3_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2/root3 | cut -d "/" -f3)
ROOT4_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2/root3/root4 | cut -d "/" -f3)
FILE_CID=$(ipfs resolve -r /ipfs/$ROOT1_CID/root2/root3/root4/index.html | cut -d "/" -f3)
echo ROOT1_CID=${ROOT1_CID}
echo ROOT2_CID=${ROOT2_CID}
echo ROOT3_CID=${ROOT3_CID}
echo ROOT4_CID=${ROOT4_CID}
echo FILE_CID=${FILE_CID}
ipfs dag export ${ROOT1_CID} > ./fixtures.car
# ROOT1_CID=bafybeib3ffl2teiqdncv3mkz4r23b5ctrwkzrrhctdbne6iboayxuxk5ui # ./
# ROOT2_CID=bafybeih2w7hjocxjg6g2ku25hvmd53zj7og4txpby3vsusfefw5rrg5sii # ./root2
# ROOT3_CID=bafybeiawdvhmjcz65x5egzx4iukxc72hg4woks6v6fvgyupiyt3oczk5ja # ./root2/root3
# ROOT4_CID=bafybeifq2rzpqnqrsdupncmkmhs3ckxxjhuvdcbvydkgvch3ms24k5lo7q # ./root2/root3/root4
# FILE_CID=bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am # ./root2/root3/root4/index.html
```

Binary file not shown.

View File

@ -7,12 +7,13 @@ test_description="Test HTTP Gateway Raw Block (application/vnd.ipld.raw) Support
test_init_ipfs
test_launch_ipfs_daemon_without_network
test_expect_success "Create text fixtures" '
mkdir -p dir &&
echo "hello application/vnd.ipld.raw" > dir/ascii.txt &&
ROOT_DIR_CID=$(ipfs add -Qrw --cid-version 1 dir) &&
FILE_CID=$(ipfs resolve -r /ipfs/$ROOT_DIR_CID/dir/ascii.txt | cut -d "/" -f3)
# Import test case
# See the static fixtures in ./t0117-gateway-block/
test_expect_success "Add the dir test directory" '
ipfs dag import ../t0117-gateway-block/fixtures.car
'
ROOT_DIR_CID=bafybeie72edlprgtlwwctzljf6gkn2wnlrddqjbkxo3jomh4n7omwblxly # ./
FILE_CID=bafkreihhpc5y2pqvl5rbe5uuyhqjouybfs3rvlmisccgzue2kkt5zq6upq # ./dir/ascii.txt
# GET unixfs dir root block and compare it with `ipfs block get` output

View File

@ -0,0 +1,21 @@
# Dataset description/sources
- fixtures.car
- raw CARv1
generated with:
```sh
# using ipfs version 0.18.1
mkdir -p dir &&
echo "hello application/vnd.ipld.raw" > dir/ascii.txt &&
ROOT_DIR_CID=$(ipfs add -Qrw --cid-version 1 dir) &&
FILE_CID=$(ipfs resolve -r /ipfs/$ROOT_DIR_CID/dir/ascii.txt | cut -d "/" -f3) &&
ipfs dag export $ROOT_DIR_CID > fixtures.car
echo ROOT_DIR_CID=${ROOT_DIR_CID} # ./
echo FILE_CID=${FILE_CID} # ./dir/ascii.txt
# ROOT_DIR_CID=bafybeie72edlprgtlwwctzljf6gkn2wnlrddqjbkxo3jomh4n7omwblxly # ./
# FILE_CID=bafkreihhpc5y2pqvl5rbe5uuyhqjouybfs3rvlmisccgzue2kkt5zq6upq # ./dir/ascii.txt
```

Binary file not shown.

View File

@ -11,15 +11,14 @@ test_launch_ipfs_daemon_without_network
# but if we have a small file that fits into a single block, and export its CID
# we will get a CAR that is a deterministic array of bytes.
test_expect_success "Create a deterministic CAR for testing" '
mkdir -p subdir &&
echo "hello application/vnd.ipld.car" > subdir/ascii.txt &&
ROOT_DIR_CID=$(ipfs add -Qrw --cid-version 1 subdir) &&
FILE_CID=$(ipfs resolve -r /ipfs/$ROOT_DIR_CID/subdir/ascii.txt | cut -d "/" -f3) &&
ipfs dag export $ROOT_DIR_CID > test-dag.car &&
ipfs dag export $FILE_CID > deterministic.car &&
purge_blockstore
'
# Import test case
# See the static fixtures in ./t0118-gateway-car/
test_expect_success "Add the dir test directory" '
cp ../t0118-gateway-car/test-dag.car ./test-dag.car &&
cp ../t0118-gateway-car/deterministic.car ./deterministic.car
'
ROOT_DIR_CID=bafybeiefu3d7oytdumk5v7gn6s7whpornueaw7m7u46v2o6omsqcrhhkzi # ./
FILE_CID=bafkreifkam6ns4aoolg3wedr4uzrs3kvq66p4pecirz6y2vlrngla62mxm # /subdir/ascii.txt
# GET a reference DAG with dag-cbor+dag-pb+raw blocks as CAR

View File

@ -8,3 +8,23 @@
- description of the contents and layout of the raw CAR, encoded in DAG-JSON
- Source: https://ipld.io/specs/transport/car/fixture/carv1-basic/carv1-basic.json
- test-dag.car + deterministic.car
- raw CARv1
generated with:
```sh
# using ipfs version 0.18.1
mkdir -p subdir &&
echo "hello application/vnd.ipld.car" > subdir/ascii.txt &&
ROOT_DIR_CID=$(ipfs add -Qrw --cid-version 1 subdir) &&
FILE_CID=$(ipfs resolve -r /ipfs/$ROOT_DIR_CID/subdir/ascii.txt | cut -d "/" -f3) &&
ipfs dag export $ROOT_DIR_CID > test-dag.car &&
ipfs dag export $FILE_CID > deterministic.car &&
echo ROOT_DIR_CID=${ROOT_DIR_CID} # ./
echo FILE_CID=${FILE_CID} # /subdir/ascii.txt
# ROOT_DIR_CID=bafybeiefu3d7oytdumk5v7gn6s7whpornueaw7m7u46v2o6omsqcrhhkzi # ./
# FILE_CID=bafkreifkam6ns4aoolg3wedr4uzrs3kvq66p4pecirz6y2vlrngla62mxm # /subdir/ascii.txt
```

Binary file not shown.

Binary file not shown.

View File

@ -27,7 +27,7 @@ test_expect_success "filter metrics" '
'
test_expect_success "make sure metrics haven't changed" '
diff -u ../t0116-prometheus-data/prometheus_metrics filtered_metrics
diff -u ../t0119-prometheus-data/prometheus_metrics filtered_metrics
'
# Check what was added by enabling ResourceMgr.Enabled
@ -50,11 +50,11 @@ test_kill_ipfs_daemon
test_expect_success "filter metrics and find ones added by enabling ResourceMgr" '
sed -ne "s/^\([a-z0-9_]\+\).*/\1/p" raw_metrics | LC_ALL=C sort > filtered_metrics &&
grep -v -x -f ../t0116-prometheus-data/prometheus_metrics filtered_metrics > rcmgr_metrics
grep -v -x -f ../t0119-prometheus-data/prometheus_metrics filtered_metrics > rcmgr_metrics
'
test_expect_success "make sure initial metrics added by setting ResourceMgr.Enabled haven't changed" '
diff -u ../t0116-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr rcmgr_metrics
diff -u ../t0119-prometheus-data/prometheus_metrics_added_by_enabling_rcmgr rcmgr_metrics
'
test_done

View File

@ -10,20 +10,14 @@ test_launch_ipfs_daemon_without_network
OUTSIDE_ROOT_CID="bafybeicaj7kvxpcv4neaqzwhrqqmdstu4dhrwfpknrgebq6nzcecfucvyu"
INSIDE_ROOT_CID="bafybeibfevfxlvxp5vxobr5oapczpf7resxnleb7tkqmdorc4gl5cdva3y"
# Import test case
# See the static fixtures in ./t0122-gateway-tar/
test_expect_success "Add the test directory" '
mkdir -p rootDir/ipfs &&
mkdir -p rootDir/ipns &&
mkdir -p rootDir/api &&
mkdir -p rootDir/ą/ę &&
echo "I am a txt file on path with utf8" > rootDir/ą/ę/file-źł.txt &&
echo "I am a txt file in confusing /api dir" > rootDir/api/file.txt &&
echo "I am a txt file in confusing /ipfs dir" > rootDir/ipfs/file.txt &&
echo "I am a txt file in confusing /ipns dir" > rootDir/ipns/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 rootDir) &&
FILE_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Hash) &&
FILE_SIZE=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Size)
echo "$FILE_CID / $FILE_SIZE"
ipfs dag import ../t0122-gateway-tar/fixtures.car
'
DIR_CID=bafybeig6ka5mlwkl4subqhaiatalkcleo4jgnr3hqwvpmsqfca27cijp3i # ./rootDir
FILE_CID=bafkreialihlqnf5uwo4byh4n3cmwlntwqzxxs2fg5vanqdi3d7tb2l5xkm # ./rootDir/ą/ę/file-źł.txt
FILE_SIZE=34
test_expect_success "GET TAR with format=tar and extract" '
curl "http://127.0.0.1:$GWAY_PORT/ipfs/$FILE_CID?format=tar" | tar -x
@ -69,9 +63,9 @@ test_expect_success "GET TAR with explicit ?filename= succeeds with modified Con
"
test_expect_success "Add CARs with relative paths to test with" '
ipfs dag import ../t0122-gateway-tar-data/outside-root.car > import_output &&
ipfs dag import ../t0122-gateway-tar/outside-root.car > import_output &&
test_should_contain $OUTSIDE_ROOT_CID import_output &&
ipfs dag import ../t0122-gateway-tar-data/inside-root.car > import_output &&
ipfs dag import ../t0122-gateway-tar/inside-root.car > import_output &&
test_should_contain $INSIDE_ROOT_CID import_output
'

View File

@ -0,0 +1,37 @@
# Dataset description/sources
- inside-root.car
- outside-root.car
- fixtures.car
- raw CARv1
generated with:
```sh
# ipfs version 0.18.1
mkdir -p rootDir/ipfs &&
mkdir -p rootDir/ipns &&
mkdir -p rootDir/api &&
mkdir -p rootDir/ą/ę &&
echo "I am a txt file on path with utf8" > rootDir/ą/ę/file-źł.txt &&
echo "I am a txt file in confusing /api dir" > rootDir/api/file.txt &&
echo "I am a txt file in confusing /ipfs dir" > rootDir/ipfs/file.txt &&
echo "I am a txt file in confusing /ipns dir" > rootDir/ipns/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 rootDir) &&
FILE_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Hash) &&
FILE_SIZE=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Size)
echo "$FILE_CID / $FILE_SIZE"
echo DIR_CID=${DIR_CID} # ./rootDir
echo FILE_CID=${FILE_CID} # ./rootDir/ą/ę/file-źł.txt
echo FILE_SIZE=${FILE_SIZE}
ipfs dag export ${DIR_CID} > ./fixtures.car
# DIR_CID=bafybeig6ka5mlwkl4subqhaiatalkcleo4jgnr3hqwvpmsqfca27cijp3i # ./rootDir
# FILE_CID=bafkreialihlqnf5uwo4byh4n3cmwlntwqzxxs2fg5vanqdi3d7tb2l5xkm # ./rootDir/ą/ę/file-źł.txt
# FILE_SIZE=34
```

Binary file not shown.

View File

@ -7,22 +7,15 @@ test_description="Test HTTP Gateway DAG-JSON (application/vnd.ipld.dag-json) and
test_init_ipfs
test_launch_ipfs_daemon_without_network
# Import test case
# See the static fixtures in ./t0123-gateway-json-cbor/
test_expect_success "Add the test directory" '
mkdir -p rootDir/ipfs &&
mkdir -p rootDir/ipns &&
mkdir -p rootDir/api &&
mkdir -p rootDir/ą/ę &&
echo "{ \"test\": \"i am a plain json file\" }" > rootDir/ą/ę/t.json &&
echo "I am a txt file on path with utf8" > rootDir/ą/ę/file-źł.txt &&
echo "I am a txt file in confusing /api dir" > rootDir/api/file.txt &&
echo "I am a txt file in confusing /ipfs dir" > rootDir/ipfs/file.txt &&
echo "I am a txt file in confusing /ipns dir" > rootDir/ipns/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 rootDir) &&
FILE_JSON_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/t.json | jq -r .Hash) &&
FILE_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Hash) &&
FILE_SIZE=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Size)
echo "$FILE_CID / $FILE_SIZE"
ipfs dag import ../t0123-gateway-json-cbor/fixtures.car
'
DIR_CID=bafybeiafyvqlazbbbtjnn6how5d6h6l6rxbqc4qgpbmteaiskjrffmyy4a # ./rootDir
FILE_JSON_CID=bafkreibrppizs3g7axs2jdlnjua6vgpmltv7k72l7v7sa6mmht6mne3qqe # ./rootDir/ą/ę/t.json
FILE_CID=bafkreialihlqnf5uwo4byh4n3cmwlntwqzxxs2fg5vanqdi3d7tb2l5xkm # ./rootDir/ą/ę/file-źł.txt
FILE_SIZE=34
## Quick regression check for JSON stored on UnixFS:
## it has nothing to do with DAG-JSON and JSON codecs,

View File

@ -0,0 +1,44 @@
# Dataset description/sources
- dag-cbor-traversal.car
- dag-json-traversal.car
- dag-pb.car
- dag-pb.json
- fixtures.car
- raw CARv1
generated with:
```sh
# using ipfs version 0.18.1
mkdir -p rootDir/ipfs &&
mkdir -p rootDir/ipns &&
mkdir -p rootDir/api &&
mkdir -p rootDir/ą/ę &&
echo "{ \"test\": \"i am a plain json file\" }" > rootDir/ą/ę/t.json &&
echo "I am a txt file on path with utf8" > rootDir/ą/ę/file-źł.txt &&
echo "I am a txt file in confusing /api dir" > rootDir/api/file.txt &&
echo "I am a txt file in confusing /ipfs dir" > rootDir/ipfs/file.txt &&
echo "I am a txt file in confusing /ipns dir" > rootDir/ipns/file.txt &&
DIR_CID=$(ipfs add -Qr --cid-version 1 rootDir) &&
FILE_JSON_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/t.json | jq -r .Hash) &&
FILE_CID=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Hash) &&
FILE_SIZE=$(ipfs files stat --enc=json /ipfs/$DIR_CID/ą/ę/file-źł.txt | jq -r .Size)
echo "$FILE_CID / $FILE_SIZE"
echo DIR_CID=${DIR_CID} # ./rootDir
echo FILE_JSON_CID=${FILE_JSON_CID} # ./rootDir/ą/ę/t.json
echo FILE_CID=${FILE_CID} # ./rootDir/ą/ę/file-źł.txt
echo FILE_SIZE=${FILE_SIZE}
ipfs dag export ${DIR_CID} > fixtures.car
# DIR_CID=bafybeiafyvqlazbbbtjnn6how5d6h6l6rxbqc4qgpbmteaiskjrffmyy4a # ./rootDir
# FILE_JSON_CID=bafkreibrppizs3g7axs2jdlnjua6vgpmltv7k72l7v7sa6mmht6mne3qqe # ./rootDir/ą/ę/t.json
# FILE_CID=bafkreialihlqnf5uwo4byh4n3cmwlntwqzxxs2fg5vanqdi3d7tb2l5xkm # ./rootDir/ą/ę/file-źł.txt
# FILE_SIZE=34
```

Binary file not shown.

View File

@ -1,284 +0,0 @@
#!/usr/bin/env bash
#
test_description="Test ipfs swarm ResourceMgr config and commands"
. lib/test-lib.sh
test_init_ipfs
test_expect_success 'Disable resource manager' '
ipfs config --bool Swarm.ResourceMgr.Enabled false
'
# test correct behavior when resource manager is disabled
test_launch_ipfs_daemon
test_expect_success 'Swarm limit should fail since RM is disabled' '
test_expect_code 1 ipfs swarm limit system 2> actual &&
test_should_contain "missing ResourceMgr" actual
'
test_expect_success 'Swarm stats should fail since RM is disabled' '
test_expect_code 1 ipfs swarm stats all 2> actual &&
test_should_contain "missing ResourceMgr" actual
'
test_kill_ipfs_daemon
test_expect_success 'Enable resource manager' '
ipfs config --bool Swarm.ResourceMgr.Enabled true
'
# swarm limit|stats should fail in offline mode
test_expect_success 'disconnected: swarm limit requires running daemon' '
test_expect_code 1 ipfs swarm limit system 2> actual &&
test_should_contain "missing ResourceMgr" actual
'
test_expect_success 'disconnected: swarm stats requires running daemon' '
test_expect_code 1 ipfs swarm stats all 2> actual &&
test_should_contain "missing ResourceMgr" actual
'
# test sanity scaling
test_expect_success 'set very high connmgr highwater' '
ipfs config --json Swarm.ConnMgr.HighWater 1000
'
test_launch_ipfs_daemon
test_expect_success 'conns and streams are above 2000' '
ipfs swarm limit system --enc=json | tee json &&
[ "$(jq -r .ConnsInbound < json)" -ge 2000 ] &&
[ "$(jq -r .StreamsInbound < json)" -ge 2000 ]
'
test_kill_ipfs_daemon
test_expect_success 'set previous connmgr highwater' '
ipfs config --json Swarm.ConnMgr.HighWater 96
'
test_launch_ipfs_daemon
test_expect_success 'conns and streams are above 800' '
ipfs swarm limit system --enc=json | tee json &&
[ "$(jq -r .ConnsInbound < json)" -ge 800 ] &&
[ "$(jq -r .StreamsInbound < json)" -ge 800 ]
'
# swarm limit|stats should succeed in online mode by default
# because Resource Manager is opt-out
# every scope has the same fields, so we only inspect System
test_expect_success 'ResourceMgr enabled: swarm limit' '
ipfs swarm limit system --enc=json | tee json &&
jq -e .Conns < json &&
jq -e .ConnsInbound < json &&
jq -e .ConnsOutbound < json &&
jq -e .FD < json &&
jq -e .Memory < json &&
jq -e .Streams < json &&
jq -e .StreamsInbound < json &&
jq -e .StreamsOutbound < json
'
test_expect_success 'ResourceMgr enabled: swarm limit reset' '
ipfs swarm limit system --reset --enc=json 2> reset &&
ipfs swarm limit system --enc=json 2> actual &&
test_cmp reset actual
'
test_expect_success 'Swarm stats system with filter should fail' '
test_expect_code 1 ipfs swarm stats system --min-used-limit-perc=99 2> actual &&
test_should_contain "Error: \"min-used-limit-perc\" can only be used when scope is \"all\"" actual
'
test_expect_success 'ResourceMgr enabled: swarm limit reset on map values' '
ipfs swarm limit peer:12D3KooWL7i1T9VSPeF8AgQApbyM51GNKZsYPvNvL347aMDmvNzG --reset --enc=json 2> reset &&
ipfs swarm limit peer:12D3KooWL7i1T9VSPeF8AgQApbyM51GNKZsYPvNvL347aMDmvNzG --enc=json 2> actual &&
test_cmp reset actual
'
test_expect_success 'ResourceMgr enabled: scope is required using reset flag' '
test_expect_code 1 ipfs swarm limit --reset 2> actual &&
test_should_contain "Error: argument \"scope\" is required" actual
'
test_expect_success 'connected: swarm stats all working properly' '
test_expect_code 0 ipfs swarm stats all
'
# every scope has the same fields, so we only inspect System
test_expect_success 'ResourceMgr enabled: swarm stats' '
ipfs swarm stats all --enc=json | tee json &&
jq -e .System.Memory < json &&
jq -e .System.FD < json &&
jq -e .System.Conns < json &&
jq -e .System.ConnsInbound < json &&
jq -e .System.ConnsOutbound < json &&
jq -e .System.Streams < json &&
jq -e .System.StreamsInbound < json &&
jq -e .System.StreamsOutbound < json &&
jq -e .Transient.Memory < json
'
# shut down the daemon, set a limit in the config, and verify that it's applied
test_kill_ipfs_daemon
test_expect_success "Set system conns limit while daemon is not running" "
ipfs config --json Swarm.ResourceMgr.Limits.System.Conns 99999
"
test_expect_success "Set an invalid limit, which should result in a failure" "
test_expect_code 1 ipfs config --json Swarm.ResourceMgr.Limits.System.Conns 'asdf' 2> actual &&
test_should_contain 'failed to unmarshal' actual
"
test_launch_ipfs_daemon
test_expect_success 'Ensure the new system conns limit is applied' '
ipfs swarm limit system --enc=json | tee json &&
jq -e ".Conns == 99999" < json
'
test_expect_success 'Set system memory limit while the daemon is running' '
ipfs swarm limit system | jq ".Memory = 99998" > system.json &&
ipfs swarm limit system system.json
'
test_expect_success 'The new system limits were written to the config' '
jq -e ".Swarm.ResourceMgr.Limits.System.Memory == \"99998\"" < "$IPFS_PATH/config"
'
test_expect_success 'The new system limits are in the swarm limit output' '
ipfs swarm limit system --enc=json | jq -e ".Memory == \"99998\""
'
# now test all the other scopes
test_expect_success 'Set limit on transient scope' '
ipfs swarm limit transient | jq ".Memory = 88888" > transient.json &&
ipfs swarm limit transient transient.json &&
jq -e ".Swarm.ResourceMgr.Limits.Transient.Memory == \"88888\"" < "$IPFS_PATH/config" &&
ipfs swarm limit transient --enc=json | tee limits &&
jq -e ".Memory == \"88888\"" < limits
'
test_expect_success 'Set limit on service scope' '
ipfs swarm limit svc:foo | jq ".Memory = 77777" > service-foo.json &&
ipfs swarm limit svc:foo service-foo.json --enc=json &&
jq -e ".Swarm.ResourceMgr.Limits.Service.foo.Memory == \"77777\"" < "$IPFS_PATH/config" &&
ipfs swarm limit svc:foo --enc=json | tee limits &&
jq -e ".Memory == \"77777\"" < limits
'
test_expect_success 'Set limit on protocol scope' '
ipfs swarm limit proto:foo | jq ".Memory = 66666" > proto-foo.json &&
ipfs swarm limit proto:foo proto-foo.json --enc=json &&
jq -e ".Swarm.ResourceMgr.Limits.Protocol.foo.Memory == \"66666\"" < "$IPFS_PATH/config" &&
ipfs swarm limit proto:foo --enc=json | tee limits &&
jq -e ".Memory == \"66666\"" < limits
'
# any valid peer id
PEER_ID=QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN
test_expect_success 'Set limit on peer scope' '
ipfs swarm limit peer:$PEER_ID | jq ".Memory = 66666" > peer-$PEER_ID.json &&
ipfs swarm limit peer:$PEER_ID peer-$PEER_ID.json --enc=json &&
jq -e ".Swarm.ResourceMgr.Limits.Peer.${PEER_ID}.Memory == \"66666\"" < "$IPFS_PATH/config" &&
ipfs swarm limit peer:$PEER_ID --enc=json | tee limits &&
jq -e ".Memory == \"66666\"" < limits
'
test_expect_success 'Get limit for peer scope with an invalid peer ID' '
test_expect_code 1 ipfs swarm limit peer:foo 2> actual &&
test_should_contain "invalid peer ID" actual
'
test_expect_success 'Set limit for peer scope with an invalid peer ID' '
echo "{\"Memory\": 99}" > invalid-peer-id.json &&
test_expect_code 1 ipfs swarm limit peer:foo invalid-peer-id.json 2> actual &&
test_should_contain "invalid peer ID" actual
'
test_kill_ipfs_daemon
## Test allowlist
test_expect_success 'init iptb' '
iptb testbed create -type localipfs -count 3 -init
'
test_expect_success 'peer ids' '
PEERID_0=$(iptb attr get 0 id) &&
PEERID_1=$(iptb attr get 1 id) &&
PEERID_2=$(iptb attr get 2 id)
'
# Enable the resource manager on node 0 and allowlist node 2's peer ID
# (from any IPv4 address, via the 0.0.0.0/ipcidr/0 wildcard).
test_expect_success 'enable RCMGR' '
ipfsi 0 config --bool Swarm.ResourceMgr.Enabled true &&
ipfsi 0 config --json Swarm.ResourceMgr.Allowlist "[\"/ip4/0.0.0.0/ipcidr/0/p2p/$PEERID_2\"]"
'
# Start all three nodes and wait until their APIs are reachable.
test_expect_success 'start nodes' '
iptb start -wait [0-2]
'
# Zero out node 0's system connection limits so that every non-allowlisted
# connection attempt is refused.
test_expect_success "change system limits on node 0" '
ipfsi 0 swarm limit system | jq ". + {Conns: 0,ConnsInbound: 0, ConnsOutbound: 0}" > system.json &&
ipfsi 0 swarm limit system system.json
'
# Node 1 is not allowlisted, so connecting must fail under the zeroed limits.
test_expect_success "node 0 fails to connect to 1" '
test_expect_code 1 iptb connect 0 1
'
# Node 2 is in the allowlist, so the connection succeeds despite the limits.
test_expect_success "node 0 connects to 2 because it's allowlisted" '
iptb connect 0 2
'
# Ping to the non-allowlisted node must fail with an explicit error message.
test_expect_success "node 0 fails to ping 1" '
test_expect_code 1 ipfsi 0 ping -n2 -- "$PEERID_1" 2> actual &&
test_should_contain "Error: ping failed" actual
'
# Node 0 can still ping node 2 despite its zeroed connection limits, because
# node 2's peer ID is in Swarm.ResourceMgr.Allowlist.
# Fix: the title previously said "node 1 can ping 2", but the command runs on
# node 0 (`ipfsi 0`); the title now matches the command.
test_expect_success "node 0 can ping 2" '
ipfsi 0 ping -n2 -- "$PEERID_2"
'
# Tear down the testbed before the daemon-startup-refusal tests below.
test_expect_success 'stop iptb' '
iptb stop 0 &&
iptb stop 1 &&
iptb stop 2
'
## Test that the daemon refuses to start if resource limits are not above Swarm.ConnMgr.HighWater
# Each test sets one limit equal to HighWater (128), expects `ipfs daemon` to
# exit with code 1, then restores a safe value (256) for the next test.
test_expect_success "node refuse to start if Swarm.ResourceMgr.Limits.System.Conns <= Swarm.ConnMgr.HighWater" '
ipfs config --json Swarm.ResourceMgr.Limits.System.Conns 128 &&
ipfs config --json Swarm.ConnMgr.HighWater 128 &&
ipfs config --json Swarm.ConnMgr.LowWater 64 &&
test_expect_code 1 ipfs daemon &&
ipfs config --json Swarm.ResourceMgr.Limits.System.Conns 256
'
# Same check for the inbound-connection limit.
test_expect_success "node refuse to start if Swarm.ResourceMgr.Limits.System.ConnsInbound <= Swarm.ConnMgr.HighWater" '
ipfs config --json Swarm.ResourceMgr.Limits.System.ConnsInbound 128 &&
test_expect_code 1 ipfs daemon &&
ipfs config --json Swarm.ResourceMgr.Limits.System.ConnsInbound 256
'
# Same check for the total stream limit.
test_expect_success "node refuse to start if Swarm.ResourceMgr.Limits.System.Streams <= Swarm.ConnMgr.HighWater" '
ipfs config --json Swarm.ResourceMgr.Limits.System.Streams 128 &&
test_expect_code 1 ipfs daemon &&
ipfs config --json Swarm.ResourceMgr.Limits.System.Streams 256
'
test_expect_success "node refuse to start if Swarm.ResourceMgr.Limits.System.StreamsInbound <= Swarm.ConnMgr.HighWater" '
ipfs config --json Swarm.ResourceMgr.Limits.System.StreamsInbound 128 &&
test_expect_code 1 ipfs daemon &&
ipfs config --json Swarm.ResourceMgr.Limits.System.StreamsInbound 256
'
test_done

View File

@ -10,6 +10,13 @@ test_description="Test API security"
# Initialize the repo and preload the test fixture while offline, so the
# content is available locally without fetching anything over the network.
test_init_ipfs
# Import test case
# See the static fixtures in ./t0400-api-no-gateway/
test_expect_success "Add the test directory" '
ipfs dag import ../t0400-api-no-gateway/fixtures.car
'
HASH=QmNYERzV2LfD2kkfahtfv44ocHzEFK1sLBaE7zdcYT2GAZ # a file containing the string "testing"
# by default, we don't let you load arbitrary ipfs objects through the api,
# because this would open up the api to scripting vulnerabilities.
# only the webui objects are allowed.
@ -17,14 +24,12 @@ test_init_ipfs
test_launch_ipfs_daemon
# The API port must NOT serve arbitrary /ipfs/ content by default.
test_expect_success "Gateway on API unavailable" '
# NOTE(review): HASH is recomputed via `ipfs add` here even though it is set
# from the imported fixture above — looks like leftover from before the
# fixture was introduced; verify against the final file.
HASH=$(echo "testing" | ipfs add -q)
test_curl_resp_http_code "http://127.0.0.1:$API_PORT/ipfs/$HASH" "HTTP/1.1 404 Not Found"
'
test_kill_ipfs_daemon
# With --unrestricted-api, the API port serves /ipfs/ content like a gateway.
test_launch_ipfs_daemon --unrestricted-api
test_expect_success "Gateway on --unrestricted-api API available" '
# NOTE(review): same redundant HASH recomputation as above — verify.
HASH=$(echo "testing" | ipfs add -q)
test_curl_resp_http_code "http://127.0.0.1:$API_PORT/ipfs/$HASH" "HTTP/1.1 200 OK"
'
test_kill_ipfs_daemon

View File

@ -0,0 +1,16 @@
# Dataset description/sources
- fixtures.car
  - raw CARv1, generated with:
```sh
# using ipfs version 0.18.1
HASH=$(echo "testing" | ipfs add -q)
ipfs dag export $HASH > fixtures.car
echo HASH=${HASH} # a file containing the string "testing"
# HASH=QmNYERzV2LfD2kkfahtfv44ocHzEFK1sLBaE7zdcYT2GAZ # a file containing the string "testing"
```

Binary file not shown.