Add global --cid-base option and enable it for most commands.
This is done on the server side for most commands. It also adds a global --output-cidv1 option.

License: MIT
Signed-off-by: Kevin Atkinson <k@kevina.org>
This commit is contained in:
parent 5512ad53c4
commit b22275fb66
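The hunks below repeat one pattern across the command implementations: build a cidenc.Encoder from the new global options once per request, then replace every c.String() in command output with enc.Encode(c). As a rough orientation, here is a minimal self-contained sketch of that pattern (not part of the commit); runExample and exampleOutput are invented names, while the import paths, cmdenv helpers, and cmds.EmitOnce call are the ones used in the hunks that follow.

package commands

import (
	cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv"

	cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
	cmds "gx/ipfs/QmWGm4AbZEbnmdgVTza52MSNpEmBdFVqzmAysRbjrRyGbH/go-ipfs-cmds"
)

// exampleOutput mirrors the small output structs used by the real commands.
type exampleOutput struct {
	Hash string
}

// runExample shows the before/after shape of a Run handler under this commit:
// one encoder per request, every CID encoded through it.
func runExample(req *cmds.Request, res cmds.ResponseEmitter, c cid.Cid) error {
	enc, err := cmdenv.GetCidEncoder(req) // honors --cid-base and --output-cidv1
	if err != nil {
		return err
	}
	// Previously: &exampleOutput{Hash: c.String()}
	return cmds.EmitOnce(res, &exampleOutput{Hash: enc.Encode(c)})
}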
@@ -174,6 +174,11 @@ You can now check what blocks have been created by:
 		return fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr))
 	}
 
+	enc, err := cmdenv.GetCidEncoder(req)
+	if err != nil {
+		return err
+	}
+
 	events := make(chan interface{}, adderOutChanSize)
 
 	opts := []options.UnixfsAddOption{

@@ -226,7 +231,7 @@ You can now check what blocks have been created by:
 
 	h := ""
 	if output.Path != nil {
-		h = output.Path.Cid().String()
+		h = enc.Encode(output.Path.Cid())
 	}
 
 	res.Emit(&AddEvent{
@@ -74,12 +74,15 @@ Print out all blocks currently on the bitswap wantlist for the local peer.`,
 	},
 	Encoders: cmds.EncoderMap{
 		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *KeyList) error {
+			enc, err := cmdenv.GetLowLevelCidEncoder(req)
+			if err != nil {
+				return err
+			}
 			// sort the keys first
 			cidutil.Sort(out.Keys)
 			for _, key := range out.Keys {
-				fmt.Fprintln(w, key)
+				fmt.Fprintln(w, enc.Encode(key))
 			}
 
 			return nil
 		}),
 	},

@@ -115,6 +118,10 @@ var bitswapStatCmd = &cmds.Command{
 	},
 	Encoders: cmds.EncoderMap{
 		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s *bitswap.Stat) error {
+			enc, err := cmdenv.GetLowLevelCidEncoder(req)
+			if err != nil {
+				return err
+			}
 			fmt.Fprintln(w, "bitswap status")
 			fmt.Fprintf(w, "\tprovides buffer: %d / %d\n", s.ProvideBufLen, bitswap.HasBlockBufferSize)
 			fmt.Fprintf(w, "\tblocks received: %d\n", s.BlocksReceived)

@@ -125,7 +132,7 @@ var bitswapStatCmd = &cmds.Command{
 			fmt.Fprintf(w, "\tdup data received: %s\n", humanize.Bytes(s.DupDataReceived))
 			fmt.Fprintf(w, "\twantlist [%d keys]\n", len(s.Wantlist))
 			for _, k := range s.Wantlist {
-				fmt.Fprintf(w, "\t\t%s\n", k.String())
+				fmt.Fprintf(w, "\t\t%s\n", enc.Encode(k))
 			}
 			fmt.Fprintf(w, "\tpartners [%d]\n", len(s.Peers))
 			for _, p := range s.Peers {
core/commands/cmdenv/cidbase.go (new file, 98 lines)
@@ -0,0 +1,98 @@
package cmdenv

import (
	"errors"

	path "gx/ipfs/QmNYPETsdAu2uQ1k9q9S1jYEGURaLHV6cbYRSVFVRftpF8/go-path"
	cmds "gx/ipfs/QmWGm4AbZEbnmdgVTza52MSNpEmBdFVqzmAysRbjrRyGbH/go-ipfs-cmds"
	cidenc "gx/ipfs/QmdPQx9fvN5ExVwMhRmh7YpCQJzJrFhd1AjVBwJmRMFJeX/go-cidutil/cidenc"
	cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
	mbase "gx/ipfs/QmekxXDhCxCJRNuzmHreuaT3BsuJcsjcXWNrtV9C8DRHtd/go-multibase"
)

var OptionCidBase = cmdkit.StringOption("cid-base", "Multibase encoding used for version 1 CIDs in output.")
var OptionOutputCidV1 = cmdkit.BoolOption("output-cidv1", "Upgrade CID version 0 to version 1 in output.")

// GetCidEncoder processes the `cid-base` and `output-cidv1` options and
// returns an encoder to use based on those parameters.
func GetCidEncoder(req *cmds.Request) (cidenc.Encoder, error) {
	return getCidBase(req, true)
}

// GetLowLevelCidEncoder is like GetCidEncoder but meant to be used by
// lower-level commands. It differs from GetCidEncoder in that CIDv0
// CIDs are not, by default, auto-upgraded to CIDv1.
func GetLowLevelCidEncoder(req *cmds.Request) (cidenc.Encoder, error) {
	return getCidBase(req, false)
}

func getCidBase(req *cmds.Request, autoUpgrade bool) (cidenc.Encoder, error) {
	base, _ := req.Options["cid-base"].(string)
	upgrade, upgradeDefined := req.Options["output-cidv1"].(bool)

	e := cidenc.Default()

	if base != "" {
		var err error
		e.Base, err = mbase.EncoderByName(base)
		if err != nil {
			return e, err
		}
		if autoUpgrade {
			e.Upgrade = true
		}
	}

	if upgradeDefined {
		e.Upgrade = upgrade
	}

	return e, nil
}

// CidBaseDefined returns true if the `cid-base` option is specified
// on the command line.
func CidBaseDefined(req *cmds.Request) bool {
	base, _ := req.Options["cid-base"].(string)
	return base != ""
}

// CidEncoderFromPath creates a new encoder that is influenced by
// the encoded Cid in a Path. For CidV0 the multibase from the base
// encoder is used and automatic upgrades are disabled. For CidV1 the
// multibase from the CID is used and upgrades are enabled. On error
// the base encoder is returned. If you don't care about the error
// condition, it is safe to ignore the error returned.
func CidEncoderFromPath(enc cidenc.Encoder, p string) (cidenc.Encoder, error) {
	v, err := extractCidString(p)
	if err != nil {
		return enc, err
	}
	if cidVer(v) == 0 {
		return cidenc.Encoder{Base: enc.Base, Upgrade: false}, nil
	}
	e, err := mbase.NewEncoder(mbase.Encoding(v[0]))
	if err != nil {
		return enc, err
	}
	return cidenc.Encoder{Base: e, Upgrade: true}, nil
}

func extractCidString(str string) (string, error) {
	p, err := path.ParsePath(str)
	if err != nil {
		return "", err
	}
	segs := p.Segments()
	if segs[0] == "ipfs" || segs[0] == "ipld" {
		return segs[1], nil
	}
	return "", errors.New("no CID found")
}

func cidVer(v string) int {
	if len(v) == 46 && v[:2] == "Qm" {
		return 0
	}
	return 1
}
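A hypothetical illustration of how these helpers are meant to combine (not part of the commit): illustrateEncoderChoice is an invented name, and the note that cidenc.Default() leaves Upgrade off is an assumption read from the getCidBase logic above.

package cmdenv

import (
	"fmt"

	cidenc "gx/ipfs/QmdPQx9fvN5ExVwMhRmh7YpCQJzJrFhd1AjVBwJmRMFJeX/go-cidutil/cidenc"
	mbase "gx/ipfs/QmekxXDhCxCJRNuzmHreuaT3BsuJcsjcXWNrtV9C8DRHtd/go-multibase"
)

func illustrateEncoderChoice() {
	// The starting point of getCidBase when no options are given.
	def := cidenc.Default()

	// Equivalent of --cid-base=base32 on a high-level command: GetCidEncoder
	// switches the base and auto-upgrades CIDv0 output to CIDv1.
	b32, err := mbase.EncoderByName("base32")
	if err != nil {
		panic(err) // "base32" is a valid multibase name
	}
	upgraded := cidenc.Encoder{Base: b32, Upgrade: true}

	// A CIDv0 path argument keeps the caller's base but disables upgrades,
	// so commands answering about "Qm..." paths keep answering in CIDv0 form.
	fromV0, _ := CidEncoderFromPath(upgraded, "/ipfs/QmRqVG8VGdKZ7KARqR96MV7VNHgWvEQifk94br5HpURpfu")

	fmt.Println(def.Upgrade, upgraded.Upgrade, fromV0.Upgrade) // expected: false true false
}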
core/commands/cmdenv/cidbase_test.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package cmdenv

import (
	"testing"
)

func TestExtractCidString(t *testing.T) {
	test := func(path string, cid string) {
		res, err := extractCidString(path)
		if err != nil || res != cid {
			t.Errorf("extractCidString(%s) failed", path)
		}
	}
	testFailure := func(path string) {
		_, err := extractCidString(path)
		if err == nil {
			t.Errorf("extractCidString(%s) should have failed", path)
		}
	}
	p := "QmRqVG8VGdKZ7KARqR96MV7VNHgWvEQifk94br5HpURpfu"
	test(p, p)
	test("/ipfs/"+p, p)
	testFailure("/ipns/" + p)

	p = "zb2rhfkM4FjkMLaUnygwhuqkETzbYXnUDf1P9MSmdNjW1w1Lk"
	test(p, p)
	test("/ipfs/"+p, p)
	test("/ipld/"+p, p)

	testFailure("/ipfs")
}
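The dag resolve and resolve hunks further down combine CidBaseDefined and CidEncoderFromPath so that, unless --cid-base was given explicitly, the output base follows the CID the user typed. A hedged sketch of that call-site convention (printResolved is an invented helper; the package clause and parameter list are illustrative, while the cmdenv calls are the ones used in the diff):

package commands

import (
	"fmt"
	"io"

	cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv"

	cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
	cmds "gx/ipfs/QmWGm4AbZEbnmdgVTza52MSNpEmBdFVqzmAysRbjrRyGbH/go-ipfs-cmds"
)

func printResolved(req *cmds.Request, w io.Writer, c cid.Cid) error {
	enc, err := cmdenv.GetLowLevelCidEncoder(req)
	if err != nil {
		return err
	}
	if !cmdenv.CidBaseDefined(req) {
		// Error deliberately ignored: on failure the original encoder is kept.
		enc, _ = cmdenv.CidEncoderFromPath(enc, req.Arguments[0])
	}
	fmt.Fprintln(w, enc.Encode(c))
	return nil
}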
@ -144,7 +144,11 @@ into an object of the specified format.
|
||||
Type: OutputObject{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *OutputObject) error {
|
||||
fmt.Fprintln(w, out.Cid.String())
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(w, enc.Encode(out.Cid))
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
@ -227,7 +231,14 @@ var DagResolveCmd = &cmds.Command{
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ResolveOutput) error {
|
||||
p := out.Cid.String()
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !cmdenv.CidBaseDefined(req) {
|
||||
enc, _ = cmdenv.CidEncoderFromPath(enc, req.Arguments[0])
|
||||
}
|
||||
p := enc.Encode(out.Cid)
|
||||
if out.RemPath != "" {
|
||||
p = path.Join([]string{p, out.RemPath})
|
||||
}
|
||||
|
||||
@ -17,13 +17,14 @@ import (
|
||||
"gx/ipfs/QmP9eu5X5Ax8169jNWqAJcc42mdZgzLR1aKCEzqhNoBLKk/go-mfs"
|
||||
"gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize"
|
||||
ft "gx/ipfs/QmQXze9tG878pa4Euya4rrDpyTNX3kQe4dhCaBzBozGgpe/go-unixfs"
|
||||
"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
|
||||
cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
|
||||
dag "gx/ipfs/QmTQdH4848iTVCJmKXYyRiK72HufWTLYQQ8iN3JaQ8K1Hq/go-merkledag"
|
||||
"gx/ipfs/QmWGm4AbZEbnmdgVTza52MSNpEmBdFVqzmAysRbjrRyGbH/go-ipfs-cmds"
|
||||
bservice "gx/ipfs/QmYPZzd9VqmJDwxUnThfeSbV1Y5o53aVPDijTB7j7rS9Ep/go-blockservice"
|
||||
"gx/ipfs/QmYZwey1thDTynSrvd6qQkX24UpTka6TFhQ2v569UpoqxD/go-ipfs-exchange-offline"
|
||||
ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format"
|
||||
logging "gx/ipfs/QmcuXC5cxs79ro2cUuHs4HQ2bkDLJUYokwL8aivcX6HW3C/go-log"
|
||||
cidenc "gx/ipfs/QmdPQx9fvN5ExVwMhRmh7YpCQJzJrFhd1AjVBwJmRMFJeX/go-cidutil/cidenc"
|
||||
"gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
|
||||
mh "gx/ipfs/QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW/go-multihash"
|
||||
)
|
||||
@ -136,6 +137,11 @@ var filesStatCmd = &cmds.Command{
|
||||
|
||||
withLocal, _ := req.Options[filesWithLocalOptionName].(bool)
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var dagserv ipld.DAGService
|
||||
if withLocal {
|
||||
// an offline DAGService will not fetch from the network
|
||||
@ -152,7 +158,7 @@ var filesStatCmd = &cmds.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
o, err := statNode(nd)
|
||||
o, err := statNode(nd, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -217,7 +223,7 @@ func statGetFormatOptions(req *cmds.Request) (string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func statNode(nd ipld.Node) (*statOutput, error) {
|
||||
func statNode(nd ipld.Node, enc cidenc.Encoder) (*statOutput, error) {
|
||||
c := nd.Cid()
|
||||
|
||||
cumulsize, err := nd.Size()
|
||||
@ -243,7 +249,7 @@ func statNode(nd ipld.Node) (*statOutput, error) {
|
||||
}
|
||||
|
||||
return &statOutput{
|
||||
Hash: c.String(),
|
||||
Hash: enc.Encode(c),
|
||||
Blocks: len(nd.Links()),
|
||||
Size: d.FileSize(),
|
||||
CumulativeSize: cumulsize,
|
||||
@ -251,7 +257,7 @@ func statNode(nd ipld.Node) (*statOutput, error) {
|
||||
}, nil
|
||||
case *dag.RawNode:
|
||||
return &statOutput{
|
||||
Hash: c.String(),
|
||||
Hash: enc.Encode(c),
|
||||
Blocks: 0,
|
||||
Size: cumulsize,
|
||||
CumulativeSize: cumulsize,
|
||||
@ -433,6 +439,11 @@ Examples:
|
||||
|
||||
long, _ := req.Options[longOptionName].(bool)
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch fsn := fsn.(type) {
|
||||
case *mfs.Directory:
|
||||
if !long {
|
||||
@ -470,7 +481,7 @@ Examples:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out.Entries[0].Hash = nd.Cid().String()
|
||||
out.Entries[0].Hash = enc.Encode(nd.Cid())
|
||||
}
|
||||
return cmds.EmitOnce(res, out)
|
||||
default:
|
||||
|
||||
@ -79,14 +79,20 @@ The output is:
|
||||
return nil
|
||||
},
|
||||
PostRun: cmds.PostRunMap{
|
||||
cmds.CLI: streamResult(func(v interface{}, out io.Writer) nonFatalError {
|
||||
r := v.(*filestore.ListRes)
|
||||
if r.ErrorMsg != "" {
|
||||
return nonFatalError(r.ErrorMsg)
|
||||
cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
|
||||
enc, err := cmdenv.GetCidEncoder(res.Request())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(out, "%s\n", r.FormatLong())
|
||||
return ""
|
||||
}),
|
||||
return streamResult(func(v interface{}, out io.Writer) nonFatalError {
|
||||
r := v.(*filestore.ListRes)
|
||||
if r.ErrorMsg != "" {
|
||||
return nonFatalError(r.ErrorMsg)
|
||||
}
|
||||
fmt.Fprintf(out, "%s\n", r.FormatLong(enc.Encode))
|
||||
return ""
|
||||
})(res, re)
|
||||
},
|
||||
},
|
||||
Type: filestore.ListRes{},
|
||||
}
|
||||
@ -151,6 +157,11 @@ For ERROR entries the error will also be printed to stderr.
|
||||
},
|
||||
PostRun: cmds.PostRunMap{
|
||||
cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
|
||||
enc, err := cmdenv.GetCidEncoder(res.Request())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
v, err := res.Next()
|
||||
if err != nil {
|
||||
@ -168,7 +179,7 @@ For ERROR entries the error will also be printed to stderr.
|
||||
if list.Status == filestore.StatusOtherError {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", list.ErrorMsg)
|
||||
}
|
||||
fmt.Fprintf(os.Stdout, "%s %s\n", list.Status.Format(), list.FormatLong())
|
||||
fmt.Fprintf(os.Stdout, "%s %s\n", list.Status.Format(), list.FormatLong(enc.Encode))
|
||||
}
|
||||
},
|
||||
},
|
||||
@ -184,6 +195,12 @@ var dupsFileStore = &cmds.Command{
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch, err := fs.FileManager().AllKeysChan(req.Context)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -195,7 +212,7 @@ var dupsFileStore = &cmds.Command{
|
||||
return res.Emit(&RefWrapper{Err: err.Error()})
|
||||
}
|
||||
if have {
|
||||
if err := res.Emit(&RefWrapper{Ref: cid.String()}); err != nil {
|
||||
if err := res.Emit(&RefWrapper{Ref: enc.Encode(cid)}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@ -18,6 +18,7 @@ import (
|
||||
blockservice "gx/ipfs/QmYPZzd9VqmJDwxUnThfeSbV1Y5o53aVPDijTB7j7rS9Ep/go-blockservice"
|
||||
offline "gx/ipfs/QmYZwey1thDTynSrvd6qQkX24UpTka6TFhQ2v569UpoqxD/go-ipfs-exchange-offline"
|
||||
ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format"
|
||||
cidenc "gx/ipfs/QmdPQx9fvN5ExVwMhRmh7YpCQJzJrFhd1AjVBwJmRMFJeX/go-cidutil/cidenc"
|
||||
"gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
|
||||
)
|
||||
|
||||
@ -94,9 +95,13 @@ The JSON output contains type information.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
paths := req.Arguments
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var dagnodes []ipld.Node
|
||||
for _, fpath := range paths {
|
||||
p, err := iface.ParsePath(fpath)
|
||||
@ -134,7 +139,7 @@ The JSON output contains type information.
|
||||
}
|
||||
outputLinks := make([]LsLink, len(links))
|
||||
for j, link := range links {
|
||||
lsLink, err := makeLsLink(req, dserv, resolveType, resolveSize, link)
|
||||
lsLink, err := makeLsLink(req, dserv, resolveType, resolveSize, link, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -168,7 +173,7 @@ The JSON output contains type information.
|
||||
return linkResult.Err
|
||||
}
|
||||
link := linkResult.Link
|
||||
lsLink, err := makeLsLink(req, dserv, resolveType, resolveSize, link)
|
||||
lsLink, err := makeLsLink(req, dserv, resolveType, resolveSize, link, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -227,7 +232,7 @@ func makeDagNodeLinkResults(req *cmds.Request, dagnode ipld.Node) <-chan unixfs.
|
||||
return linkResults
|
||||
}
|
||||
|
||||
func makeLsLink(req *cmds.Request, dserv ipld.DAGService, resolveType bool, resolveSize bool, link *ipld.Link) (*LsLink, error) {
|
||||
func makeLsLink(req *cmds.Request, dserv ipld.DAGService, resolveType bool, resolveSize bool, link *ipld.Link, enc cidenc.Encoder) (*LsLink, error) {
|
||||
t := unixfspb.Data_DataType(-1)
|
||||
var size uint64
|
||||
|
||||
@ -260,7 +265,7 @@ func makeLsLink(req *cmds.Request, dserv ipld.DAGService, resolveType bool, reso
|
||||
}
|
||||
return &LsLink{
|
||||
Name: link.Name,
|
||||
Hash: link.Cid.String(),
|
||||
Hash: enc.Encode(link.Cid),
|
||||
Size: size,
|
||||
Type: t,
|
||||
}, nil
|
||||
|
||||
@ -119,6 +119,11 @@ multihash.
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path, err := coreiface.ParsePath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
@ -137,14 +142,14 @@ multihash.
|
||||
outLinks := make([]Link, len(links))
|
||||
for i, link := range links {
|
||||
outLinks[i] = Link{
|
||||
Hash: link.Cid.String(),
|
||||
Hash: enc.Encode(link.Cid),
|
||||
Name: link.Name,
|
||||
Size: link.Size,
|
||||
}
|
||||
}
|
||||
|
||||
out := &Object{
|
||||
Hash: rp.Cid().String(),
|
||||
Hash: enc.Encode(rp.Cid()),
|
||||
Links: outLinks,
|
||||
}
|
||||
|
||||
@ -209,6 +214,11 @@ Supported values are:
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path, err := coreiface.ParsePath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
@ -246,7 +256,7 @@ Supported values are:
|
||||
|
||||
for i, link := range nd.Links() {
|
||||
node.Links[i] = Link{
|
||||
Hash: link.Cid.String(),
|
||||
Hash: enc.Encode(link.Cid),
|
||||
Name: link.Name,
|
||||
Size: link.Size,
|
||||
}
|
||||
@ -299,6 +309,11 @@ var ObjectStatCmd = &cmds.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path, err := coreiface.ParsePath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
@ -310,7 +325,7 @@ var ObjectStatCmd = &cmds.Command{
|
||||
}
|
||||
|
||||
oldStat := &ipld.NodeStat{
|
||||
Hash: ns.Cid.String(),
|
||||
Hash: enc.Encode(ns.Cid),
|
||||
NumLinks: ns.NumLinks,
|
||||
BlockSize: ns.BlockSize,
|
||||
LinksSize: ns.LinksSize,
|
||||
@ -391,6 +406,11 @@ And then run:
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
file, err := cmdenv.GetFileArg(req.Files.Entries())
|
||||
if err != nil {
|
||||
return err
|
||||
@ -419,7 +439,7 @@ And then run:
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &Object{Hash: p.Cid().String()})
|
||||
return cmds.EmitOnce(res, &Object{Hash: enc.Encode(p.Cid())})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Object) error {
|
||||
@ -464,6 +484,11 @@ Available templates:
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
template := "empty"
|
||||
if len(req.Arguments) == 1 {
|
||||
template = req.Arguments[0]
|
||||
@ -474,7 +499,7 @@ Available templates:
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &Object{Hash: nd.Cid().String()})
|
||||
return cmds.EmitOnce(res, &Object{Hash: enc.Encode(nd.Cid())})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Object) error {
|
||||
|
||||
@ -21,6 +21,7 @@ import (
|
||||
"gx/ipfs/QmYMQuypUbgsdNHmuCBSUJV6wdQVsBHRivNAp3efHJwZJD/go-verifcid"
|
||||
bserv "gx/ipfs/QmYPZzd9VqmJDwxUnThfeSbV1Y5o53aVPDijTB7j7rS9Ep/go-blockservice"
|
||||
offline "gx/ipfs/QmYZwey1thDTynSrvd6qQkX24UpTka6TFhQ2v569UpoqxD/go-ipfs-exchange-offline"
|
||||
cidenc "gx/ipfs/QmdPQx9fvN5ExVwMhRmh7YpCQJzJrFhd1AjVBwJmRMFJeX/go-cidutil/cidenc"
|
||||
cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
|
||||
)
|
||||
|
||||
@ -87,12 +88,17 @@ var addPinCmd = &cmds.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !showProgress {
|
||||
added, err := corerepo.Pin(n.Pinning, api, req.Context, req.Arguments, recursive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmds.EmitOnce(res, &AddPinOutput{Pins: cidsToStrings(added)})
|
||||
return cmds.EmitOnce(res, &AddPinOutput{Pins: cidsToStrings(added, enc)})
|
||||
}
|
||||
|
||||
v := new(dag.ProgressTracker)
|
||||
@ -124,7 +130,7 @@ var addPinCmd = &cmds.Command{
|
||||
return err
|
||||
}
|
||||
}
|
||||
return res.Emit(&AddPinOutput{Pins: cidsToStrings(val.pins)})
|
||||
return res.Emit(&AddPinOutput{Pins: cidsToStrings(val.pins, enc)})
|
||||
case <-ticker.C:
|
||||
if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil {
|
||||
return err
|
||||
@ -215,12 +221,17 @@ collected if needed. (By default, recursively. Use -r=false for direct pins.)
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
removed, err := corerepo.Unpin(n.Pinning, api, req.Context, req.Arguments, recursive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &PinOutput{cidsToStrings(removed)})
|
||||
return cmds.EmitOnce(res, &PinOutput{cidsToStrings(removed, enc)})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *PinOutput) error {
|
||||
@ -311,19 +322,27 @@ Example:
|
||||
return err
|
||||
}
|
||||
|
||||
var keys map[string]RefKeyObject
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var keys map[cid.Cid]RefKeyObject
|
||||
if len(req.Arguments) > 0 {
|
||||
keys, err = pinLsKeys(req.Context, req.Arguments, typeStr, n, api)
|
||||
} else {
|
||||
keys, err = pinLsAll(req.Context, typeStr, n)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &RefKeyList{Keys: keys})
|
||||
refKeys := make(map[string]RefKeyObject, len(keys))
|
||||
for k, v := range keys {
|
||||
refKeys[enc.Encode(k)] = v
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &RefKeyList{Keys: refKeys})
|
||||
},
|
||||
Type: RefKeyList{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
@ -423,11 +442,16 @@ var verifyPinCmd = &cmds.Command{
|
||||
return fmt.Errorf("the --verbose and --quiet options can not be used at the same time")
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := pinVerifyOpts{
|
||||
explain: !quiet,
|
||||
includeOk: verbose,
|
||||
}
|
||||
out := pinVerify(req.Context, n, opts)
|
||||
out := pinVerify(req.Context, n, opts, enc)
|
||||
|
||||
return res.Emit(out)
|
||||
},
|
||||
@ -455,14 +479,14 @@ type RefKeyList struct {
|
||||
Keys map[string]RefKeyObject
|
||||
}
|
||||
|
||||
func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsNode, api iface.CoreAPI) (map[string]RefKeyObject, error) {
|
||||
func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsNode, api iface.CoreAPI) (map[cid.Cid]RefKeyObject, error) {
|
||||
|
||||
mode, ok := pin.StringToMode(typeStr)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid pin mode '%s'", typeStr)
|
||||
}
|
||||
|
||||
keys := make(map[string]RefKeyObject)
|
||||
keys := make(map[cid.Cid]RefKeyObject)
|
||||
|
||||
for _, p := range args {
|
||||
pth, err := iface.ParsePath(p)
|
||||
@ -489,7 +513,7 @@ func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsN
|
||||
default:
|
||||
pinType = "indirect through " + pinType
|
||||
}
|
||||
keys[c.Cid().String()] = RefKeyObject{
|
||||
keys[c.Cid()] = RefKeyObject{
|
||||
Type: pinType,
|
||||
}
|
||||
}
|
||||
@ -497,13 +521,13 @@ func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsN
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func pinLsAll(ctx context.Context, typeStr string, n *core.IpfsNode) (map[string]RefKeyObject, error) {
|
||||
func pinLsAll(ctx context.Context, typeStr string, n *core.IpfsNode) (map[cid.Cid]RefKeyObject, error) {
|
||||
|
||||
keys := make(map[string]RefKeyObject)
|
||||
keys := make(map[cid.Cid]RefKeyObject)
|
||||
|
||||
AddToResultKeys := func(keyList []cid.Cid, typeStr string) {
|
||||
for _, c := range keyList {
|
||||
keys[c.String()] = RefKeyObject{
|
||||
keys[c] = RefKeyObject{
|
||||
Type: typeStr,
|
||||
}
|
||||
}
|
||||
@ -552,8 +576,8 @@ type pinVerifyOpts struct {
|
||||
includeOk bool
|
||||
}
|
||||
|
||||
func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan interface{} {
|
||||
visited := make(map[string]PinStatus)
|
||||
func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts, enc cidenc.Encoder) <-chan interface{} {
|
||||
visited := make(map[cid.Cid]PinStatus)
|
||||
|
||||
bs := n.Blocks.Blockstore()
|
||||
DAG := dag.NewDAGService(bserv.New(bs, offline.Exchange(bs)))
|
||||
@ -562,7 +586,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan
|
||||
|
||||
var checkPin func(root cid.Cid) PinStatus
|
||||
checkPin = func(root cid.Cid) PinStatus {
|
||||
key := root.String()
|
||||
key := root
|
||||
if status, ok := visited[key]; ok {
|
||||
return status
|
||||
}
|
||||
@ -570,7 +594,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan
|
||||
if err := verifcid.ValidateCid(root); err != nil {
|
||||
status := PinStatus{Ok: false}
|
||||
if opts.explain {
|
||||
status.BadNodes = []BadNode{BadNode{Cid: key, Err: err.Error()}}
|
||||
status.BadNodes = []BadNode{BadNode{Cid: enc.Encode(key), Err: err.Error()}}
|
||||
}
|
||||
visited[key] = status
|
||||
return status
|
||||
@ -580,7 +604,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan
|
||||
if err != nil {
|
||||
status := PinStatus{Ok: false}
|
||||
if opts.explain {
|
||||
status.BadNodes = []BadNode{BadNode{Cid: key, Err: err.Error()}}
|
||||
status.BadNodes = []BadNode{BadNode{Cid: enc.Encode(key), Err: err.Error()}}
|
||||
}
|
||||
visited[key] = status
|
||||
return status
|
||||
@ -606,7 +630,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan
|
||||
pinStatus := checkPin(cid)
|
||||
if !pinStatus.Ok || opts.includeOk {
|
||||
select {
|
||||
case out <- &PinVerifyRes{cid.String(), pinStatus}:
|
||||
case out <- &PinVerifyRes{enc.Encode(cid), pinStatus}:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
@ -629,10 +653,10 @@ func (r PinVerifyRes) Format(out io.Writer) {
|
||||
}
|
||||
}
|
||||
|
||||
func cidsToStrings(cs []cid.Cid) []string {
|
||||
func cidsToStrings(cs []cid.Cid, enc cidenc.Encoder) []string {
|
||||
out := make([]string, 0, len(cs))
|
||||
for _, c := range cs {
|
||||
out = append(out, c.String())
|
||||
out = append(out, enc.Encode(c))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
@ -14,6 +14,7 @@ import (
|
||||
cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
|
||||
cmds "gx/ipfs/QmWGm4AbZEbnmdgVTza52MSNpEmBdFVqzmAysRbjrRyGbH/go-ipfs-cmds"
|
||||
ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format"
|
||||
cidenc "gx/ipfs/QmdPQx9fvN5ExVwMhRmh7YpCQJzJrFhd1AjVBwJmRMFJeX/go-cidutil/cidenc"
|
||||
cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
|
||||
)
|
||||
|
||||
@ -79,6 +80,11 @@ NOTE: List all references recursively by using the flag '-r'.
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unique, _ := req.Options[refsUniqueOptionName].(bool)
|
||||
recursive, _ := req.Options[refsRecursiveOptionName].(bool)
|
||||
maxDepth, _ := req.Options[refsMaxDepthOptionName].(int)
|
||||
@ -112,7 +118,7 @@ NOTE: List all references recursively by using the flag '-r'.
|
||||
}
|
||||
|
||||
for _, o := range objs {
|
||||
if _, err := rw.WriteRefs(o); err != nil {
|
||||
if _, err := rw.WriteRefs(o, enc); err != nil {
|
||||
if err := res.Emit(&RefWrapper{Err: err.Error()}); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -194,11 +200,11 @@ type RefWriter struct {
|
||||
}
|
||||
|
||||
// WriteRefs writes refs of the given object to the underlying writer.
|
||||
func (rw *RefWriter) WriteRefs(n ipld.Node) (int, error) {
|
||||
return rw.writeRefsRecursive(n, 0)
|
||||
func (rw *RefWriter) WriteRefs(n ipld.Node, enc cidenc.Encoder) (int, error) {
|
||||
return rw.writeRefsRecursive(n, 0, enc)
|
||||
}
|
||||
|
||||
func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int) (int, error) {
|
||||
func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int, enc cidenc.Encoder) (int, error) {
|
||||
nc := n.Cid()
|
||||
|
||||
var count int
|
||||
@ -228,7 +234,7 @@ func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int) (int, error) {
|
||||
|
||||
// Write this node if not done before (or !Unique)
|
||||
if shouldWrite {
|
||||
if err := rw.WriteEdge(nc, lc, n.Links()[i].Name); err != nil {
|
||||
if err := rw.WriteEdge(nc, lc, n.Links()[i].Name, enc); err != nil {
|
||||
return count, err
|
||||
}
|
||||
count++
|
||||
@ -240,7 +246,7 @@ func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int) (int, error) {
|
||||
// Note when !Unique, branches are always considered
|
||||
// unexplored and only depth limits apply.
|
||||
if goDeeper {
|
||||
c, err := rw.writeRefsRecursive(nd, depth+1)
|
||||
c, err := rw.writeRefsRecursive(nd, depth+1, enc)
|
||||
count += c
|
||||
if err != nil {
|
||||
return count, err
|
||||
@ -309,7 +315,7 @@ func (rw *RefWriter) visit(c cid.Cid, depth int) (bool, bool) {
|
||||
}
|
||||
|
||||
// Write one edge
|
||||
func (rw *RefWriter) WriteEdge(from, to cid.Cid, linkname string) error {
|
||||
func (rw *RefWriter) WriteEdge(from, to cid.Cid, linkname string, enc cidenc.Encoder) error {
|
||||
if rw.Ctx != nil {
|
||||
select {
|
||||
case <-rw.Ctx.Done(): // just in case.
|
||||
@ -322,11 +328,11 @@ func (rw *RefWriter) WriteEdge(from, to cid.Cid, linkname string) error {
|
||||
switch {
|
||||
case rw.PrintFmt != "":
|
||||
s = rw.PrintFmt
|
||||
s = strings.Replace(s, "<src>", from.String(), -1)
|
||||
s = strings.Replace(s, "<dst>", to.String(), -1)
|
||||
s = strings.Replace(s, "<src>", enc.Encode(from), -1)
|
||||
s = strings.Replace(s, "<dst>", enc.Encode(to), -1)
|
||||
s = strings.Replace(s, "<linkname>", linkname, -1)
|
||||
default:
|
||||
s += to.String()
|
||||
s += enc.Encode(to)
|
||||
}
|
||||
|
||||
return rw.res.Emit(&RefWrapper{Ref: s})
|
||||
|
||||
@ -82,6 +82,14 @@ Resolve the value of an IPFS DAG path:
|
||||
name := req.Arguments[0]
|
||||
recursive, _ := req.Options[resolveRecursiveOptionName].(bool)
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !cmdenv.CidBaseDefined(req) {
|
||||
enc, _ = cmdenv.CidEncoderFromPath(enc, name)
|
||||
}
|
||||
|
||||
// the case when ipns is resolved step by step
|
||||
if strings.HasPrefix(name, "/ipns/") && !recursive {
|
||||
rc, rcok := req.Options[resolveDhtRecordCountOptionName].(uint)
|
||||
@ -128,7 +136,7 @@ Resolve the value of an IPFS DAG path:
|
||||
return fmt.Errorf("found non-link at given path")
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &ncmd.ResolvedPath{Path: path.Path("/" + rp.Namespace() + "/" + rp.Cid().String())})
|
||||
return cmds.EmitOnce(res, &ncmd.ResolvedPath{Path: path.Path("/" + rp.Namespace() + "/" + enc.Encode(rp.Cid()))})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, rp *ncmd.ResolvedPath) error {
|
||||
|
||||
@ -3,6 +3,7 @@ package commands
|
||||
import (
|
||||
"errors"
|
||||
|
||||
cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv"
|
||||
dag "github.com/ipfs/go-ipfs/core/commands/dag"
|
||||
name "github.com/ipfs/go-ipfs/core/commands/name"
|
||||
ocmd "github.com/ipfs/go-ipfs/core/commands/object"
|
||||
@ -98,6 +99,9 @@ The CLI will exit with one of the following values:
|
||||
cmdkit.StringOption(ApiOption, "Use a specific API instance (defaults to /ip4/127.0.0.1/tcp/5001)"),
|
||||
|
||||
// global options, added to every command
|
||||
cmdenv.OptionCidBase,
|
||||
cmdenv.OptionOutputCidV1,
|
||||
|
||||
cmds.OptionEncodingType,
|
||||
cmds.OptionStreamChannels,
|
||||
cmds.OptionTimeout,
|
||||
|
||||
@ -43,6 +43,11 @@ represent it.
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
it := req.Files.Entries()
|
||||
file, err := cmdenv.GetFileArg(it)
|
||||
if err != nil {
|
||||
@ -58,7 +63,7 @@ represent it.
|
||||
|
||||
return cmds.EmitOnce(res, &AddEvent{
|
||||
Name: it.Name(),
|
||||
Hash: c.String(),
|
||||
Hash: enc.Encode(c),
|
||||
})
|
||||
},
|
||||
Type: AddEvent{},
|
||||
|
||||
@ -77,6 +77,11 @@ time.
|
||||
useTrickledag, _ := req.Options[trickleOptionName].(bool)
|
||||
dopin, _ := req.Options[pinOptionName].(bool)
|
||||
|
||||
enc, err := cmdenv.GetCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hreq, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -125,7 +130,7 @@ time.
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &BlockStat{
|
||||
Key: c.String(),
|
||||
Key: enc.Encode(c),
|
||||
Size: int(hres.ContentLength),
|
||||
})
|
||||
},
|
||||
|
||||
@ -66,15 +66,18 @@ type ListRes struct {
|
||||
Size uint64
|
||||
}
|
||||
|
||||
// FormatLong returns a human readable string for a ListRes object.
|
||||
func (r *ListRes) FormatLong() string {
|
||||
// FormatLong returns a human readable string for a ListRes object
|
||||
func (r *ListRes) FormatLong(enc func(cid.Cid) string) string {
|
||||
if enc == nil {
|
||||
enc = (cid.Cid).String
|
||||
}
|
||||
switch {
|
||||
case !r.Key.Defined():
|
||||
return "<corrupt key>"
|
||||
case r.FilePath == "":
|
||||
return r.Key.String()
|
||||
default:
|
||||
return fmt.Sprintf("%-50s %6d %s %d", r.Key, r.Size, r.FilePath, r.Offset)
|
||||
return fmt.Sprintf("%-50s %6d %s %d", enc(r.Key), r.Size, r.FilePath, r.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -272,6 +272,36 @@ test_add_cat_file() {
|
||||
echo "added QmZQWnfcqJ6hNkkPvrY9Q5X39GP3jUnUbAV4AbmbbR3Cb1 test_current_dir" > expected
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
# --cid-base=base32
|
||||
|
||||
test_expect_success "ipfs add --cid-base=base32 succeeds" '
|
||||
echo "Hello Worlds!" >mountdir/hello.txt &&
|
||||
ipfs add --cid-base=base32 mountdir/hello.txt >actual
|
||||
'
|
||||
|
||||
test_expect_success "ipfs add output looks good" '
|
||||
HASH="bafybeidpq7lcjx4w5c6yr4vuthzvlav54hgxsremwk73to5ferdc2rxhai" &&
|
||||
echo "added $HASH hello.txt" >expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
test_expect_success "ipfs add --cid-base=base32 --only-hash succeeds" '
|
||||
ipfs add --cid-base=base32 --only-hash mountdir/hello.txt > oh_actual
|
||||
'
|
||||
|
||||
test_expect_success "ipfs add --only-hash output looks good" '
|
||||
test_cmp expected oh_actual
|
||||
'
|
||||
|
||||
test_expect_success "ipfs cat succeeds" '
|
||||
ipfs cat "$HASH" >actual
|
||||
'
|
||||
|
||||
test_expect_success "ipfs cat output looks good" '
|
||||
echo "Hello Worlds!" >expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
}
|
||||
|
||||
test_add_cat_5MB() {
|
||||
@ -312,6 +342,25 @@ test_add_cat_5MB() {
|
||||
test_expect_success FUSE "cat ipfs/bigfile looks good" '
|
||||
test_cmp mountdir/bigfile actual
|
||||
'
|
||||
|
||||
test_expect_success "get base32 version of CID" '
|
||||
ipfs cid base32 $EXP_HASH > base32_cid &&
|
||||
BASE32_HASH=`cat base32_cid`
|
||||
'
|
||||
|
||||
test_expect_success "ipfs add --cid-base=base32 bigfile' succeeds" '
|
||||
ipfs add $ADD_FLAGS --cid-base=base32 mountdir/bigfile >actual ||
|
||||
test_fsh cat daemon_err
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs add bigfile --cid-base=base32' output looks good" '
|
||||
echo "added $BASE32_HASH bigfile" >expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs cat $BASE32_HASH' succeeds" '
|
||||
ipfs cat "$BASE32_HASH" >actual
|
||||
'
|
||||
}
|
||||
|
||||
test_add_cat_raw() {
|
||||
|
||||
@ -11,7 +11,6 @@ test_description="Test ls command"
|
||||
test_init_ipfs
|
||||
|
||||
test_ls_cmd() {
|
||||
|
||||
test_expect_success "'ipfs add -r testData' succeeds" '
|
||||
mkdir -p testData testData/d1 testData/d2 &&
|
||||
echo "test" >testData/f1 &&
|
||||
@ -109,6 +108,15 @@ QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 6 a
|
||||
EOF
|
||||
test_cmp expected_ls_headers actual_ls_headers
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs ls --size=false --cid-base=base32 <three dir hashes>' succeeds" '
|
||||
ipfs ls --size=false --cid-base=base32 $(cid-fmt -v 1 -b base32 %s QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss) >actual_ls_base32
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs ls --size=false --cid-base=base32 <three dir hashes>' output looks good" '
|
||||
cid-fmt -b base32 -v 1 --filter %s < expected_ls > expected_ls_base32
|
||||
test_cmp expected_ls_base32 actual_ls_base32
|
||||
'
|
||||
}
|
||||
|
||||
|
||||
|
||||
test/sharness/t0051-object-data/mixed.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{"Data": "another",
 "Links": [
  {"Name": "some link", "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V", "Size": 8},
  {"Name": "inlined", "Hash": "z4CrgyEyhm4tAw1pgzQtNNuP7", "Size": 14}
]}
@ -251,8 +251,6 @@ test_object_cmd() {
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
|
||||
|
||||
test_expect_success "object patch creation looks right" '
|
||||
echo "QmPc73aWK9dgFBXe86P4PvQizHo9e5Qt7n7DAMXWuigFuG" > hash_exp &&
|
||||
echo $N3 > hash_actual &&
|
||||
@ -350,6 +348,58 @@ test_object_cmd() {
|
||||
ipfs object get $HASH > actual_data_append &&
|
||||
test_cmp exp_data_append actual_data_append
|
||||
'
|
||||
|
||||
#
|
||||
# CidBase Tests
|
||||
#
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32' succeeds" '
|
||||
ipfs object put --cid-base=base32 ../t0051-object-data/testPut.json > actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32' output looks good" '
|
||||
HASH="QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD" &&
|
||||
printf "added $HASH\n" > expected_putOut &&
|
||||
test_cmp expected_putOut actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32 --output-cidv1=true' succeeds" '
|
||||
ipfs object put --cid-base=base32 --output-cidv1=true ../t0051-object-data/testPut.json > actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32 --output-cidv1=true' output looks good" '
|
||||
HASH=$(ipfs cid base32 "QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD") &&
|
||||
printf "added $HASH\n" > expected_putOut &&
|
||||
test_cmp expected_putOut actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'insert json dag with both CidV0 and CidV1 links'" '
|
||||
MIXED=$(ipfs object put ../t0051-object-data/mixed.json -q) &&
|
||||
echo $MIXED
|
||||
'
|
||||
|
||||
test_expect_success "ipfs object get then put creates identical object with --cid-base=base32" '
|
||||
ipfs object get --cid-base=base32 $MIXED > mixedv2.json &&
|
||||
MIXED2=$(ipfs object put -q mixedv2.json) &&
|
||||
echo "$MIXED =? $MIXED2" &&
|
||||
test "$MIXED" = "$MIXED2"
|
||||
'
|
||||
|
||||
HASHv0=QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V
|
||||
HASHv1=z4CrgyEyhm4tAw1pgzQtNNuP7
|
||||
|
||||
test_expect_success "ipfs object get with --cid-base=base32 uses base32 for CidV1 link only" '
|
||||
ipfs object get --cid-base=base32 $MIXED > mixed.actual &&
|
||||
grep -q $HASHv0 mixed.actual &&
|
||||
grep -q $(ipfs cid base32 $HASHv1) mixed.actual
|
||||
'
|
||||
|
||||
test_expect_success "ipfs object links --cid-base=base32 --output-cidv1=true converts both links" '
|
||||
ipfs object links --cid-base=base32 --output-cidv1=true $MIXED | awk "{print \$1}" | sort > links.actual &&
|
||||
echo $(ipfs cid base32 $HASHv1) > links.expected
|
||||
echo $(ipfs cid base32 $HASHv0) >> links.expected
|
||||
test_cmp links.actual links.expected
|
||||
'
|
||||
}
|
||||
|
||||
test_object_content_type() {
|
||||
|
||||
@ -26,6 +26,23 @@ test_expect_success "make an ipld object in json" '
|
||||
'
|
||||
|
||||
test_dag_cmd() {
|
||||
test_expect_success "can add an ipld object using protobuf" '
|
||||
IPLDHASH=$(cat ipld_object | ipfs dag put -f protobuf)
|
||||
'
|
||||
|
||||
test_expect_success "output looks correct" '
|
||||
EXPHASH="QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"
|
||||
test $EXPHASH = $IPLDHASH
|
||||
'
|
||||
|
||||
test_expect_success "can add an ipld object using protobuf and --cid=base=base32" '
|
||||
IPLDHASHb32=$(cat ipld_object | ipfs dag put -f protobuf --cid-base=base32)
|
||||
'
|
||||
|
||||
test_expect_success "output looks correct (does not upgrade to CIDv1)" '
|
||||
test $EXPHASH = $IPLDHASHb32
|
||||
'
|
||||
|
||||
test_expect_success "can add an ipld object" '
|
||||
IPLDHASH=$(cat ipld_object | ipfs dag put)
|
||||
'
|
||||
@ -35,6 +52,14 @@ test_dag_cmd() {
|
||||
test $EXPHASH = $IPLDHASH
|
||||
'
|
||||
|
||||
test_expect_success "can add an ipld object using --cid-base=base32" '
|
||||
IPLDHASHb32=$(cat ipld_object | ipfs dag put --cid-base=base32)
|
||||
'
|
||||
|
||||
test_expect_success "output looks correct" '
|
||||
test $(ipfs cid base32 $EXPHASH) = $IPLDHASHb32
|
||||
'
|
||||
|
||||
test_expect_success "various path traversals work" '
|
||||
ipfs cat $IPLDHASH/cats/0 > out1 &&
|
||||
ipfs cat $IPLDHASH/cats/1/water > out2 &&
|
||||
@ -206,6 +231,43 @@ test_dag_cmd() {
|
||||
test_cmp resolve_obj_exp resolve_obj &&
|
||||
test_cmp resolve_data_exp resolve_data
|
||||
'
|
||||
|
||||
test_expect_success "get base32 version of hashes for testing" '
|
||||
HASHb32=$(ipfs cid base32 $HASH) &&
|
||||
NESTED_HASHb32=$(ipfs cid base32 $NESTED_HASH)
|
||||
'
|
||||
|
||||
test_expect_success "dag resolve some things with --cid-base=base32" '
|
||||
ipfs dag resolve $HASH --cid-base=base32 > resolve_hash &&
|
||||
ipfs dag resolve ${HASH}/obj --cid-base=base32 > resolve_obj &&
|
||||
ipfs dag resolve ${HASH}/obj/data --cid-base=base32 > resolve_data
|
||||
'
|
||||
|
||||
test_expect_success "dag resolve output looks good with --cid-base=base32" '
|
||||
printf $HASHb32 > resolve_hash_exp &&
|
||||
printf $NESTED_HASHb32 > resolve_obj_exp &&
|
||||
printf $NESTED_HASHb32/data > resolve_data_exp &&
|
||||
|
||||
test_cmp resolve_hash_exp resolve_hash &&
|
||||
test_cmp resolve_obj_exp resolve_obj &&
|
||||
test_cmp resolve_data_exp resolve_data
|
||||
'
|
||||
|
||||
test_expect_success "dag resolve some things with base32 hash" '
|
||||
ipfs dag resolve $HASHb32 > resolve_hash &&
|
||||
ipfs dag resolve ${HASHb32}/obj > resolve_obj &&
|
||||
ipfs dag resolve ${HASHb32}/obj/data > resolve_data
|
||||
'
|
||||
|
||||
test_expect_success "dag resolve output looks good with base32 hash" '
|
||||
printf $HASHb32 > resolve_hash_exp &&
|
||||
printf $NESTED_HASHb32 > resolve_obj_exp &&
|
||||
printf $NESTED_HASHb32/data > resolve_data_exp &&
|
||||
|
||||
test_cmp resolve_hash_exp resolve_hash &&
|
||||
test_cmp resolve_obj_exp resolve_obj &&
|
||||
test_cmp resolve_data_exp resolve_data
|
||||
'
|
||||
}
|
||||
|
||||
# should work offline
|
||||
|
||||
@ -11,15 +11,19 @@ test_description="Test ipfs pinning operations"
|
||||
|
||||
test_pins() {
|
||||
EXTRA_ARGS=$1
|
||||
BASE=$2
|
||||
if [ -n "$BASE" ]; then
|
||||
BASE_ARGS="--cid-base=$BASE"
|
||||
fi
|
||||
|
||||
test_expect_success "create some hashes" '
|
||||
HASH_A=$(echo "A" | ipfs add -q --pin=false) &&
|
||||
HASH_B=$(echo "B" | ipfs add -q --pin=false) &&
|
||||
HASH_C=$(echo "C" | ipfs add -q --pin=false) &&
|
||||
HASH_D=$(echo "D" | ipfs add -q --pin=false) &&
|
||||
HASH_E=$(echo "E" | ipfs add -q --pin=false) &&
|
||||
HASH_F=$(echo "F" | ipfs add -q --pin=false) &&
|
||||
HASH_G=$(echo "G" | ipfs add -q --pin=false)
|
||||
test_expect_success "create some hashes $BASE" '
|
||||
HASH_A=$(echo "A" | ipfs add $BASE_ARGS -q --pin=false) &&
|
||||
HASH_B=$(echo "B" | ipfs add $BASE_ARGS -q --pin=false) &&
|
||||
HASH_C=$(echo "C" | ipfs add $BASE_ARGS -q --pin=false) &&
|
||||
HASH_D=$(echo "D" | ipfs add $BASE_ARGS -q --pin=false) &&
|
||||
HASH_E=$(echo "E" | ipfs add $BASE_ARGS -q --pin=false) &&
|
||||
HASH_F=$(echo "F" | ipfs add $BASE_ARGS -q --pin=false) &&
|
||||
HASH_G=$(echo "G" | ipfs add $BASE_ARGS -q --pin=false)
|
||||
'
|
||||
|
||||
test_expect_success "put all those hashes in a file" '
|
||||
@ -32,22 +36,53 @@ test_pins() {
|
||||
echo $HASH_G >> hashes
|
||||
'
|
||||
|
||||
if [ -n "$BASE" ]; then
|
||||
test_expect_success "make sure hashes are in $BASE" '
|
||||
cat hashes | xargs cid-fmt %b | sort -u > actual
|
||||
echo base32 > expected
|
||||
test_cmp expected actual
|
||||
'
|
||||
fi
|
||||
|
||||
test_expect_success "'ipfs pin add $EXTRA_ARGS' via stdin" '
|
||||
cat hashes | ipfs pin add $EXTRA_ARGS
|
||||
cat hashes | ipfs pin add $EXTRA_ARGS $BASE_ARGS | tee actual
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs pin add $EXTRA_ARGS' output looks good" '
|
||||
sed -e "s/^/pinned /; s/$/ recursively/" hashes > expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
test_expect_success "see if verify works" '
|
||||
ipfs pin verify
|
||||
'
|
||||
|
||||
test_expect_success "see if verify --verbose works" '
|
||||
ipfs pin verify --verbose > verify_out &&
|
||||
test $(cat verify_out | wc -l) > 8
|
||||
test_expect_success "see if verify --verbose $BASE_ARGS works" '
|
||||
ipfs pin verify --verbose $BASE_ARGS > verify_out &&
|
||||
test $(cat verify_out | wc -l) -ge 7 &&
|
||||
test_should_contain "$HASH_A ok" verify_out &&
|
||||
test_should_contain "$HASH_B ok" verify_out &&
|
||||
test_should_contain "$HASH_C ok" verify_out &&
|
||||
test_should_contain "$HASH_D ok" verify_out &&
|
||||
test_should_contain "$HASH_E ok" verify_out &&
|
||||
test_should_contain "$HASH_F ok" verify_out &&
|
||||
test_should_contain "$HASH_G ok" verify_out
|
||||
'
|
||||
|
||||
test_expect_success "test pin ls hash" '
|
||||
test_expect_success "ipfs pin ls $BASE_ARGS works" '
|
||||
ipfs pin ls $BASE_ARGS > ls_out &&
|
||||
test_should_contain "$HASH_A" ls_out &&
|
||||
test_should_contain "$HASH_B" ls_out &&
|
||||
test_should_contain "$HASH_C" ls_out &&
|
||||
test_should_contain "$HASH_D" ls_out &&
|
||||
test_should_contain "$HASH_E" ls_out &&
|
||||
test_should_contain "$HASH_F" ls_out &&
|
||||
test_should_contain "$HASH_G" ls_out
|
||||
'
|
||||
|
||||
test_expect_success "test pin ls $BASE_ARGS hash" '
|
||||
echo $HASH_B | test_must_fail grep /ipfs && # just to be sure
|
||||
ipfs pin ls $HASH_B > ls_hash_out &&
|
||||
ipfs pin ls $BASE_ARGS $HASH_B > ls_hash_out &&
|
||||
echo "$HASH_B recursive" > ls_hash_exp &&
|
||||
test_cmp ls_hash_exp ls_hash_out
|
||||
'
|
||||
@ -58,11 +93,11 @@ test_pins() {
|
||||
|
||||
test_expect_success "test pin update" '
|
||||
ipfs pin add "$HASH_A" &&
|
||||
ipfs pin ls > before_update &&
|
||||
ipfs pin ls $BASE_ARGS | tee before_update &&
|
||||
test_should_contain "$HASH_A" before_update &&
|
||||
test_must_fail grep -q "$HASH_B" before_update &&
|
||||
ipfs pin update --unpin=true "$HASH_A" "$HASH_B" &&
|
||||
ipfs pin ls > after_update &&
|
||||
ipfs pin ls $BASE_ARGS > after_update &&
|
||||
test_must_fail grep -q "$HASH_A" after_update &&
|
||||
test_should_contain "$HASH_B" after_update &&
|
||||
ipfs pin rm "$HASH_B"
|
||||
@ -129,6 +164,7 @@ test_init_ipfs
|
||||
|
||||
test_pins
|
||||
test_pins --progress
|
||||
test_pins '' base32
|
||||
|
||||
test_pins_error_reporting
|
||||
test_pins_error_reporting --progress
|
||||
@ -142,6 +178,7 @@ test_launch_ipfs_daemon --offline
|
||||
|
||||
test_pins
|
||||
test_pins --progress
|
||||
test_pins '' base32
|
||||
|
||||
test_pins_error_reporting
|
||||
test_pins_error_reporting --progress
|
||||
|
||||
@ -71,8 +71,12 @@ test_expect_success "create and add folders for refs" '
|
||||
[[ "$root" == "$refsroot" ]]
|
||||
'
|
||||
|
||||
test_expect_success "ipfs refs -r" '
|
||||
cat <<EOF > expected.txt
|
||||
test_refs_output() {
|
||||
ARGS=$1
|
||||
FILTER=$2
|
||||
|
||||
test_expect_success "ipfs refs $ARGS -r" '
|
||||
cat <<EOF | $FILTER > expected.txt
|
||||
QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
|
||||
QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
|
||||
QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
|
||||
@ -87,13 +91,13 @@ QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
|
||||
QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
|
||||
EOF
|
||||
|
||||
ipfs refs -r $refsroot > refsr.txt
|
||||
test_cmp expected.txt refsr.txt
|
||||
'
|
||||
ipfs refs $ARGS -r $refsroot > refsr.txt
|
||||
test_cmp expected.txt refsr.txt
|
||||
'
|
||||
|
||||
# Unique is like above but removing duplicates
|
||||
test_expect_success "ipfs refs -r --unique" '
|
||||
cat <<EOF > expected.txt
|
||||
# Unique is like above but removing duplicates
|
||||
test_expect_success "ipfs refs $ARGS -r --unique" '
|
||||
cat <<EOF | $FILTER > expected.txt
|
||||
QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
|
||||
QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
|
||||
QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
|
||||
@ -101,40 +105,40 @@ QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
|
||||
QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
|
||||
EOF
|
||||
|
||||
ipfs refs -r --unique $refsroot > refsr.txt
|
||||
test_cmp expected.txt refsr.txt
|
||||
'
|
||||
ipfs refs $ARGS -r --unique $refsroot > refsr.txt
|
||||
test_cmp expected.txt refsr.txt
|
||||
'
|
||||
|
||||
# First level is 1.txt, B, C, D
|
||||
test_expect_success "ipfs refs" '
|
||||
cat <<EOF > expected.txt
|
||||
# First level is 1.txt, B, C, D
|
||||
test_expect_success "ipfs refs $ARGS" '
|
||||
cat <<EOF | $FILTER > expected.txt
|
||||
QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
|
||||
QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
|
||||
QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
|
||||
QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
|
||||
EOF
|
||||
ipfs refs $refsroot > refs.txt
|
||||
test_cmp expected.txt refs.txt
|
||||
'
|
||||
ipfs refs $ARGS $refsroot > refs.txt
|
||||
test_cmp expected.txt refs.txt
|
||||
'
|
||||
|
||||
# max-depth=0 should return an empty list
|
||||
test_expect_success "ipfs refs -r --max-depth=0" '
|
||||
cat <<EOF > expected.txt
|
||||
# max-depth=0 should return an empty list
|
||||
test_expect_success "ipfs refs $ARGS -r --max-depth=0" '
|
||||
cat <<EOF > expected.txt
|
||||
EOF
|
||||
ipfs refs -r --max-depth=0 $refsroot > refs.txt
|
||||
test_cmp expected.txt refs.txt
|
||||
'
|
||||
ipfs refs $ARGS -r --max-depth=0 $refsroot > refs.txt
|
||||
test_cmp expected.txt refs.txt
|
||||
'
|
||||
|
||||
# max-depth=1 should be equivalent to running without -r
|
||||
test_expect_success "ipfs refs -r --max-depth=1" '
|
||||
ipfs refs -r --max-depth=1 $refsroot > refsr.txt
|
||||
ipfs refs $refsroot > refs.txt
|
||||
test_cmp refsr.txt refs.txt
|
||||
'
|
||||
# max-depth=1 should be equivalent to running without -r
|
||||
test_expect_success "ipfs refs $ARGS -r --max-depth=1" '
|
||||
ipfs refs $ARGS -r --max-depth=1 $refsroot > refsr.txt
|
||||
ipfs refs $ARGS $refsroot > refs.txt
|
||||
test_cmp refsr.txt refs.txt
|
||||
'
|
||||
|
||||
# We should see the depth limit engage at level 2
|
||||
test_expect_success "ipfs refs -r --max-depth=2" '
|
||||
cat <<EOF > expected.txt
|
||||
# We should see the depth limit engage at level 2
|
||||
test_expect_success "ipfs refs $ARGS -r --max-depth=2" '
|
||||
cat <<EOF | $FILTER > expected.txt
|
||||
QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
|
||||
QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
|
||||
QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
|
||||
@ -144,33 +148,38 @@ QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
|
||||
QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
|
||||
QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
|
||||
EOF
|
||||
ipfs refs -r --max-depth=2 $refsroot > refsr.txt
|
||||
test_cmp refsr.txt expected.txt
|
||||
'
|
||||
ipfs refs $ARGS -r --max-depth=2 $refsroot > refsr.txt
|
||||
test_cmp refsr.txt expected.txt
|
||||
'
|
||||
|
||||
# Here branch pruning and re-exploration come into place
|
||||
# At first it should see D at level 2 and don't go deeper.
|
||||
# But then after doing C it will see D at level 1 and go deeper
|
||||
# so that it outputs the hash for 2.txt (-q61).
|
||||
# We also see that C/B is pruned as it's been shown before.
|
||||
#
|
||||
# Excerpt from diagram above:
|
||||
#
|
||||
# L0- _______ A_________
|
||||
# / | \ \
|
||||
# L1- B C D 1.txt
|
||||
# / \ | |
|
||||
# L2- D 1.txt B 2.txt
|
||||
test_expect_success "ipfs refs -r --unique --max-depth=2" '
|
||||
cat <<EOF > expected.txt
|
||||
# Here branch pruning and re-exploration come into place
|
||||
# At first it should see D at level 2 and don't go deeper.
|
||||
# But then after doing C it will see D at level 1 and go deeper
|
||||
# so that it outputs the hash for 2.txt (-q61).
|
||||
# We also see that C/B is pruned as it's been shown before.
|
||||
#
|
||||
# Excerpt from diagram above:
|
||||
#
|
||||
# L0- _______ A_________
|
||||
# / | \ \
|
||||
# L1- B C D 1.txt
|
||||
# / \ | |
|
||||
# L2- D 1.txt B 2.txt
|
||||
test_expect_success "ipfs refs $ARGS -r --unique --max-depth=2" '
|
||||
cat <<EOF | $FILTER > expected.txt
|
||||
QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v
|
||||
QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa
|
||||
QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS
|
||||
QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH
|
||||
QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61
|
||||
EOF
|
||||
ipfs refs -r --unique --max-depth=2 $refsroot > refsr.txt
|
||||
test_cmp refsr.txt expected.txt
|
||||
'
|
||||
ipfs refs $ARGS -r --unique --max-depth=2 $refsroot > refsr.txt
|
||||
test_cmp refsr.txt expected.txt
|
||||
'
|
||||
}
|
||||
|
||||
test_refs_output '' 'cat'
|
||||
|
||||
test_refs_output '--cid-base=base32' 'ipfs cid base32'
|
||||
|
||||
test_done
|
||||
|
||||
@ -12,6 +12,9 @@ test_expect_success "resolve: prepare files" '
|
||||
a_hash=$(ipfs add -q -r a | tail -n1) &&
|
||||
b_hash=$(ipfs add -q -r a/b | tail -n1) &&
|
||||
c_hash=$(ipfs add -q -r a/b/c | tail -n1)
|
||||
a_hash_b32=$(cid-fmt -v 1 -b b %s $a_hash)
|
||||
b_hash_b32=$(cid-fmt -v 1 -b b %s $b_hash)
|
||||
c_hash_b32=$(cid-fmt -v 1 -b b %s $c_hash)
|
||||
'
|
||||
|
||||
test_expect_success "resolve: prepare dag" '
|
||||
@ -45,9 +48,10 @@ test_resolve_setup_name_fail() {
|
||||
test_resolve() {
|
||||
src=$1
|
||||
dst=$2
|
||||
extra=$3
|
||||
|
||||
test_expect_success "resolve succeeds: $src" '
|
||||
ipfs resolve -r "$src" >actual
|
||||
ipfs resolve $extra -r "$src" >actual
|
||||
'
|
||||
|
||||
test_expect_success "resolved correctly: $src -> $dst" '
|
||||
@ -57,7 +61,6 @@ test_resolve() {
|
||||
}
|
||||
|
||||
test_resolve_cmd() {
|
||||
|
||||
test_resolve "/ipfs/$a_hash" "/ipfs/$a_hash"
|
||||
test_resolve "/ipfs/$a_hash/b" "/ipfs/$b_hash"
|
||||
test_resolve "/ipfs/$a_hash/b/c" "/ipfs/$c_hash"
|
||||
@ -76,6 +79,30 @@ test_resolve_cmd() {
|
||||
test_resolve "/ipns/$id_hash" "/ipfs/$c_hash"
|
||||
}
|
||||
|
||||
test_resolve_cmd_b32() {
|
||||
# no flags needed, base should be preserved
|
||||
|
||||
test_resolve "/ipfs/$a_hash_b32" "/ipfs/$a_hash_b32"
|
||||
test_resolve "/ipfs/$a_hash_b32/b" "/ipfs/$b_hash_b32"
|
||||
test_resolve "/ipfs/$a_hash_b32/b/c" "/ipfs/$c_hash_b32"
|
||||
test_resolve "/ipfs/$b_hash_b32/c" "/ipfs/$c_hash_b32"
|
||||
|
||||
# flags needed: the passed-in path does not contain a CID to derive the base from
|
||||
|
||||
test_resolve_setup_name "/ipfs/$a_hash_b32"
|
||||
test_resolve "/ipns/$id_hash" "/ipfs/$a_hash_b32" --cid-base=base32
|
||||
test_resolve "/ipns/$id_hash/b" "/ipfs/$b_hash_b32" --cid-base=base32
|
||||
test_resolve "/ipns/$id_hash/b/c" "/ipfs/$c_hash_b32" --cid-base=base32
|
||||
|
||||
test_resolve_setup_name "/ipfs/$b_hash_b32" --cid-base=base32
|
||||
test_resolve "/ipns/$id_hash" "/ipfs/$b_hash_b32" --cid-base=base32
|
||||
test_resolve "/ipns/$id_hash/c" "/ipfs/$c_hash_b32" --cid-base=base32
|
||||
|
||||
test_resolve_setup_name "/ipfs/$c_hash_b32"
|
||||
test_resolve "/ipns/$id_hash" "/ipfs/$c_hash_b32" --cid-base=base32
|
||||
}


#todo remove this once the online resolve is fixed
test_resolve_fail() {
src=$1
@@ -117,6 +144,7 @@ test_resolve_cmd_fail() {

# should work offline
test_resolve_cmd
test_resolve_cmd_b32

# should work online
test_launch_ipfs_daemon

@@ -46,4 +46,13 @@ test_expect_success "files look right" '
[ -x foo/script ]
'

test_expect_success "'ipfs tar add --cid-base=base32' succeeds" '
ipfs tar add --cid-base=base32 files.tar > actual
'

test_expect_success "'ipfs tar add --cid-base=base32' has correct hash" '
ipfs cid base32 $TAR_HASH > expected &&
test_cmp expected actual
'

test_done
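The tar check works the same way: ipfs tar add prints a single CID, and with --cid-base=base32 that line should equal the existing $TAR_HASH re-encoded by ipfs cid base32. A hedged restatement of what the two tests compare, using a hypothetical default.out/base32.out pair:

ipfs tar add files.tar > default.out                     # default base, as in the earlier tests
ipfs tar add --cid-base=base32 files.tar > base32.out    # same object, printed as a base32 CIDv1
ipfs cid base32 $(cat default.out) | diff - base32.out   # expected to be identical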

@@ -202,6 +202,12 @@ test_files_api() {
test_cmp ls_l_expected ls_l_actual
'

test_expect_success "file has correct hash and size listed with -l --cid-base=base32" '
echo "file1 `cid-fmt -v 1 -b base32 %s $FILE1` 4" > ls_l_expected &&
ipfs files ls --cid-base=base32 -l /cats/file1 > ls_l_actual &&
test_cmp ls_l_expected ls_l_actual
'

test_expect_success "file shows up with the correct name" '
echo "file1" > ls_l_expected &&
ipfs files ls /cats/file1 > ls_l_actual &&
@@ -221,6 +227,19 @@ test_files_api() {
test_cmp file1stat_expect file1stat_actual
'

test_expect_success "can stat file with --cid-base=base32 $EXTRA" '
ipfs files stat --cid-base=base32 /cats/file1 > file1stat_orig
'

test_expect_success "stat output looks good with --cid-base=base32" '
grep -v CumulativeSize: file1stat_orig > file1stat_actual &&
echo `cid-fmt -v 1 -b base32 %s $FILE1` > file1stat_expect &&
echo "Size: 4" >> file1stat_expect &&
echo "ChildBlocks: 0" >> file1stat_expect &&
echo "Type: file" >> file1stat_expect &&
test_cmp file1stat_expect file1stat_actual
'

test_expect_success "can read file $EXTRA" '
ipfs files read /cats/file1 > file1out
'

@@ -63,40 +63,42 @@ EOF

sort < verify_expect_file_order > verify_expect_key_order

IPFS_CMD="ipfs"

test_filestore_adds() {
test_expect_success "nocopy add succeeds" '
HASH=$(ipfs add --raw-leaves --nocopy -r -q somedir | tail -n1)
test_expect_success "$IPFS_CMD add nocopy add succeeds" '
HASH=$($IPFS_CMD add --raw-leaves --nocopy -r -q somedir | tail -n1)
'

test_expect_success "nocopy add has right hash" '
test "$HASH" = "$EXPHASH"
'

test_expect_success "'ipfs filestore ls' output looks good'" '
ipfs filestore ls | sort > ls_actual &&
test_expect_success "'$IPFS_CMD filestore ls' output looks good'" '
$IPFS_CMD filestore ls | sort > ls_actual &&
test_cmp ls_expect_key_order ls_actual
'

test_expect_success "'ipfs filestore ls --file-order' output looks good'" '
ipfs filestore ls --file-order > ls_actual &&
test_expect_success "'$IPFS_CMD filestore ls --file-order' output looks good'" '
$IPFS_CMD filestore ls --file-order > ls_actual &&
test_cmp ls_expect_file_order ls_actual
'

test_expect_success "'ipfs filestore ls HASH' works" '
ipfs filestore ls $FILE1_HASH > ls_actual &&
test_expect_success "'$IPFS_CMD filestore ls HASH' works" '
$IPFS_CMD filestore ls $FILE1_HASH > ls_actual &&
grep -q somedir/file1 ls_actual
'

test_expect_success "can retrieve multi-block file" '
ipfs cat $FILE3_HASH > file3.data &&
$IPFS_CMD cat $FILE3_HASH > file3.data &&
test_cmp somedir/file3 file3.data
'
}

# check that the filestore is in a clean state
test_filestore_state() {
test_expect_success "ipfs filestore verify' output looks good'" '
ipfs filestore verify | LC_ALL=C sort > verify_actual
test_expect_success "$IPFS_CMD filestore verify' output looks good'" '
$IPFS_CMD filestore verify | LC_ALL=C sort > verify_actual
test_cmp verify_expect_key_order verify_actual
'
}
@@ -104,13 +106,13 @@ test_filestore_state() {
test_filestore_verify() {
test_filestore_state

test_expect_success "ipfs filestore verify --file-order' output looks good'" '
ipfs filestore verify --file-order > verify_actual
test_expect_success "$IPFS_CMD filestore verify --file-order' output looks good'" '
$IPFS_CMD filestore verify --file-order > verify_actual
test_cmp verify_expect_file_order verify_actual
'

test_expect_success "'ipfs filestore verify HASH' works" '
ipfs filestore verify $FILE1_HASH > verify_actual &&
test_expect_success "'$IPFS_CMD filestore verify HASH' works" '
$IPFS_CMD filestore verify $FILE1_HASH > verify_actual &&
grep -q somedir/file1 verify_actual
'

@@ -119,11 +121,11 @@ test_filestore_verify() {
'

test_expect_success "can not retrieve block after backing file moved" '
test_must_fail ipfs cat $FILE1_HASH
test_must_fail $IPFS_CMD cat $FILE1_HASH
'

test_expect_success "'ipfs filestore verify' shows file as missing" '
ipfs filestore verify > verify_actual &&
test_expect_success "'$IPFS_CMD filestore verify' shows file as missing" '
$IPFS_CMD filestore verify > verify_actual &&
grep no-file verify_actual | grep -q somedir/file1
'

@@ -132,7 +134,7 @@ test_filestore_verify() {
'

test_expect_success "block okay now" '
ipfs cat $FILE1_HASH > file1.data &&
$IPFS_CMD cat $FILE1_HASH > file1.data &&
test_cmp somedir/file1 file1.data
'

@@ -141,11 +143,11 @@ test_filestore_verify() {
'

test_expect_success "can not retrieve block after backing file changed" '
test_must_fail ipfs cat $FILE3_HASH
test_must_fail $IPFS_CMD cat $FILE3_HASH
'

test_expect_success "'ipfs filestore verify' shows file as changed" '
ipfs filestore verify > verify_actual &&
test_expect_success "'$IPFS_CMD filestore verify' shows file as changed" '
$IPFS_CMD filestore verify > verify_actual &&
grep changed verify_actual | grep -q somedir/file3
'

@@ -157,9 +159,9 @@ test_filestore_dups() {
# make sure the filestore is in a clean state
test_filestore_state

test_expect_success "'ipfs filestore dups'" '
ipfs add --raw-leaves somedir/file1 &&
ipfs filestore dups > dups_actual &&
test_expect_success "'$IPFS_CMD filestore dups'" '
$IPFS_CMD add --raw-leaves somedir/file1 &&
$IPFS_CMD filestore dups > dups_actual &&
echo "$FILE1_HASH" > dups_expect
test_cmp dups_expect dups_actual
'
@@ -195,4 +197,72 @@ test_filestore_dups

test_kill_ipfs_daemon

##
## base32
##

EXPHASH="bafybeibva2uh4qpwjo2yr5g7m7nd5kfq64atydq77qdlrikh5uejwqdcbi"

cat <<EOF > ls_expect_file_order
bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0
bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0
bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0
bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144
bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288
bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432
EOF

sort < ls_expect_file_order > ls_expect_key_order

FILE1_HASH=bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq
FILE2_HASH=bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey
FILE3_HASH=bafybeih24zygzr2orr5q62mjnbgmjwgj6rx3tp74pwcqsqth44rloncllq

cat <<EOF > verify_expect_file_order
ok bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0
ok bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0
ok bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0
ok bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144
ok bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288
ok bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432
EOF

sort < verify_expect_file_order > verify_expect_key_order

IPFS_CMD="ipfs --cid-base=base32"
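Redefining IPFS_CMD is what lets the same test_filestore_* functions run a second time: the first pass used plain ipfs with the CIDv0 (Qm...) expectations earlier in the file, while this pass prepends the global --cid-base=base32 flag and compares against the bafk.../bafy... listings above. Roughly, the second pass expands to commands like the following (expected hashes taken from the listings above):

ipfs --cid-base=base32 add --raw-leaves --nocopy -r -q somedir | tail -n1
# expected root: bafybeibva2uh4qpwjo2yr5g7m7nd5kfq64atydq77qdlrikh5uejwqdcbi
ipfs --cid-base=base32 filestore ls | sort
# expected to list entries such as:
# bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0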

#
# No daemon
#

test_init

test_filestore_adds

test_filestore_verify

test_filestore_dups

#
# With daemon
#

test_init

# must be in offline mode so tests that retrieve non-existent blocks
# don't hang
test_launch_ipfs_daemon --offline

test_filestore_adds

test_filestore_verify

test_filestore_dups

test_kill_ipfs_daemon

test_done

##

test_done

@@ -150,6 +150,13 @@ test_expect_success "check that the trickle option works" '
test $HASHat = $HASHut
'

test_expect_success "add files using gateway address via url store using --cid-base=base32" '
HASH1a=$(ipfs add -q --trickle --raw-leaves=false file1) &&
HASH2a=$(ipfs add -q --trickle --raw-leaves=false file2) &&
HASH1b32=$(ipfs --cid-base=base32 urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a) &&
HASH2b32=$(ipfs --cid-base=base32 urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a)
'

test_kill_ipfs_daemon

test_expect_success "files can not be retrieved via the urlstore" '
@@ -167,4 +174,11 @@ test_expect_success "check that the hashes were correct" '
test $HASH3e = $HASH3
'

test_expect_success "check that the base32 hashes were correct" '
HASH1e32=$(ipfs cid base32 $HASH1e)
HASH2e32=$(ipfs cid base32 $HASH2e)
test $HASH1e32 = $HASH1b32 &&
test $HASH2e32 = $HASH2b32
'
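This last check ties the two runs together: HASH1e and HASH2e come from an earlier part of the test (not shown here), and re-encoding them with ipfs cid base32 should reproduce exactly what ipfs --cid-base=base32 urlstore add printed. A one-line restatement for the first file, reusing the test's own variables:

test "$(ipfs cid base32 $HASH1e)" = "$HASH1b32"   # default-base hash, re-encoded, matches the --cid-base=base32 output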

test_done