Merge branch 'master' into merge-release-v0.25.0

This commit is contained in:
Jorropo 2023-12-14 18:49:34 +01:00 committed by GitHub
commit 133feb6efb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 36 additions and 1030 deletions

View File

@ -1,5 +1,6 @@
# Kubo Changelogs
- [v0.26](docs/changelogs/v0.26.md)
- [v0.25](docs/changelogs/v0.25.md)
- [v0.24](docs/changelogs/v0.24.md)
- [v0.23](docs/changelogs/v0.23.md)

View File

@ -31,7 +31,6 @@ func TestROCommands(t *testing.T) {
"/dag/resolve",
"/dag/stat",
"/dag/export",
"/dns",
"/get",
"/ls",
"/name",
@ -136,9 +135,6 @@ func TestCommands(t *testing.T) {
"/diag/cmds/set-time",
"/diag/profile",
"/diag/sys",
"/dns",
"/file",
"/file/ls",
"/files",
"/files/chcid",
"/files/cp",
@ -229,7 +225,6 @@ func TestCommands(t *testing.T) {
"/refs",
"/refs/local",
"/repo",
"/repo/fsck",
"/repo/gc",
"/repo/migrate",
"/repo/stat",
@ -259,12 +254,7 @@ func TestCommands(t *testing.T) {
"/swarm/peering/ls",
"/swarm/peering/rm",
"/swarm/resources",
"/tar",
"/tar/add",
"/tar/cat",
"/update",
"/urlstore",
"/urlstore/add",
"/version",
"/version/deps",
}

View File

@ -1,78 +0,0 @@
package commands
import (
"fmt"
"io"
"strings"
namesys "github.com/ipfs/boxo/namesys"
"github.com/ipfs/boxo/path"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
ncmd "github.com/ipfs/kubo/core/commands/name"
cmds "github.com/ipfs/go-ipfs-cmds"
)
const (
	// dnsRecursiveOptionName names the -r/--recursive option of `ipfs dns`.
	dnsRecursiveOptionName = "recursive"
)
// DNSCmd implements `ipfs dns`, which resolves DNSLink (DNS TXT) records.
//
// Deprecated: superseded by `ipfs resolve /ipns/<domain-name>`
// (see https://github.com/ipfs/kubo/issues/8607).
var DNSCmd = &cmds.Command{
	Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/8607
	Helptext: cmds.HelpText{
		Tagline: "Resolve DNSLink records. Deprecated: Use 'ipfs resolve /ipns/domain-name' instead.",
		ShortDescription: `
This command can only recursively resolve DNSLink TXT records.
It will fail to recursively resolve through IPNS keys etc.
DEPRECATED: superseded by 'ipfs resolve'
For general-purpose recursive resolution, use 'ipfs resolve -r'.
It will work across multiple DNSLinks and IPNS keys.
`,
	},

	Arguments: []cmds.Argument{
		cmds.StringArg("domain-name", true, false, "The domain-name name to resolve.").EnableStdin(),
	},
	Options: []cmds.Option{
		cmds.BoolOption(dnsRecursiveOptionName, "r", "Resolve until the result is not a DNS link.").WithDefault(true),
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		node, err := cmdenv.GetNode(env)
		if err != nil {
			return err
		}

		recursive, _ := req.Options[dnsRecursiveOptionName].(bool)
		name := req.Arguments[0]

		// DNS-only resolver: by construction it cannot recurse through
		// IPNS keys, only through chained DNSLink records.
		resolver := namesys.NewDNSResolver(node.DNSResolver.LookupTXT)

		var routing []namesys.ResolveOption
		if !recursive {
			// Limit resolution to a single DNSLink hop.
			routing = append(routing, namesys.ResolveWithDepth(1))
		}

		if !strings.HasPrefix(name, "/ipns/") {
			name = "/ipns/" + name
		}

		p, err := path.NewPath(name)
		if err != nil {
			return err
		}

		// In non-recursive mode ErrResolveRecursion only signals that more
		// hops exist; the partial result below is still emitted.
		val, err := resolver.Resolve(req.Context, p, routing...)
		if err != nil && (recursive || err != namesys.ErrResolveRecursion) {
			return err
		}

		return cmds.EmitOnce(res, &ncmd.ResolvedPath{Path: val.Path.String()})
	},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ncmd.ResolvedPath) error {
			// Escape non-printable characters before writing to the terminal.
			fmt.Fprintln(w, cmdenv.EscNonPrint(out.Path))
			return nil
		}),
	},
	Type: ncmd.ResolvedPath{},
}

View File

@ -39,7 +39,6 @@ var RepoCmd = &cmds.Command{
Subcommands: map[string]*cmds.Command{
"stat": repoStatCmd,
"gc": repoGcCmd,
"fsck": repoFsckCmd,
"version": repoVersionCmd,
"verify": repoVerifyCmd,
"migrate": repoMigrateCmd,
@ -227,27 +226,6 @@ Version string The repo version.
},
}
// repoFsckCmd implements `ipfs repo fsck`. The command is a deprecated
// no-op kept only for backwards compatibility: it emits a fixed message
// and performs no checking or repair.
var repoFsckCmd = &cmds.Command{
	Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/6435
	Helptext: cmds.HelpText{
		Tagline: "Remove repo lockfiles.",
		ShortDescription: `
'ipfs repo fsck' is now a no-op.
`,
	},
	NoRemote: true,
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		// Single informational message; nothing is inspected or modified.
		return cmds.EmitOnce(res, &MessageOutput{"`ipfs repo fsck` is deprecated and does nothing.\n"})
	},
	Type: MessageOutput{},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *MessageOutput) error {
			// Use Fprint, not Fprintf: out.Message is data, not a format
			// string, so a literal '%' in it must not be interpreted.
			fmt.Fprint(w, out.Message)
			return nil
		}),
	},
}
type VerifyProgress struct {
Msg string
Progress int

View File

@ -8,7 +8,6 @@ import (
name "github.com/ipfs/kubo/core/commands/name"
ocmd "github.com/ipfs/kubo/core/commands/object"
"github.com/ipfs/kubo/core/commands/pin"
unixfs "github.com/ipfs/kubo/core/commands/unixfs"
cmds "github.com/ipfs/go-ipfs-cmds"
logging "github.com/ipfs/go-log"
@ -143,7 +142,6 @@ var rootSubcommands = map[string]*cmds.Command{
"dht": DhtCmd,
"routing": RoutingCmd,
"diag": DiagCmd,
"dns": DNSCmd,
"id": IDCmd,
"key": KeyCmd,
"log": LogCmd,
@ -157,10 +155,7 @@ var rootSubcommands = map[string]*cmds.Command{
"refs": RefsCmd,
"resolve": ResolveCmd,
"swarm": SwarmCmd,
"tar": TarCmd,
"file": unixfs.UnixFSCmd,
"update": ExternalBinary("Please see https://github.com/ipfs/ipfs-update/blob/master/README.md#install for installation instructions."),
"urlstore": urlStoreCmd,
"version": VersionCmd,
"shutdown": daemonShutdownCmd,
"cid": CidCmd,
@ -188,7 +183,6 @@ var rootROSubcommands = map[string]*cmds.Command{
},
},
"get": GetCmd,
"dns": DNSCmd,
"ls": LsCmd,
"name": {
Subcommands: map[string]*cmds.Command{

View File

@ -1,118 +0,0 @@
package commands
import (
"fmt"
"io"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
tar "github.com/ipfs/kubo/tar"
dag "github.com/ipfs/boxo/ipld/merkledag"
)
// TarCmd is the parent of the deprecated `ipfs tar` subcommands, which
// convert between tar archives and their merkledag representation.
var TarCmd = &cmds.Command{
	Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7951
	Helptext: cmds.HelpText{
		Tagline: "Utility functions for tar files in ipfs.",
	},

	Subcommands: map[string]*cmds.Command{
		"add": tarAddCmd,
		"cat": tarCatCmd,
	},
}
// tarAddCmd implements `ipfs tar add`: it parses a tar stream into a
// merkledag structure and emits the resulting root CID as an AddEvent.
var tarAddCmd = &cmds.Command{
	Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7951
	Helptext: cmds.HelpText{
		Tagline: "Import a tar file into IPFS.",
		ShortDescription: `
'ipfs tar add' will parse a tar file and create a merkledag structure to
represent it.
`,
	},

	Arguments: []cmds.Argument{
		cmds.FileArg("file", true, false, "Tar file to add.").EnableStdin(),
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		api, err := cmdenv.GetApi(env, req)
		if err != nil {
			return err
		}

		enc, err := cmdenv.GetCidEncoder(req)
		if err != nil {
			return err
		}

		it := req.Files.Entries()
		file, err := cmdenv.GetFileArg(it)
		if err != nil {
			return err
		}

		// Convert the archive into DAG nodes stored via the node's DAG service.
		node, err := tar.ImportTar(req.Context, file, api.Dag())
		if err != nil {
			return err
		}

		c := node.Cid()

		return cmds.EmitOnce(res, &AddEvent{
			Name: it.Name(),
			Hash: enc.Encode(c),
		})
	},
	Type: AddEvent{},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *AddEvent) error {
			// Text output is just the root hash, one per line.
			fmt.Fprintln(w, out.Hash)
			return nil
		}),
	},
}
// tarCatCmd implements `ipfs tar cat`: it re-serializes a DAG previously
// created by `ipfs tar add` back into a tar byte stream.
var tarCatCmd = &cmds.Command{
	Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7951
	Helptext: cmds.HelpText{
		Tagline: "Export a tar file from IPFS.",
		ShortDescription: `
'ipfs tar cat' will export a tar file from a previously imported one in IPFS.
`,
	},

	Arguments: []cmds.Argument{
		cmds.StringArg("path", true, false, "ipfs path of archive to export.").EnableStdin(),
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		api, err := cmdenv.GetApi(env, req)
		if err != nil {
			return err
		}

		p, err := cmdutils.PathOrCidPath(req.Arguments[0])
		if err != nil {
			return err
		}

		root, err := api.ResolveNode(req.Context, p)
		if err != nil {
			return err
		}

		// The archive root must be a protobuf node; ExportTar additionally
		// verifies it carries the tar marker payload.
		rootpb, ok := root.(*dag.ProtoNode)
		if !ok {
			return dag.ErrNotProtobuf
		}

		r, err := tar.ExportTar(req.Context, rootpb, api.Dag())
		if err != nil {
			return err
		}

		// Stream the reconstructed tar bytes directly to the client.
		return res.Emit(r)
	},
}

View File

@ -1,236 +0,0 @@
package unixfs
import (
"fmt"
"io"
"sort"
"text/tabwriter"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
merkledag "github.com/ipfs/boxo/ipld/merkledag"
unixfs "github.com/ipfs/boxo/ipld/unixfs"
cmds "github.com/ipfs/go-ipfs-cmds"
)
// LsLink describes one link (directory entry) of a listed object.
type LsLink struct {
	Name, Hash string
	Size       uint64
	Type       string
}

// LsObject is a resolved object together with its child links
// (Links is populated only for directories).
type LsObject struct {
	Hash  string
	Size  uint64
	Type  string
	Links []LsLink
}

// LsOutput maps each request argument to its resolved hash, and each
// resolved hash to the listed object (deduplicated across arguments).
type LsOutput struct {
	Arguments map[string]string
	Objects   map[string]*LsObject
}
// LsCmd implements the deprecated `ipfs file ls` command: it resolves each
// argument, groups results by resolved hash, and lists directory contents.
var LsCmd = &cmds.Command{
	Status: cmds.Deprecated, // https://github.com/ipfs/kubo/pull/7755
	Helptext: cmds.HelpText{
		Tagline: "List directory contents for Unix filesystem objects. Deprecated: Use 'ipfs ls' and 'ipfs files ls' instead.",
		ShortDescription: `
Displays the contents of an IPFS or IPNS object(s) at the given path.
The JSON output contains size information. For files, the child size
is the total size of the file contents. For directories, the child
size is the IPFS link size.
This functionality is deprecated, and will be removed in future versions as it duplicates the functionality of 'ipfs ls'.
If possible, please use 'ipfs ls' instead.
`,
		LongDescription: `
Displays the contents of an IPFS or IPNS object(s) at the given path.
The JSON output contains size information. For files, the child size
is the total size of the file contents. For directories, the child
size is the IPFS link size.
The path can be a prefixless ref; in this case, we assume it to be an
/ipfs ref and not /ipns.
Example:
> ipfs file ls QmW2WQi7j6c7UgJTarActp7tDNikE4B2qXtFCfLPdsgaTQ
cat.jpg
> ipfs file ls /ipfs/QmW2WQi7j6c7UgJTarActp7tDNikE4B2qXtFCfLPdsgaTQ
cat.jpg
This functionality is deprecated, and will be removed in future versions as it duplicates the functionality of 'ipfs ls'.
If possible, please use 'ipfs ls' instead.
`,
	},

	Arguments: []cmds.Argument{
		cmds.StringArg("ipfs-path", true, true, "The path to the IPFS object(s) to list links from.").EnableStdin(),
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		nd, err := cmdenv.GetNode(env)
		if err != nil {
			return err
		}

		api, err := cmdenv.GetApi(env, req)
		if err != nil {
			return err
		}

		if err := req.ParseBodyArgs(); err != nil {
			return err
		}

		paths := req.Arguments

		output := LsOutput{
			Arguments: map[string]string{},
			Objects:   map[string]*LsObject{},
		}

		for _, p := range paths {
			ctx := req.Context

			pth, err := cmdutils.PathOrCidPath(p)
			if err != nil {
				return err
			}

			merkleNode, err := api.ResolveNode(ctx, pth)
			if err != nil {
				return err
			}

			c := merkleNode.Cid()

			hash := c.String()
			output.Arguments[p] = hash

			if _, ok := output.Objects[hash]; ok {
				// duplicate argument for an already-listed node
				continue
			}

			ndpb, ok := merkleNode.(*merkledag.ProtoNode)
			if !ok {
				return merkledag.ErrNotProtobuf
			}

			unixFSNode, err := unixfs.FSNodeFromBytes(ndpb.Data())
			if err != nil {
				return err
			}

			t := unixFSNode.Type()

			output.Objects[hash] = &LsObject{
				Hash: c.String(),
				Type: t.String(),
				Size: unixFSNode.FileSize(),
			}

			switch t {
			case unixfs.TFile:
				// Files have no links to list; only the object entry is recorded.
				break
			case unixfs.THAMTShard:
				// We need a streaming ls API for this.
				return fmt.Errorf("cannot list large directories yet")
			case unixfs.TDirectory:
				// One LsLink per child; each child node is fetched to
				// report its UnixFS type and (for files) content size.
				links := make([]LsLink, len(merkleNode.Links()))
				output.Objects[hash].Links = links
				for i, link := range merkleNode.Links() {
					linkNode, err := link.GetNode(ctx, nd.DAG)
					if err != nil {
						return err
					}
					lnpb, ok := linkNode.(*merkledag.ProtoNode)
					if !ok {
						return merkledag.ErrNotProtobuf
					}
					d, err := unixfs.FSNodeFromBytes(lnpb.Data())
					if err != nil {
						return err
					}
					t := d.Type()
					lsLink := LsLink{
						Name: link.Name,
						Hash: link.Cid.String(),
						Type: t.String(),
					}
					if t == unixfs.TFile {
						lsLink.Size = d.FileSize()
					} else {
						// For non-files the DAG link size stands in for content size.
						lsLink.Size = link.Size
					}
					links[i] = lsLink
				}
			case unixfs.TSymlink:
				return fmt.Errorf("cannot list symlinks yet")
			default:
				return fmt.Errorf("unrecognized type: %s", t)
			}
		}

		return cmds.EmitOnce(res, &output)
	},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *LsOutput) error {
			tw := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0)

			// Split arguments into non-directories (printed bare, first)
			// and directories (printed with their contents below).
			nonDirectories := []string{}
			directories := []string{}
			for argument, hash := range out.Arguments {
				object, ok := out.Objects[hash]
				if !ok {
					return fmt.Errorf("unresolved hash: %s", hash)
				}

				if object.Type == "Directory" {
					directories = append(directories, argument)
				} else {
					nonDirectories = append(nonDirectories, argument)
				}
			}
			sort.Strings(nonDirectories)
			sort.Strings(directories)

			for _, argument := range nonDirectories {
				fmt.Fprintf(tw, "%s\n", argument)
			}

			// Each directory hash is printed once, even when several
			// arguments resolved to the same node.
			seen := map[string]bool{}
			for i, argument := range directories {
				hash := out.Arguments[argument]
				if _, ok := seen[hash]; ok {
					continue
				}
				seen[hash] = true

				object := out.Objects[hash]
				if i > 0 || len(nonDirectories) > 0 {
					fmt.Fprintln(tw)
				}
				if len(out.Arguments) > 1 {
					// With multiple arguments, label the directory with
					// every argument that resolved to this hash.
					for _, arg := range directories[i:] {
						if out.Arguments[arg] == hash {
							fmt.Fprintf(tw, "%s:\n", cmdenv.EscNonPrint(arg))
						}
					}
				}
				for _, link := range object.Links {
					fmt.Fprintf(tw, "%s\n", cmdenv.EscNonPrint(link.Name))
				}
			}
			tw.Flush()

			return nil
		}),
	},
	Type: LsOutput{},
}

View File

@ -1,20 +0,0 @@
package unixfs
import (
cmds "github.com/ipfs/go-ipfs-cmds"
)
// UnixFSCmd is the parent of the deprecated `ipfs file` subcommands, an
// old interface to UnixFS superseded by 'ipfs ls' and 'ipfs files'.
var UnixFSCmd = &cmds.Command{
	Status: cmds.Deprecated, // https://github.com/ipfs/kubo/pull/7755
	Helptext: cmds.HelpText{
		Tagline: "Interact with IPFS objects representing Unix filesystems.",
		ShortDescription: `
Old interface to file systems represented by UnixFS.
Superseded by modern alternatives: 'ipfs ls' and 'ipfs files'
`,
	},

	Subcommands: map[string]*cmds.Command{
		"ls": LsCmd,
	},
}

View File

@ -1,105 +0,0 @@
package commands
import (
"fmt"
"io"
"net/url"
filestore "github.com/ipfs/boxo/filestore"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/boxo/files"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/kubo/core/coreiface/options"
)
// urlStoreCmd is the parent of the deprecated `ipfs urlstore` subcommands.
var urlStoreCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Interact with urlstore.",
	},
	Subcommands: map[string]*cmds.Command{
		"add": urlAdd,
	},
}
// urlAdd implements the deprecated `ipfs urlstore add`: it adds the
// contents of a URL to IPFS without storing the data in the local repo,
// equivalent to `ipfs add --nocopy --cid-version=1 URL`.
var urlAdd = &cmds.Command{
	Status: cmds.Deprecated,
	Helptext: cmds.HelpText{
		Tagline: "Add URL via urlstore.",
		LongDescription: `
DEPRECATED: Use 'ipfs add --nocopy --cid-version=1 URL'.
Add URLs to ipfs without storing the data locally.
The URL provided must be stable and ideally on a web server under your
control.
The file is added using raw-leaves but otherwise using the default
settings for 'ipfs add'.
`,
	},
	Options: []cmds.Option{
		cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
		cmds.BoolOption(pinOptionName, "Pin this object when adding.").WithDefault(true),
	},
	Arguments: []cmds.Argument{
		cmds.StringArg("url", true, false, "URL to add to IPFS"),
	},
	Type: &BlockStat{},

	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		// Fixed: the original message had an unterminated quote around the
		// suggested replacement command.
		log.Error("The 'ipfs urlstore' command is deprecated, please use 'ipfs add --nocopy --cid-version=1'")

		urlString := req.Arguments[0]
		if !filestore.IsURL(urlString) {
			return fmt.Errorf("unsupported url syntax: %s", urlString)
		}

		// Named u (not url) so the net/url package is not shadowed.
		u, err := url.Parse(urlString)
		if err != nil {
			return err
		}

		enc, err := cmdenv.GetCidEncoder(req)
		if err != nil {
			return err
		}

		api, err := cmdenv.GetApi(env, req)
		if err != nil {
			return err
		}

		useTrickledag, _ := req.Options[trickleOptionName].(bool)
		dopin, _ := req.Options[pinOptionName].(bool)

		// Mirror `ipfs add --nocopy --cid-version=1`: CIDv1 + raw leaves
		// are required so blocks can reference the remote URL instead of
		// locally stored bytes.
		opts := []options.UnixfsAddOption{
			options.Unixfs.Pin(dopin),
			options.Unixfs.CidVersion(1),
			options.Unixfs.RawLeaves(true),
			options.Unixfs.Nocopy(true),
		}

		if useTrickledag {
			opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
		}

		file := files.NewWebFile(u)

		path, err := api.Unixfs().Add(req.Context, file, opts...)
		if err != nil {
			return err
		}
		size, _ := file.Size()
		return cmds.EmitOnce(res, &BlockStat{
			Key:  enc.Encode(path.RootCid()),
			Size: int(size),
		})
	},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, bs *BlockStat) error {
			_, err := fmt.Fprintln(w, bs.Key)
			return err
		}),
	},
}

29
docs/changelogs/v0.26.md Normal file
View File

@ -0,0 +1,29 @@
# Kubo changelog v0.26
- [v0.26.0](#v0260)
## v0.26.0
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
- [Several deprecated commands have been removed](#several-deprecated-commands-have-been-removed)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
### Overview
### 🔦 Highlights
#### Several deprecated commands have been removed
Several deprecated commands have been removed:
- `ipfs urlstore` deprecated in [April 2019, Kubo 0.4.21](https://github.com/ipfs/kubo/commit/8beaee63b3fa634c59b85179286ad3873921a535), use `ipfs add -q --nocopy --cid-version=1 {url}` instead.
- `ipfs repo fsck` deprecated in [July 2019, Kubo 0.5.0](https://github.com/ipfs/kubo/commit/288a83ce7dcbf4a2498e06e4a95245bbb5e30f45)
- `ipfs file` (and `ipfs file ls`) deprecated in [November 2020, Kubo 0.8.0](https://github.com/ipfs/kubo/commit/ec64dc5c396e7114590e15909384fabce0035482), use `ipfs ls` and `ipfs files ls` instead.
- `ipfs dns` deprecated in [April 2022, Kubo 0.13](https://github.com/ipfs/kubo/commit/76ae33a9f3f9abd166d1f6f23d6a8a0511510e3c), use `ipfs resolve /ipns/{name}` instead.
- `ipfs tar` deprecated [April 2022, Kubo 0.13](https://github.com/ipfs/kubo/pull/8849)
### 📝 Changelog
### 👨‍👩‍👧‍👦 Contributors

View File

@ -22,9 +22,12 @@ Place a `*.deny` file in one of directories:
- `$XDG_CONFIG_HOME/ipfs/denylists/` (`$HOME/.config/ipfs/denylists/` if `XDG_CONFIG_HOME` is not set)
- `/etc/ipfs/denylists/` (global)
Files need to be present before starting the `ipfs daemon` in order to be watched for updates.
Files need to be present before starting the `ipfs daemon` in order to be watched for any new updates
appended once started. Any other changes (such as removal of entries, prepending of entries, or
insertion of new entries before the EOF at time of daemon starting) will not be detected or processed
after boot; a restart of the daemon will be required for them to be factored in.
If an entirely new denylist file is added, `ipfs daemon` also needs to be restarted to track it.
CLI and Gateway users will receive errors in response to request impacted by a blocklist:

View File

@ -1,227 +0,0 @@
package tarfmt
import (
"archive/tar"
"bytes"
"context"
"errors"
"io"
"path"
"strings"
dag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/merkledag/dagutils"
importer "github.com/ipfs/boxo/ipld/unixfs/importer"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
chunker "github.com/ipfs/boxo/chunker"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
)
var log = logging.Logger("tarfmt")

var (
	// blockSize is the tar on-disk block size: headers occupy one block and
	// file data is zero-padded up to a multiple of it.
	blockSize = 512
	// zeroBlock is a reusable source of zero bytes for that padding.
	zeroBlock = make([]byte, blockSize)
)
func marshalHeader(h *tar.Header) ([]byte, error) {
buf := new(bytes.Buffer)
w := tar.NewWriter(buf)
err := w.WriteHeader(h)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// ImportTar imports a tar file into the given DAGService and returns the root
// node. Each archive entry becomes a node whose data is the raw tar header;
// file contents, when present, hang off that node under a "data" link.
func ImportTar(ctx context.Context, r io.Reader, ds ipld.DAGService) (*dag.ProtoNode, error) {
	tr := tar.NewReader(r)

	// The root carries a marker payload so ExportTar can verify the DAG
	// really is one of these tar archives.
	root := new(dag.ProtoNode)
	root.SetData([]byte("ipfs/tar"))

	e := dagutils.NewDagEditor(root, ds)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break // end of archive
			}
			return nil, err
		}

		header := new(dag.ProtoNode)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}

		header.SetData(headerBytes)

		if h.Size > 0 {
			// Chunk the entry's file contents and attach the resulting DAG
			// under the reserved "data" link.
			spl := chunker.NewRabin(tr, uint64(chunker.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLink("data", nd)
			if err != nil {
				return nil, err
			}
		}

		err = ds.Add(ctx, header)
		if err != nil {
			return nil, err
		}

		// Escape the entry path so no real path element can collide with
		// the reserved "data" link name.
		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.ProtoNode { return new(dag.ProtoNode) })
		if err != nil {
			return nil, err
		}
	}

	return e.Finalize(ctx, ds)
}
// escapePath prefixes every element of pth with '-' so that the literal
// link name "data" can be reserved for file contents without ever
// colliding with a real path component.
func escapePath(pth string) string {
	trimmed := strings.Trim(pth, "/")
	escaped := make([]string, 0, strings.Count(trimmed, "/")+1)
	for _, elem := range strings.Split(trimmed, "/") {
		escaped = append(escaped, "-"+elem)
	}
	return path.Join(escaped...)
}
// tarReader streams a stored tar archive back out as raw bytes. For each
// link it emits the stored tar header first, then the file data (padded to
// 512-byte blocks), and recurses into child links via childRead.
type tarReader struct {
	links     []*ipld.Link // remaining sibling entries to emit
	ds        ipld.DAGService
	childRead *tarReader // in-progress recursive reader, if any
	hdrBuf    *bytes.Reader // pending tar header bytes
	fileRead  *countReader // pending file contents
	pad       int // zero padding still owed after the last file's data
	ctx       context.Context
}
// Read emits the next chunk of the reconstructed tar stream. It drains, in
// order: a pending header, a pending child reader, pending file data, the
// zero padding owed after that data, and finally advances to the next link.
// When an inner stage hits io.EOF it returns the bytes read with a nil
// error so the caller's next Read continues with the following stage.
func (tr *tarReader) Read(b []byte) (int, error) {
	// if we have a header to be read, it takes priority
	if tr.hdrBuf != nil {
		n, err := tr.hdrBuf.Read(b)
		if err == io.EOF {
			tr.hdrBuf = nil
			return n, nil
		}
		return n, err
	}

	// no header remaining, check for recursive
	if tr.childRead != nil {
		n, err := tr.childRead.Read(b)
		if err == io.EOF {
			tr.childRead = nil
			return n, nil
		}
		return n, err
	}

	// check for filedata to be read
	if tr.fileRead != nil {
		n, err := tr.fileRead.Read(b)
		if err == io.EOF {
			// Compute the zero padding needed to round the file data up
			// to the next 512-byte block boundary.
			nr := tr.fileRead.n
			tr.pad = (blockSize - (nr % blockSize)) % blockSize
			tr.fileRead.Close()
			tr.fileRead = nil
			return n, nil
		}
		return n, err
	}

	// filedata reads must be padded out to 512 byte offsets
	if tr.pad > 0 {
		n := copy(b, zeroBlock[:tr.pad])
		tr.pad -= n
		return n, nil
	}

	if len(tr.links) == 0 {
		return 0, io.EOF
	}

	// Advance to the next entry: load its header node, queue its header
	// bytes, then either its file data ("data" link) or its children.
	next := tr.links[0]
	tr.links = tr.links[1:]

	headerNd, err := next.GetNode(tr.ctx, tr.ds)
	if err != nil {
		return 0, err
	}

	hndpb, ok := headerNd.(*dag.ProtoNode)
	if !ok {
		return 0, dag.ErrNotProtobuf
	}

	tr.hdrBuf = bytes.NewReader(hndpb.Data())

	// A missing "data" link is expected for entries without contents;
	// any other lookup error is fatal.
	dataNd, err := hndpb.GetLinkedProtoNode(tr.ctx, tr.ds, "data")
	if err != nil && !errors.Is(err, dag.ErrLinkNotFound) {
		return 0, err
	}

	if err == nil {
		dr, err := uio.NewDagReader(tr.ctx, dataNd, tr.ds)
		if err != nil {
			log.Error("dagreader error: ", err)
			return 0, err
		}

		tr.fileRead = &countReader{r: dr}
	} else if len(headerNd.Links()) > 0 {
		tr.childRead = &tarReader{
			links: headerNd.Links(),
			ds:    tr.ds,
			ctx:   tr.ctx,
		}
	}

	// Recurse to serve the freshly queued header immediately.
	return tr.Read(b)
}
// ExportTar exports the passed DAG as a tar file. This function is the inverse
// of ImportTar.
func ExportTar(ctx context.Context, root *dag.ProtoNode, ds ipld.DAGService) (io.Reader, error) {
	// Only DAGs created by ImportTar (marked with the "ipfs/tar" payload)
	// can be exported.
	if string(root.Data()) != "ipfs/tar" {
		return nil, errors.New("not an IPFS tarchive")
	}

	return &tarReader{
		links: root.Links(),
		ds:    ds,
		ctx:   ctx,
	}, nil
}
type countReader struct {
r io.ReadCloser
n int
}
func (r *countReader) Read(b []byte) (int, error) {
n, err := r.r.Read(b)
r.n += n
return n, err
}
func (r *countReader) Close() error {
return r.r.Close()
}

View File

@ -116,9 +116,6 @@ func TestAllRootCommandsAreMentionedInHelpText(t *testing.T) {
notInHelp := map[string]bool{
"object": true,
"shutdown": true,
"tar": true,
"urlstore": true,
"dns": true,
}
helpMsg := strings.TrimSpace(node.IPFS("--help").Stdout.String())
@ -159,7 +156,6 @@ func TestCommandDocsWidth(t *testing.T) {
"ipfs pin verify": true,
"ipfs dht get": true,
"ipfs pin remote service add": true,
"ipfs file ls": true,
"ipfs pin update": true,
"ipfs pin rm": true,
"ipfs p2p": true,
@ -177,10 +173,8 @@ func TestCommandDocsWidth(t *testing.T) {
"ipfs swarm addrs local": true,
"ipfs files ls": true,
"ipfs stats bw": true,
"ipfs urlstore add": true,
"ipfs swarm peers": true,
"ipfs pubsub sub": true,
"ipfs repo fsck": true,
"ipfs files write": true,
"ipfs swarm limit": true,
"ipfs commands completion fish": true,

View File

@ -1,140 +0,0 @@
#!/usr/bin/env bash
#
# Copyright (c) 2014 Christian Couder
# MIT Licensed; see the LICENSE file in this repository.
#

test_description="Test file ls command"

. lib/test-lib.sh

test_init_ipfs

# Runs the whole `ipfs file ls` suite once; it is invoked twice below so the
# command is exercised both offline and with a running daemon.
test_ls_cmd() {
	# Fixture tree: fixed strings plus seeded `random` output, so all the
	# CIDs asserted below are deterministic.
	test_expect_success "'ipfs add -r testData' succeeds" '
		mkdir -p testData testData/d1 testData/d2 &&
		echo "test" >testData/f1 &&
		echo "data" >testData/f2 &&
		echo "hello" >testData/d1/a &&
		random 128 42 >testData/d1/128 &&
		echo "world" >testData/d2/a &&
		random 1024 42 >testData/d2/1024 &&
		ipfs add -r testData >actual_add
	'

	test_expect_success "'ipfs add' output looks good" '
		cat <<-\EOF >expected_add &&
			added QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe testData/d1/128
			added QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN testData/d1/a
			added QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd testData/d2/1024
			added QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL testData/d2/a
			added QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH testData/f1
			added QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M testData/f2
			added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1
			added QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy testData/d2
			added QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj testData
		EOF
		test_cmp expected_add actual_add
	'

	# Single directory argument: bare listing of its entries.
	test_expect_success "'ipfs file ls <dir>' succeeds" '
		ipfs file ls QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy >actual_ls_one_directory
	'

	test_expect_success "'ipfs file ls <dir>' output looks good" '
		cat <<-\EOF >expected_ls_one_directory &&
			1024
			a
		EOF
		test_cmp expected_ls_one_directory actual_ls_one_directory
	'

	# Multiple directory arguments: each directory is labelled with its hash.
	test_expect_success "'ipfs file ls <three dir hashes>' succeeds" '
		ipfs file ls QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss >actual_ls_three_directories
	'

	test_expect_success "'ipfs file ls <three dir hashes>' output looks good" '
		cat <<-\EOF >expected_ls_three_directories &&
			QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy:
			1024
			a
			QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss:
			128
			a
			QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj:
			d1
			d2
			f1
			f2
		EOF
		test_cmp expected_ls_three_directories actual_ls_three_directories
	'

	# File arguments are echoed back as-is (files have no entries to list).
	test_expect_success "'ipfs file ls <file hashes>' succeeds" '
		ipfs file ls /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe >actual_ls_file
	'

	test_expect_success "'ipfs file ls <file hashes>' output looks good" '
		cat <<-\EOF >expected_ls_file &&
			/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024
			QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe
		EOF
		test_cmp expected_ls_file actual_ls_file
	'

	# Arguments resolving to the same node are listed once, but every
	# argument label that maps to that node is shown.
	test_expect_success "'ipfs file ls <duplicates>' succeeds" '
		ipfs file ls /ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1 /ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 /ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd >actual_ls_duplicates_file
	'

	test_expect_success "'ipfs file ls <duplicates>' output looks good" '
		cat <<-\EOF >expected_ls_duplicates_file &&
			/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024
			/ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd
			/ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss:
			/ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1:
			128
			a
		EOF
		test_cmp expected_ls_duplicates_file actual_ls_duplicates_file
	'

	# JSON encoding of the same results.
	test_expect_success "'ipfs --encoding=json file ls <file hashes>' succeeds" '
		ipfs --encoding=json file ls /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 >actual_json_ls_file
	'

	test_expect_success "'ipfs --encoding=json file ls <file hashes>' output looks good" '
		cat <<-\EOF >expected_json_ls_file_trailing_newline &&
			{"Arguments":{"/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024":"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd"},"Objects":{"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd":{"Hash":"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd","Size":1024,"Type":"File","Links":null}}}
		EOF
		printf "%s\n" "$(cat expected_json_ls_file_trailing_newline)" >expected_json_ls_file &&
		test_cmp expected_json_ls_file actual_json_ls_file
	'

	test_expect_success "'ipfs --encoding=json file ls <duplicates>' succeeds" '
		ipfs --encoding=json file ls /ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1 /ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 /ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd >actual_json_ls_duplicates_file
	'

	test_expect_success "'ipfs --encoding=json file ls <duplicates>' output looks good" '
		cat <<-\EOF >expected_json_ls_duplicates_file_trailing_newline &&
			{"Arguments":{"/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024":"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd","/ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss":"QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss","/ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd":"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd","/ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1":"QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss"},"Objects":{"QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss":{"Hash":"QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss","Size":0,"Type":"Directory","Links":[{"Name":"128","Hash":"QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe","Size":128,"Type":"File"},{"Name":"a","Hash":"QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN","Size":6,"Type":"File"}]},"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd":{"Hash":"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd","Size":1024,"Type":"File","Links":null}}}
		EOF
		printf "%s\n" "$(cat expected_json_ls_duplicates_file_trailing_newline)" >expected_json_ls_duplicates_file &&
		test_cmp expected_json_ls_duplicates_file actual_json_ls_duplicates_file
	'
}

# should work offline
test_ls_cmd

# should work online
test_launch_ipfs_daemon
test_ls_cmd
test_kill_ipfs_daemon

test_done

View File

@ -1,58 +0,0 @@
#!/usr/bin/env bash
#
# Copyright (c) 2015 Jeromy Johnson
# MIT Licensed; see the LICENSE file in this repository.
#

test_description="Test tar commands"

. lib/test-lib.sh

test_init_ipfs

# Fixture: regular files, a subdirectory, a symlink and an executable so
# the tar add/cat round trip covers several entry types.
test_expect_success "create some random files" '
	mkdir foo &&
	random 10000 > foo/a &&
	random 12345 > foo/b &&
	mkdir foo/bar &&
	random 5432 > foo/bar/baz &&
	ln -s ../a foo/bar/link &&
	echo "exit" > foo/script &&
	chmod +x foo/script
'

test_expect_success "tar those random files up" '
	tar cf files.tar foo/
'

test_expect_success "'ipfs tar add' succeeds" '
	TAR_HASH=$(ipfs tar add files.tar)
'

test_expect_success "'ipfs tar cat' succeeds" '
	mkdir output &&
	ipfs tar cat $TAR_HASH > output/out.tar
'

test_expect_success "can extract tar" '
	tar xf output/out.tar -C output/
'

# Verify the round trip preserved file contents, the symlink and the exec bit.
test_expect_success "files look right" '
	diff foo/a output/foo/a &&
	diff foo/b output/foo/b &&
	diff foo/bar/baz output/foo/bar/baz &&
	[ -L output/foo/bar/link ] &&
	[ -x foo/script ]
'

# --cid-base only changes the encoding of the emitted CID, not the hash itself.
test_expect_success "'ipfs tar add --cid-base=base32' succeeds" '
	ipfs tar add --cid-base=base32 files.tar > actual
'

test_expect_success "'ipfs tar add --cid-base=base32' has correct hash" '
	ipfs cid base32 $TAR_HASH > expected &&
	test_cmp expected actual
'

test_done

View File

@ -191,7 +191,6 @@ EOF
'
}
test_urlstore urlstore add
test_urlstore add -q --nocopy --cid-version=1
test_done

View File

@ -11,7 +11,7 @@ import (
var CurrentCommit string
// CurrentVersionNumber is the current application's version literal.
const CurrentVersionNumber = "0.25.0"
const CurrentVersionNumber = "0.26.0-dev"
const ApiVersion = "/kubo/" + CurrentVersionNumber + "/" //nolint