diff --git a/cmd/ipfs/init.go b/cmd/ipfs/init.go
index 9d4bb51a4..9dc4f7fb8 100644
--- a/cmd/ipfs/init.go
+++ b/cmd/ipfs/init.go
@@ -85,13 +85,16 @@ environment variable:
         f := req.Files
         if f != nil {
-            _, confFile, err := f.NextFile()
-            if err != nil {
-                return err
+            it, _ := req.Files.Entries()
+            if !it.Next() && it.Err() != nil {
+                return it.Err()
+            }
+            if it.File() == nil {
+                return fmt.Errorf("expected a regular file")
             }
             conf = &config.Config{}
-            if err := json.NewDecoder(confFile).Decode(conf); err != nil {
+            if err := json.NewDecoder(it.File()).Decode(conf); err != nil {
                 return err
             }
         }
diff --git a/core/commands/add.go b/core/commands/add.go
index 058519322..148e3a76b 100644
--- a/core/commands/add.go
+++ b/core/commands/add.go
@@ -11,7 +11,6 @@ import (
     "github.com/ipfs/go-ipfs/core/coreapi/interface/options"
 
     pb "gx/ipfs/QmPtj12fdwuAqj9sBSTNUxBNu8kCGNp8b3o8yUzMm5GHpq/pb"
-    files "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
     cmds "gx/ipfs/QmaAP56JAwdjwisPTu4yx17whcjTr6y5JCSCF77Y1rahWV/go-ipfs-cmds"
     cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
     mh "gx/ipfs/QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW/go-multihash"
diff --git a/core/commands/block.go b/core/commands/block.go
index 054e621d8..b50f4c141 100644
--- a/core/commands/block.go
+++ b/core/commands/block.go
@@ -153,9 +153,12 @@ than 'sha2-256' or format to anything other than 'v0' will result in CIDv1.
             return err
         }
 
-        _, file, err := req.Files.NextFile()
-        if err != nil {
-            return err
+        it, _ := req.Files.Entries()
+        if !it.Next() && it.Err() != nil {
+            return it.Err()
+        }
+        if it.File() == nil {
+            return fmt.Errorf("expected a regular file")
         }
 
         mhtype, _ := req.Options[mhtypeOptionName].(string)
@@ -178,7 +181,7 @@ than 'sha2-256' or format to anything other than 'v0' will result in CIDv1.
             }
         }
 
-        p, err := api.Block().Put(req.Context, file, options.Block.Hash(mhtval, mhlen), options.Block.Format(format))
+        p, err := api.Block().Put(req.Context, it.File(), options.Block.Hash(mhtval, mhlen), options.Block.Format(format))
         if err != nil {
             return err
         }
diff --git a/core/commands/cat.go b/core/commands/cat.go
index 3fa2fec09..734316927 100644
--- a/core/commands/cat.go
+++ b/core/commands/cat.go
@@ -9,6 +9,7 @@ import (
     "github.com/ipfs/go-ipfs/core/commands/cmdenv"
     "github.com/ipfs/go-ipfs/core/coreapi/interface"
 
+    "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
     cmds "gx/ipfs/QmaAP56JAwdjwisPTu4yx17whcjTr6y5JCSCF77Y1rahWV/go-ipfs-cmds"
     "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
 )
@@ -122,12 +123,13 @@ func cat(ctx context.Context, api iface.CoreAPI, paths []string, offset int64, m
             return nil, 0, err
         }
 
-        file, err := api.Unixfs().Get(ctx, fpath)
+        f, err := api.Unixfs().Get(ctx, fpath)
         if err != nil {
             return nil, 0, err
         }
 
-        if file.IsDirectory() {
+        file, ok := f.(files.File)
+        if !ok {
             return nil, 0, iface.ErrIsDir
         }
diff --git a/core/commands/config.go b/core/commands/config.go
index 45f930840..e2fc32b0c 100644
--- a/core/commands/config.go
+++ b/core/commands/config.go
@@ -280,10 +280,14 @@ can't be undone.
         }
         defer r.Close()
 
-        file, err := req.Files.NextFile()
-        if err != nil {
-            return err
+        it, _ := req.Files.Entries()
+        if !it.Next() && it.Err() != nil {
+            return it.Err()
         }
+        if it.File() == nil {
+            return fmt.Errorf("expected a regular file")
+        }
+        file := it.File()
         defer file.Close()
 
         return replaceConfig(r, file)
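The command-level hunks above all repeat one migration pattern: instead of req.Files.NextFile(), exactly one regular file is pulled out of the request through the new Entries() iterator. A minimal sketch of that shared pattern as a standalone helper (the helper name, and the assumption that the request's file set satisfies files.Directory with the Entries() (DirIterator, error) signature shown in this diff, are illustrative only and not part of the change):

package example

import (
    "fmt"

    files "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
)

// singleFile returns the first entry of d as a regular file, mirroring the
// Entries/Next/File sequence the commands above now use in place of NextFile.
func singleFile(d files.Directory) (files.File, error) {
    it, err := d.Entries()
    if err != nil {
        return nil, err
    }
    if !it.Next() && it.Err() != nil {
        return nil, it.Err()
    }
    if it.File() == nil {
        return nil, fmt.Errorf("expected a regular file")
    }
    return it.File(), nil
}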
diff --git a/core/commands/dag/dag.go b/core/commands/dag/dag.go
index 1e3b0441e..0da4af91a 100644
--- a/core/commands/dag/dag.go
+++ b/core/commands/dag/dag.go
@@ -92,16 +92,12 @@ into an object of the specified format.
             defer nd.Blockstore.PinLock().Unlock()
         }
 
-        for {
-            _, file, err := req.Files.NextFile()
-            if err == io.EOF {
-                // Finished the list of files.
-                break
-            } else if err != nil {
-                return err
+        it, _ := req.Files.Entries()
+        for it.Next() {
+            if it.File() == nil {
+                return fmt.Errorf("expected a regular file")
             }
-
-            nds, err := coredag.ParseInputs(ienc, format, file, mhType, -1)
+            nds, err := coredag.ParseInputs(ienc, format, it.File(), mhType, -1)
             if err != nil {
                 return err
             }
@@ -122,6 +118,9 @@ into an object of the specified format.
                 return err
             }
         }
+        if it.Err() != nil {
+            return it.Err()
+        }
 
         if err := b.Commit(); err != nil {
             return err
diff --git a/core/commands/files.go b/core/commands/files.go
index eeff856a7..2792bad98 100644
--- a/core/commands/files.go
+++ b/core/commands/files.go
@@ -769,12 +769,15 @@ stat' on the file or any of its ancestors.
             return err
         }
 
-        _, input, err := req.Files.NextFile()
-        if err != nil {
-            return err
+        it, _ := req.Files.Entries()
+        if !it.Next() && it.Err() != nil {
+            return it.Err()
+        }
+        if it.File() == nil {
+            return fmt.Errorf("expected a regular file")
         }
 
-        var r io.Reader = input
+        var r io.Reader = it.File()
         if countfound {
             r = io.LimitReader(r, int64(count))
         }
diff --git a/core/commands/object/object.go b/core/commands/object/object.go
index bba3abae5..c21a193e2 100644
--- a/core/commands/object/object.go
+++ b/core/commands/object/object.go
@@ -391,9 +391,12 @@ And then run:
             return err
         }
 
-        _, input, err := req.Files.NextFile()
-        if err != nil && err != io.EOF {
-            return err
+        it, _ := req.Files.Entries()
+        if !it.Next() && it.Err() != nil {
+            return it.Err()
+        }
+        if it.File() == nil {
+            return fmt.Errorf("expected a regular file")
         }
 
         inputenc, _ := req.Options["inputenc"].(string)
@@ -411,7 +414,7 @@ And then run:
             return err
         }
 
-        p, err := api.Object().Put(req.Context, input,
+        p, err := api.Object().Put(req.Context, it.File(),
             options.Object.DataType(datafieldenc),
             options.Object.InputEnc(inputenc),
             options.Object.Pin(dopin))
diff --git a/core/commands/object/patch.go b/core/commands/object/patch.go
index 0e8fd8d11..85f60f0fc 100644
--- a/core/commands/object/patch.go
+++ b/core/commands/object/patch.go
@@ -4,12 +4,12 @@ import (
     "fmt"
     "io"
 
-    cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv"
+    "github.com/ipfs/go-ipfs/core/commands/cmdenv"
     coreiface "github.com/ipfs/go-ipfs/core/coreapi/interface"
     "github.com/ipfs/go-ipfs/core/coreapi/interface/options"
 
-    cmds "gx/ipfs/QmaAP56JAwdjwisPTu4yx17whcjTr6y5JCSCF77Y1rahWV/go-ipfs-cmds"
-    cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
+    "gx/ipfs/QmaAP56JAwdjwisPTu4yx17whcjTr6y5JCSCF77Y1rahWV/go-ipfs-cmds"
+    "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
 )
 
 var ObjectPatchCmd = &cmds.Command{
@@ -60,12 +60,15 @@ the limit will not be respected by the network.
             return err
         }
 
-        _, data, err := req.Files.NextFile()
-        if err != nil {
-            return err
+        it, _ := req.Files.Entries()
+        if !it.Next() && it.Err() != nil {
+            return it.Err()
+        }
+        if it.File() == nil {
+            return fmt.Errorf("expected a regular file")
         }
 
-        p, err := api.Object().AppendData(req.Context, root, data)
+        p, err := api.Object().AppendData(req.Context, root, it.File())
         if err != nil {
             return err
         }
@@ -107,12 +110,15 @@ Example:
             return err
         }
 
-        _, data, err := req.Files.NextFile()
-        if err != nil {
-            return err
+        it, _ := req.Files.Entries()
+        if !it.Next() && it.Err() != nil {
+            return it.Err()
+        }
+        if it.File() == nil {
+            return fmt.Errorf("expected a regular file")
         }
 
-        p, err := api.Object().SetData(req.Context, root, data)
+        p, err := api.Object().SetData(req.Context, root, it.File())
         if err != nil {
             return err
         }
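dag put is the one command above that consumes every entry rather than just the first, so it has to deal with the iterator's error reporting: Next() returning false can mean either end-of-entries or a failed iterator, and only Err() distinguishes the two (hence the it.Err() check after the loop). A sketch of that loop shape in isolation, with a hypothetical helper name and callback:

package example

import (
    "fmt"

    files "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
)

// forEachFile applies fn to every regular file in d, rejecting directories.
// The it.Err() check after the loop is what surfaces iterator failures.
func forEachFile(d files.Directory, fn func(files.File) error) error {
    it, err := d.Entries()
    if err != nil {
        return err
    }
    for it.Next() {
        if it.File() == nil {
            return fmt.Errorf("expected a regular file")
        }
        if err := fn(it.File()); err != nil {
            return err
        }
    }
    return it.Err()
}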
diff --git a/core/commands/tar.go b/core/commands/tar.go
index 85e71b3de..c03d1c0a9 100644
--- a/core/commands/tar.go
+++ b/core/commands/tar.go
@@ -44,12 +44,15 @@ represent it.
             return err
         }
 
-        name, fi, err := req.Files.NextFile()
-        if err != nil {
-            return err
+        it, _ := req.Files.Entries()
+        if !it.Next() && it.Err() != nil {
+            return it.Err()
+        }
+        if it.File() == nil {
+            return fmt.Errorf("expected a regular file")
         }
 
-        node, err := tar.ImportTar(req.Context, fi, nd.DAG)
+        node, err := tar.ImportTar(req.Context, it.File(), nd.DAG)
         if err != nil {
             return err
         }
@@ -57,7 +60,7 @@ represent it.
         c := node.Cid()
 
         return cmds.EmitOnce(res, &coreiface.AddEvent{
-            Name: name,
+            Name: it.Name(),
             Hash: c.String(),
         })
     },
diff --git a/core/coreapi/interface/unixfs.go b/core/coreapi/interface/unixfs.go
index 773b36dc0..589083c6b 100644
--- a/core/coreapi/interface/unixfs.go
+++ b/core/coreapi/interface/unixfs.go
@@ -23,13 +23,13 @@ type UnixfsAPI interface {
     // Add imports the data from the reader into merkledag file
     //
     // TODO: a long useful comment on how to use this for many different scenarios
-    Add(context.Context, files.File, ...options.UnixfsAddOption) (ResolvedPath, error)
+    Add(context.Context, files.Node, ...options.UnixfsAddOption) (ResolvedPath, error)
 
     // Get returns a read-only handle to a file tree referenced by a path
     //
     // Note that some implementations of this API may apply the specified context
     // to operations performed on the returned file
-    Get(context.Context, Path) (files.File, error)
+    Get(context.Context, Path) (files.Node, error)
 
     // Ls returns the list of links in a directory
     Ls(context.Context, Path) ([]*ipld.Link, error)
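With Get returning files.Node instead of files.File, callers that used IsDirectory() now branch on the concrete type, as cat.go above already does and the gateway handler does further down. A sketch of such a call site under the new interface (the function name and the io.Writer parameter are illustrative; the API calls are the ones changed in this diff):

package example

import (
    "context"
    "io"

    iface "github.com/ipfs/go-ipfs/core/coreapi/interface"
    files "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
)

// catNode copies the contents of a UnixFS file to out, rejecting directories
// the same way the reworked cat command does.
func catNode(ctx context.Context, api iface.CoreAPI, p iface.Path, out io.Writer) error {
    nd, err := api.Unixfs().Get(ctx, p)
    if err != nil {
        return err
    }
    defer nd.Close()

    f, ok := nd.(files.File)
    if !ok {
        return iface.ErrIsDir
    }

    _, err = io.Copy(out, f)
    return err
}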
diff --git a/core/coreapi/unixfile.go b/core/coreapi/unixfile.go
index d2ef89887..f48caa9c5 100644
--- a/core/coreapi/unixfile.go
+++ b/core/coreapi/unixfile.go
@@ -3,7 +3,6 @@ package coreapi
 import (
     "context"
     "errors"
-    "io"
 
     files "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
     ft "gx/ipfs/Qmbvw7kpSM2p6rbQ57WGRhhqNfCiNGW6EKH4xgHLw4bsnB/go-unixfs"
@@ -21,74 +20,73 @@ const prefetchFiles = 4
 
 type ufsDirectory struct {
     ctx   context.Context
     dserv ipld.DAGService
+    dir   uio.Directory
+}
 
+type ufsIterator struct {
+    ctx   context.Context
     files chan *ipld.Link
+    dserv ipld.DAGService
+
+    curName string
+    curFile files.Node
+
+    err error
+}
+
+func (it *ufsIterator) Name() string {
+    return it.curName
+}
+
+func (it *ufsIterator) Node() files.Node {
+    return it.curFile
+}
+
+func (it *ufsIterator) File() files.File {
+    f, _ := it.curFile.(files.File)
+    return f
+}
+
+func (it *ufsIterator) Dir() files.Directory {
+    d, _ := it.curFile.(files.Directory)
+    return d
+}
+
+func (it *ufsIterator) Next() bool {
+    l, ok := <-it.files
+    if !ok {
+        return false
+    }
+
+    it.curFile = nil
+
+    nd, err := l.GetNode(it.ctx, it.dserv)
+    if err != nil {
+        it.err = err
+        return false
+    }
+
+    it.curName = l.Name
+    it.curFile, it.err = newUnixfsFile(it.ctx, it.dserv, nd)
+    return it.err == nil
+}
+
+func (it *ufsIterator) Err() error {
+    return it.err
 }
 
 func (d *ufsDirectory) Close() error {
-    return files.ErrNotReader
+    return nil
 }
 
-func (d *ufsDirectory) Read(_ []byte) (int, error) {
-    return 0, files.ErrNotReader
-}
-
-func (d *ufsDirectory) IsDirectory() bool {
-    return true
-}
-
-func (d *ufsDirectory) NextFile() (string, files.File, error) {
-    l, ok := <-d.files
-    if !ok {
-        return "", nil, io.EOF
-    }
-
-    nd, err := l.GetNode(d.ctx, d.dserv)
-    if err != nil {
-        return "", nil, err
-    }
-
-    f, err := newUnixfsFile(d.ctx, d.dserv, nd, d)
-    return l.Name, f, err
-}
-
-func (d *ufsDirectory) Size() (int64, error) {
-    return 0, files.ErrNotReader
-}
-
-func (d *ufsDirectory) Seek(offset int64, whence int) (int64, error) {
-    return 0, files.ErrNotReader
-}
-
-type ufsFile struct {
-    uio.DagReader
-}
-
-func (f *ufsFile) IsDirectory() bool {
-    return false
-}
-
-func (f *ufsFile) NextFile() (string, files.File, error) {
-    return "", nil, files.ErrNotDirectory
-}
-
-func (f *ufsFile) Size() (int64, error) {
-    return int64(f.DagReader.Size()), nil
-}
-
-func newUnixfsDir(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (files.File, error) {
-    dir, err := uio.NewDirectoryFromNode(dserv, nd)
-    if err != nil {
-        return nil, err
-    }
-
+func (d *ufsDirectory) Entries() (files.DirIterator, error) {
     fileCh := make(chan *ipld.Link, prefetchFiles)
     go func() {
-        dir.ForEachLink(ctx, func(link *ipld.Link) error {
+        d.dir.ForEachLink(d.ctx, func(link *ipld.Link) error {
             select {
             case fileCh <- link:
-            case <-ctx.Done():
-                return ctx.Err()
+            case <-d.ctx.Done():
+                return d.ctx.Err()
             }
             return nil
         })
@@ -96,15 +94,40 @@ func newUnixfsDir(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (fil
         close(fileCh)
     }()
 
+    return &ufsIterator{
+        ctx:   d.ctx,
+        files: fileCh,
+        dserv: d.dserv,
+    }, nil
+}
+
+func (d *ufsDirectory) Size() (int64, error) {
+    return 0, files.ErrNotSupported
+}
+
+type ufsFile struct {
+    uio.DagReader
+}
+
+func (f *ufsFile) Size() (int64, error) {
+    return int64(f.DagReader.Size()), nil
+}
+
+func newUnixfsDir(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (files.Directory, error) {
+    dir, err := uio.NewDirectoryFromNode(dserv, nd)
+    if err != nil {
+        return nil, err
+    }
+
     return &ufsDirectory{
         ctx:   ctx,
         dserv: dserv,
-        files: fileCh,
+        dir:   dir,
     }, nil
 }
 
-func newUnixfsFile(ctx context.Context, dserv ipld.DAGService, nd ipld.Node, parent files.File) (files.File, error) {
+func newUnixfsFile(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (files.Node, error) {
     switch dn := nd.(type) {
     case *dag.ProtoNode:
         fsn, err := ft.FSNodeFromBytes(dn.Data())
@@ -129,3 +152,6 @@ func newUnixfsFile(ctx context.Context, dserv ipld.DAGService, nd ipld.Node, par
         DagReader: dr,
     }, nil
 }
+
+var _ files.Directory = &ufsDirectory{}
+var _ files.File = &ufsFile{}
diff --git a/core/coreapi/unixfs.go b/core/coreapi/unixfs.go
index f3c5038ca..27341c927 100644
--- a/core/coreapi/unixfs.go
+++ b/core/coreapi/unixfs.go
@@ -28,7 +28,7 @@ type UnixfsAPI CoreAPI
 
 // Add builds a merkledag node from a reader, adds it to the blockstore,
 // and returns the key representing that node.
-func (api *UnixfsAPI) Add(ctx context.Context, files files.File, opts ...options.UnixfsAddOption) (coreiface.ResolvedPath, error) {
+func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options.UnixfsAddOption) (coreiface.ResolvedPath, error) {
     settings, prefix, err := options.UnixfsAddOptions(opts...)
     if err != nil {
         return nil, err
@@ -133,7 +133,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.File, opts ...options
     return coreiface.IpfsPath(nd.Cid()), nil
 }
 
-func (api *UnixfsAPI) Get(ctx context.Context, p coreiface.Path) (files.File, error) {
+func (api *UnixfsAPI) Get(ctx context.Context, p coreiface.Path) (files.Node, error) {
     ses := api.core().getSession(ctx)
 
     nd, err := ses.ResolveNode(ctx, p)
@@ -141,7 +141,7 @@ func (api *UnixfsAPI) Get(ctx context.Context, p coreiface.Path) (files.File, er
         return nil, err
     }
 
-    return newUnixfsFile(ctx, ses.dag, nd, nil)
+    return newUnixfsFile(ctx, ses.dag, nd)
 }
 
 // Ls returns the contents of an IPFS or IPNS object(s) at path p, with the format:
diff --git a/core/coreapi/unixfs_test.go b/core/coreapi/unixfs_test.go
index 8b773dd96..55b7aa862 100644
--- a/core/coreapi/unixfs_test.go
+++ b/core/coreapi/unixfs_test.go
@@ -134,36 +134,36 @@ func makeAPI(ctx context.Context) (*core.IpfsNode, coreiface.CoreAPI, error) {
     return nd[0], api[0], nil
 }
 
-func strFile(data string) func() files.File {
-    return func() files.File {
+func strFile(data string) func() files.Node {
+    return func() files.Node {
         return files.NewReaderFile(ioutil.NopCloser(strings.NewReader(data)), nil)
     }
 }
 
-func twoLevelDir() func() files.File {
-    return func() files.File {
-        return files.NewSliceFile([]files.FileEntry{{
-            Name: "abc", File: files.NewSliceFile([]files.FileEntry{
-                {Name: "def", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("world")), nil)},
-            })},
+func twoLevelDir() func() files.Node {
+    return func() files.Node {
+        return files.NewSliceFile([]files.DirEntry{
+            files.FileEntry("abc", files.NewSliceFile([]files.DirEntry{
+                files.FileEntry("def", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("world")), nil)),
+            })),
 
-            {Name: "bar", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)},
-            {Name: "foo", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)},
+            files.FileEntry("bar", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)),
+            files.FileEntry("foo", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)),
         })
     }
 }
 
-func flatDir() files.File {
-    return files.NewSliceFile([]files.FileEntry{
-        {Name: "bar", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)},
-        {Name: "foo", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)},
+func flatDir() files.Node {
+    return files.NewSliceFile([]files.DirEntry{
+        files.FileEntry("bar", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)),
+        files.FileEntry("foo", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)),
     })
 }
 
-func wrapped(name string) func(f files.File) files.File {
-    return func(f files.File) files.File {
-        return files.NewSliceFile([]files.FileEntry{
-            {Name: name, File: f},
+func wrapped(name string) func(f files.Node) files.Node {
+    return func(f files.Node) files.Node {
+        return files.NewSliceFile([]files.DirEntry{
+            files.FileEntry(name, f),
         })
     }
 }
@@ -177,8 +177,8 @@ func TestAdd(t *testing.T) {
     cases := []struct {
         name   string
-        data   func() files.File
-        expect func(files.File) files.File
+        data   func() files.Node
+        expect func(files.Node) files.Node
 
         path string
         err  string
@@ -295,7 +295,7 @@ func TestAdd(t *testing.T) {
         {
             name: "addWrapped",
             path: "/ipfs/QmVE9rNpj5doj7XHzp5zMUxD7BJgXEqx4pe3xZ3JBReWHE",
-            data: func() files.File {
+            data: func() files.Node {
                 return files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)
             },
             wrap: "foo",
@@ -305,7 +305,7 @@ func TestAdd(t *testing.T) {
         {
             name: "addNotWrappedDirFile",
            path: hello,
-            data: func() files.File {
+            data: func() files.Node {
                 return files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)
             },
             wrap: "foo",
@@ -313,12 +313,12 @@ func TestAdd(t *testing.T) {
         {
             name: "stdinWrapped",
             path: "/ipfs/QmU3r81oZycjHS9oaSHw37ootMFuFUw1DvMLKXPsezdtqU",
-            data: func() files.File {
+            data: func() files.Node {
                 return files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)
             },
-            expect: func(files.File) files.File {
-                return files.NewSliceFile([]files.FileEntry{
-                    {Name: "QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)},
+            expect: func(files.Node) files.Node {
+                return files.NewSliceFile([]files.DirEntry{
+                    files.FileEntry("QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk", files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)),
                 })
             },
             opts: []options.UnixfsAddOption{options.Unixfs.Wrap(true)},
@@ -326,7 +326,7 @@ func TestAdd(t *testing.T) {
         {
             name: "stdinNamed",
             path: "/ipfs/QmQ6cGBmb3ZbdrQW1MRm1RJnYnaxCqfssz7CrTa9NEhQyS",
-            data: func() files.File {
+            data: func() files.Node {
                 rf, err := files.NewReaderPathFile(os.Stdin.Name(), ioutil.NopCloser(strings.NewReader(helloStr)), nil)
                 if err != nil {
                     panic(err)
@@ -334,9 +334,9 @@ func TestAdd(t *testing.T) {
                 }
 
                 return rf
             },
-            expect: func(files.File) files.File {
-                return files.NewSliceFile([]files.FileEntry{
-                    {Name: "test", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)},
+            expect: func(files.Node) files.Node {
+                return files.NewSliceFile([]files.DirEntry{
+                    files.FileEntry("test", files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)),
                 })
             },
             opts: []options.UnixfsAddOption{options.Unixfs.Wrap(true), options.Unixfs.StdinName("test")},
@@ -360,11 +360,11 @@ func TestAdd(t *testing.T) {
         // hidden
         {
             name: "hiddenFiles",
-            data: func() files.File {
-                return files.NewSliceFile([]files.FileEntry{
-                    {Name: ".bar", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)},
-                    {Name: "bar", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)},
-                    {Name: "foo", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)},
+            data: func() files.Node {
+                return files.NewSliceFile([]files.DirEntry{
+                    files.FileEntry(".bar", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)),
+                    files.FileEntry("bar", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)),
+                    files.FileEntry("foo", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)),
                 })
             },
             wrap: "t",
@@ -373,7 +373,7 @@ func TestAdd(t *testing.T) {
         },
         {
             name: "hiddenFileAlwaysAdded",
-            data: func() files.File {
+            data: func() files.Node {
                 return files.NewReaderFile(ioutil.NopCloser(strings.NewReader(helloStr)), nil)
             },
             wrap: ".foo",
@@ -381,14 +381,14 @@ func TestAdd(t *testing.T) {
         },
         {
             name: "hiddenFilesNotAdded",
-            data: func() files.File {
-                return files.NewSliceFile([]files.FileEntry{
-                    {Name: ".bar", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)},
-                    {Name: "bar", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)},
-                    {Name: "foo", File: files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)},
+            data: func() files.Node {
+                return files.NewSliceFile([]files.DirEntry{
+                    files.FileEntry(".bar", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)),
+                    files.FileEntry("bar", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello2")), nil)),
+                    files.FileEntry("foo", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello1")), nil)),
                 })
             },
-            expect: func(files.File) files.File {
+            expect: func(files.Node) files.Node {
                 return flatDir()
             },
             wrap: "t",
@@ -431,7 +431,7 @@ func TestAdd(t *testing.T) {
         },
         {
             name: "progress1M",
-            data: func() files.File {
+            data: func() files.Node {
                 r := bytes.NewReader(bytes.Repeat([]byte{0}, 1000000))
                 return files.NewReaderFile(ioutil.NopCloser(r), nil)
             },
@@ -457,8 +457,8 @@ func TestAdd(t *testing.T) {
             data := testCase.data()
 
             if testCase.wrap != "" {
-                data = files.NewSliceFile([]files.FileEntry{
-                    {Name: testCase.wrap, File: data},
+                data = files.NewSliceFile([]files.DirEntry{
+                    files.FileEntry(testCase.wrap, data),
                 })
             }
 
@@ -533,9 +533,12 @@
             // compare file structure with Unixfs().Get
 
-            var cmpFile func(origName string, orig files.File, gotName string, got files.File)
-            cmpFile = func(origName string, orig files.File, gotName string, got files.File) {
-                if orig.IsDirectory() != got.IsDirectory() {
+            var cmpFile func(origName string, orig files.Node, gotName string, got files.Node)
+            cmpFile = func(origName string, orig files.Node, gotName string, got files.Node) {
+                _, origDir := orig.(files.Directory)
+                _, gotDir := got.(files.Directory)
+
+                if origDir != gotDir {
                     t.Fatal("file type mismatch")
                 }
@@ -543,16 +546,16 @@
                     t.Errorf("file name mismatch, orig='%s', got='%s'", origName, gotName)
                 }
 
-                if !orig.IsDirectory() {
+                if !gotDir {
                     defer orig.Close()
                     defer got.Close()
 
-                    do, err := ioutil.ReadAll(orig)
+                    do, err := ioutil.ReadAll(orig.(files.File))
                     if err != nil {
                         t.Fatal(err)
                     }
 
-                    dg, err := ioutil.ReadAll(got)
+                    dg, err := ioutil.ReadAll(got.(files.File))
                     if err != nil {
                         t.Fatal(err)
                     }
@@ -564,21 +567,28 @@
                     return
                 }
 
+                origIt, _ := orig.(files.Directory).Entries()
+                gotIt, _ := got.(files.Directory).Entries()
+
                 for {
-                    origName, origFile, err := orig.NextFile()
-                    gotName, gotFile, err2 := got.NextFile()
-
-                    if err != nil {
-                        if err == io.EOF && err2 == io.EOF {
-                            break
+                    if origIt.Next() {
+                        if !gotIt.Next() {
+                            t.Fatal("gotIt out of entries before origIt")
                         }
-                        t.Fatal(err)
-                    }
-                    if err2 != nil {
-                        t.Fatal(err)
+                    } else {
+                        if gotIt.Next() {
+                            t.Fatal("origIt out of entries before gotIt")
+                        }
+                        break
                     }
 
-                    cmpFile(origName, origFile, gotName, gotFile)
+                    cmpFile(origIt.Name(), origIt.Node(), gotIt.Name(), gotIt.Node())
+                }
+                if origIt.Err() != nil {
+                    t.Fatal(origIt.Err())
+                }
+                if gotIt.Err() != nil {
+                    t.Fatal(gotIt.Err())
                }
             }
@@ -667,7 +677,7 @@ func TestGetEmptyFile(t *testing.T) {
     }
 
     buf := make([]byte, 1) // non-zero so that Read() actually tries to read
-    n, err := io.ReadFull(r, buf)
+    n, err := io.ReadFull(r.(files.File), buf)
     if err != nil && err != io.EOF {
         t.Error(err)
     }
@@ -703,9 +713,8 @@ func TestGetDir(t *testing.T) {
         t.Error(err)
     }
 
-    _, err = r.Read(make([]byte, 2))
-    if err != files.ErrNotReader {
-        t.Fatalf("expected ErrIsDir, got: %s", err)
+    if _, ok := r.(files.Directory); !ok {
+        t.Fatalf("expected a directory")
     }
 }
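The test updates above also show the new way to build in-memory file trees: files.FileEntry(name, node) constructors inside a []files.DirEntry slice replace the old FileEntry{Name, File} struct literals, and fixtures are typed as files.Node. A small self-contained fixture in the same style (the names and contents are arbitrary, chosen only for illustration):

package example

import (
    "io/ioutil"
    "strings"

    files "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
)

// fixture returns a two-level in-memory directory built the way the updated
// unixfs tests build theirs: NewSliceFile over DirEntry values.
func fixture() files.Node {
    return files.NewSliceFile([]files.DirEntry{
        files.FileEntry("dir", files.NewSliceFile([]files.DirEntry{
            files.FileEntry("hello.txt", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("hello")), nil)),
        })),
        files.FileEntry("readme", files.NewReaderFile(ioutil.NopCloser(strings.NewReader("docs")), nil)),
    })
}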
diff --git a/core/corehttp/gateway_handler.go b/core/corehttp/gateway_handler.go
index 103916c77..e0330185e 100644
--- a/core/corehttp/gateway_handler.go
+++ b/core/corehttp/gateway_handler.go
@@ -172,10 +172,7 @@ func (i *gatewayHandler) getOrHeadHandler(ctx context.Context, w http.ResponseWr
         return
     }
 
-    dir := dr.IsDirectory()
-    if !dir {
-        defer dr.Close()
-    }
+    defer dr.Close()
 
     // Check etag send back to us
     etag := "\"" + resolvedPath.Cid().String() + "\""
@@ -240,14 +237,14 @@ func (i *gatewayHandler) getOrHeadHandler(ctx context.Context, w http.ResponseWr
     // TODO: break this out when we split /ipfs /ipns routes.
     modtime := time.Now()
 
-    if strings.HasPrefix(urlPath, ipfsPathPrefix) && !dir {
-        w.Header().Set("Cache-Control", "public, max-age=29030400, immutable")
+    if f, ok := dr.(files.File); ok {
+        if strings.HasPrefix(urlPath, ipfsPathPrefix) {
+            w.Header().Set("Cache-Control", "public, max-age=29030400, immutable")
 
-        // set modtime to a really long time ago, since files are immutable and should stay cached
-        modtime = time.Unix(1, 0)
-    }
+            // set modtime to a really long time ago, since files are immutable and should stay cached
+            modtime = time.Unix(1, 0)
+        }
 
-    if !dir {
         urlFilename := r.URL.Query().Get("filename")
         var name string
         if urlFilename != "" {
@@ -256,8 +253,9 @@ func (i *gatewayHandler) getOrHeadHandler(ctx context.Context, w http.ResponseWr
         } else {
             name = getFilename(urlPath)
         }
-        i.serveFile(w, r, name, modtime, dr)
+        i.serveFile(w, r, name, modtime, f)
         return
+    }
 
     nd, err := i.api.ResolveNode(ctx, resolvedPath)
@@ -290,8 +288,14 @@ func (i *gatewayHandler) getOrHeadHandler(ctx context.Context, w http.ResponseWr
         }
         defer dr.Close()
 
+        f, ok := dr.(files.File)
+        if !ok {
+            internalWebError(w, files.ErrNotReader)
+            return
+        }
+
         // write to request
-        http.ServeContent(w, r, "index.html", modtime, dr)
+        http.ServeContent(w, r, "index.html", modtime, f)
         return
     default:
         internalWebError(w, err)
diff --git a/core/coreunix/add.go b/core/coreunix/add.go
index fcfa4aa6e..66a0ed1d6 100644
--- a/core/coreunix/add.go
+++ b/core/coreunix/add.go
@@ -2,6 +2,7 @@ package coreunix
 
 import (
     "context"
+    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -400,7 +401,7 @@ func (adder *Adder) addNode(node ipld.Node, path string) error {
 }
 
 // AddAllAndPin adds the given request's files and pin them.
-func (adder *Adder) AddAllAndPin(file files.File) (ipld.Node, error) {
+func (adder *Adder) AddAllAndPin(file files.Node) (ipld.Node, error) {
     if adder.Pin {
         adder.unlocker = adder.blockstore.PinLock()
     }
@@ -410,23 +411,23 @@ func (adder *Adder) AddAllAndPin(file files.File) (ipld.Node, error) {
         }
     }()
 
-    switch {
-    case file.IsDirectory():
+    switch tf := file.(type) {
+    case files.Directory:
         // Iterate over each top-level file and add individually. Otherwise the
         // single files.File f is treated as a directory, affecting hidden file
         // semantics.
-        for {
-            name, f, err := file.NextFile()
-            if err == io.EOF {
-                // Finished the list of files.
-                break
-            } else if err != nil {
-                return nil, err
-            }
-            if err := adder.addFile(name, f); err != nil {
+        it, err := tf.Entries()
+        if err != nil {
+            return nil, err
+        }
+        for it.Next() {
+            if err := adder.addFile(it.Name(), it.Node()); err != nil {
                 return nil, err
             }
         }
+        if it.Err() != nil {
+            return nil, it.Err()
+        }
         break
     default:
         if err := adder.addFile("", file); err != nil {
@@ -447,7 +448,7 @@ func (adder *Adder) AddAllAndPin(file files.File) (ipld.Node, error) {
     return nd, adder.PinRoot()
 }
 
-func (adder *Adder) addFile(path string, file files.File) error {
+func (adder *Adder) addFile(path string, file files.Node) error {
     err := adder.maybePauseForGC()
     if err != nil {
         return err
@@ -467,8 +468,8 @@
     }
     adder.liveNodes++
 
-    if file.IsDirectory() {
-        return adder.addDir(path, file)
+    if dir, ok := file.(files.Directory); ok {
+        return adder.addDir(path, dir)
     }
 
     // case for symlink
@@ -491,9 +492,12 @@
     // case for regular file
     // if the progress flag was specified, wrap the file so that we can send
     // progress updates to the client (over the output channel)
-    var reader io.Reader = file
+    reader, ok := file.(io.Reader)
+    if !ok {
+        return errors.New("file doesn't support reading")
+    }
     if adder.Progress {
-        rdr := &progressReader{file: file, path: path, out: adder.Out}
+        rdr := &progressReader{file: reader, path: path, out: adder.Out}
         if fi, ok := file.(files.FileInfo); ok {
             reader = &progressReader2{rdr, fi}
         } else {
@@ -517,7 +521,7 @@
     return adder.addNode(dagnode, path)
 }
 
-func (adder *Adder) addDir(path string, dir files.File) error {
+func (adder *Adder) addDir(path string, dir files.Directory) error {
     log.Infof("adding directory: %s", path)
 
     mr, err := adder.mfsRoot()
@@ -533,27 +537,23 @@
         return err
     }
 
-    for {
-        name, file, err := dir.NextFile()
-        if err != nil && err != io.EOF {
-            return err
-        }
-        if file == nil {
-            break
-        }
-
-        fpath := gopath.Join(path, name)
+    it, _ := dir.Entries()
+    for it.Next() {
+        fpath := gopath.Join(path, it.Name())
 
         // Skip hidden files when adding recursively, unless Hidden is enabled.
-        if files.IsHidden(fpath, file) && !adder.Hidden {
+        if files.IsHidden(fpath, it.Node()) && !adder.Hidden {
             log.Infof("%s is hidden, skipping", fpath)
             continue
         }
-        err = adder.addFile(fpath, file)
+        err = adder.addFile(fpath, it.Node())
         if err != nil {
             return err
         }
     }
+    if it.Err() != nil {
+        return it.Err()
+    }
 
     return nil
 }
@@ -616,7 +616,7 @@ func getOutput(dagnode ipld.Node) (*Object, error) {
 }
 
 type progressReader struct {
-    file  files.File
+    file  io.Reader
     path  string
     out   chan<- interface{}
     bytes int64
diff --git a/core/coreunix/add_test.go b/core/coreunix/add_test.go
index b1f5be3c9..fe9e1e0d6 100644
--- a/core/coreunix/add_test.go
+++ b/core/coreunix/add_test.go
@@ -81,10 +81,10 @@ func TestAddGCLive(t *testing.T) {
     datad := ioutil.NopCloser(bytes.NewBufferString("testfileD"))
     rfd := files.NewReaderFile(datad, nil)
 
-    slf := files.NewSliceFile([]files.FileEntry{
-        {File: rfa, Name: "a"},
-        {File: hangfile, Name: "b"},
-        {File: rfd, Name: "d"},
+    slf := files.NewSliceFile([]files.DirEntry{
+        files.FileEntry("a", rfa),
+        files.FileEntry("b", hangfile),
+        files.FileEntry("d", rfd),
     })
 
     addDone := make(chan struct{})
diff --git a/fuse/readonly/ipfs_test.go b/fuse/readonly/ipfs_test.go
index dea91b653..8a02d68d1 100644
--- a/fuse/readonly/ipfs_test.go
+++ b/fuse/readonly/ipfs_test.go
@@ -22,6 +22,7 @@ import (
     ci "gx/ipfs/QmPuhRE325DR8ChNcFtgd6F1eANCHy1oohXZPpYop4xsK6/go-testutil/ci"
     chunker "gx/ipfs/QmR4QQVkBZsZENRjYFVi8dEtPL3daZRNKk24m4r6WKJHNm/go-ipfs-chunker"
     fstest "gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse/fs/fstestutil"
+    files "gx/ipfs/QmXWZCd8jfaHmt4UDSnjKmGcrQMw95bDGWqEeVLVJjoANX/go-ipfs-files"
     importer "gx/ipfs/Qmbvw7kpSM2p6rbQ57WGRhhqNfCiNGW6EKH4xgHLw4bsnB/go-unixfs/importer"
     uio "gx/ipfs/Qmbvw7kpSM2p6rbQ57WGRhhqNfCiNGW6EKH4xgHLw4bsnB/go-unixfs/io"
     ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format"
@@ -180,7 +181,7 @@ func TestIpfsStressRead(t *testing.T) {
                     errs <- err
                 }
 
-                data, err := ioutil.ReadAll(read)
+                data, err := ioutil.ReadAll(read.(files.File))
                 if err != nil {
                     errs <- err
                 }
diff --git a/test/integration/addcat_test.go b/test/integration/addcat_test.go
index 177d2c5ee..0d8a20073 100644
--- a/test/integration/addcat_test.go
+++ b/test/integration/addcat_test.go
@@ -154,7 +154,7 @@ func DirectAddCat(data []byte, conf testutil.LatencyConfig) error {
     // verify
     bufout := new(bytes.Buffer)
-    io.Copy(bufout, readerCatted)
+    io.Copy(bufout, readerCatted.(io.Reader))
     if 0 != bytes.Compare(bufout.Bytes(), data) {
         return errors.New("catted data does not match added data")
     }
diff --git a/test/integration/bench_cat_test.go b/test/integration/bench_cat_test.go
index 6ff8f56ba..d228e45a1 100644
--- a/test/integration/bench_cat_test.go
+++ b/test/integration/bench_cat_test.go
@@ -101,7 +101,7 @@ func benchCat(b *testing.B, data []byte, conf testutil.LatencyConfig) error {
     // verify
     bufout := new(bytes.Buffer)
-    io.Copy(bufout, readerCatted)
+    io.Copy(bufout, readerCatted.(io.Reader))
     if 0 != bytes.Compare(bufout.Bytes(), data) {
         return errors.New("catted data does not match added data")
     }
diff --git a/test/integration/three_legged_cat_test.go b/test/integration/three_legged_cat_test.go
index 3fca20aa7..200c584e3 100644
--- a/test/integration/three_legged_cat_test.go
+++ b/test/integration/three_legged_cat_test.go
@@ -133,7 +133,7 @@ func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error {
     // verify
     bufout := new(bytes.Buffer)
-    io.Copy(bufout, readerCatted)
+    io.Copy(bufout, readerCatted.(io.Reader))
     if 0 != bytes.Compare(bufout.Bytes(), data) {
         return errors.New("catted data does not match added data")
     }