mirror of
https://github.com/ipfs/kubo.git
synced 2026-03-06 08:47:52 +08:00
address comments from CR
License: MIT Signed-off-by: Jeromy <jeromyj@gmail.com>
This commit is contained in:
parent
6faa70ee59
commit
38fab91013
@ -16,6 +16,7 @@ import (
|
||||
path "github.com/ipfs/go-ipfs/path"
|
||||
ft "github.com/ipfs/go-ipfs/unixfs"
|
||||
|
||||
context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
|
||||
logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log"
|
||||
)
|
||||
|
||||
@ -55,44 +56,75 @@ var FilesStatCmd = &cmds.Command{
|
||||
return
|
||||
}
|
||||
|
||||
path := req.Arguments()[0]
|
||||
path, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
fsn, err := mfs.Lookup(node.FilesRoot, path)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
nd, err := fsn.GetNode()
|
||||
o, err := statNode(fsn)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
k, err := nd.Key()
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
res.SetOutput(&Object{
|
||||
Hash: k.B58String(),
|
||||
})
|
||||
res.SetOutput(o)
|
||||
},
|
||||
Marshalers: cmds.MarshalerMap{
|
||||
cmds.Text: func(res cmds.Response) (io.Reader, error) {
|
||||
out := res.Output().(*Object)
|
||||
return strings.NewReader(out.Hash), nil
|
||||
buf := new(bytes.Buffer)
|
||||
fmt.Fprintln(buf, out.Hash)
|
||||
fmt.Fprintf(buf, "Size: %d\n", out.Size)
|
||||
fmt.Fprintf(buf, "CumulativeSize: %d\n", out.CumulativeSize)
|
||||
fmt.Fprintf(buf, "ChildBlocks: %d\n", out.Blocks)
|
||||
return buf, nil
|
||||
},
|
||||
},
|
||||
Type: Object{},
|
||||
}
|
||||
|
||||
func statNode(fsn mfs.FSNode) (*Object, error) {
|
||||
nd, err := fsn.GetNode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k, err := nd.Key()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, err := ft.FromBytes(nd.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cumulsize, err := nd.Size()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Object{
|
||||
Hash: k.B58String(),
|
||||
Blocks: len(nd.Links),
|
||||
Size: d.GetFilesize(),
|
||||
CumulativeSize: cumulsize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var FilesCpCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "copy files into mfs",
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("src", true, false, "source object to copy"),
|
||||
cmds.StringArg("source", true, false, "source object to copy"),
|
||||
cmds.StringArg("dest", true, false, "destination to copy object to"),
|
||||
},
|
||||
Run: func(req cmds.Request, res cmds.Response) {
|
||||
@ -102,39 +134,21 @@ var FilesCpCmd = &cmds.Command{
|
||||
return
|
||||
}
|
||||
|
||||
src := req.Arguments()[0]
|
||||
dst := req.Arguments()[1]
|
||||
src, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
dst, err := checkPath(req.Arguments()[1])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
var nd *dag.Node
|
||||
switch {
|
||||
case strings.HasPrefix(src, "/ipfs/"):
|
||||
p, err := path.ParsePath(src)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
obj, err := core.Resolve(req.Context(), node, p)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
nd = obj
|
||||
default:
|
||||
fsn, err := mfs.Lookup(node.FilesRoot, src)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
obj, err := fsn.GetNode()
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
nd = obj
|
||||
nd, err := getNodeFromPath(req.Context(), node, src)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
err = mfs.PutNode(node.FilesRoot, dst, nd)
|
||||
@ -145,8 +159,30 @@ var FilesCpCmd = &cmds.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.Node, error) {
|
||||
switch {
|
||||
case strings.HasPrefix(p, "/ipfs/"):
|
||||
np, err := path.ParsePath(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return core.Resolve(ctx, node, np)
|
||||
default:
|
||||
fsn, err := mfs.Lookup(node.FilesRoot, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fsn.GetNode()
|
||||
}
|
||||
}
|
||||
|
||||
// Object is the stat output for an mfs node.
// Note: the diff residue had a duplicated Hash field; a struct may
// declare each field only once.
type Object struct {
	// Hash is the base58-encoded multihash of the node.
	Hash string
	// Size is the unixfs file size in bytes.
	Size uint64
	// CumulativeSize is the total size of the node's dag in bytes.
	CumulativeSize uint64
	// Blocks is the number of child links of the node.
	Blocks int
}
|
||||
|
||||
type FilesLsOutput struct {
|
||||
@ -181,7 +217,12 @@ Examples:
|
||||
cmds.BoolOption("l", "use long listing format"),
|
||||
},
|
||||
Run: func(req cmds.Request, res cmds.Response) {
|
||||
path := req.Arguments()[0]
|
||||
path, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
nd, err := req.InvocContext().GetNode()
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
@ -243,7 +284,7 @@ Examples:
|
||||
|
||||
$ ipfs files read /test/hello
|
||||
hello
|
||||
`,
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
@ -260,7 +301,12 @@ Examples:
|
||||
return
|
||||
}
|
||||
|
||||
path := req.Arguments()[0]
|
||||
path, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
fsn, err := mfs.Lookup(n.FilesRoot, path)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
@ -273,7 +319,26 @@ Examples:
|
||||
return
|
||||
}
|
||||
|
||||
offset, _, _ := req.Option("offset").Int()
|
||||
offset, _, err := req.Option("offset").Int()
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
if offset < 0 {
|
||||
res.SetError(fmt.Errorf("cannot specify negative offset"), cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
filen, err := fi.Size()
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
if int64(offset) > filen {
|
||||
res.SetError(fmt.Errorf("offset was past end of file (%d > %d)", offset, filen), cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = fi.Seek(int64(offset), os.SEEK_SET)
|
||||
if err != nil {
|
||||
@ -282,7 +347,15 @@ Examples:
|
||||
}
|
||||
var r io.Reader = fi
|
||||
count, found, err := req.Option("count").Int()
|
||||
if err == nil && found {
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
if found {
|
||||
if count < 0 {
|
||||
res.SetError(fmt.Errorf("cannot specify negative 'count'"), cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
r = io.LimitReader(fi, int64(count))
|
||||
}
|
||||
|
||||
@ -300,7 +373,7 @@ Example:
|
||||
|
||||
$ ipfs files mv /myfs/a/b/c /myfs/foo/newc
|
||||
|
||||
`,
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
@ -314,8 +387,16 @@ Example:
|
||||
return
|
||||
}
|
||||
|
||||
src := req.Arguments()[0]
|
||||
dst := req.Arguments()[1]
|
||||
src, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
dst, err := checkPath(req.Arguments()[1])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
err = mfs.Mv(n.FilesRoot, src, dst)
|
||||
if err != nil {
|
||||
@ -332,14 +413,14 @@ var FilesWriteCmd = &cmds.Command{
|
||||
Write data to a file in a given filesystem. This command allows you to specify
|
||||
a beginning offset to write to. The entire length of the input will be written.
|
||||
|
||||
If the '--create' option is specified, the file will be create if it does not
|
||||
If the '--create' option is specified, the file will be created if it does not
|
||||
exist. Nonexistant intermediate directories will not be created.
|
||||
|
||||
Example:
|
||||
|
||||
echo "hello world" | ipfs files write --create /myfs/a/b/file
|
||||
echo "hello world" | ipfs files write --truncate /myfs/a/b/file
|
||||
`,
|
||||
echo "hello world" | ipfs files write --create /myfs/a/b/file
|
||||
echo "hello world" | ipfs files write --truncate /myfs/a/b/file
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("path", true, false, "path to write to"),
|
||||
@ -347,11 +428,17 @@ Example:
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.IntOption("o", "offset", "offset to write to"),
|
||||
cmds.BoolOption("n", "create", "create the file if it does not exist"),
|
||||
cmds.BoolOption("e", "create", "create the file if it does not exist"),
|
||||
cmds.BoolOption("t", "truncate", "truncate the file before writing"),
|
||||
cmds.IntOption("n", "count", "maximum number of bytes to read"),
|
||||
},
|
||||
Run: func(req cmds.Request, res cmds.Response) {
|
||||
path := req.Arguments()[0]
|
||||
path, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
create, _, _ := req.Option("create").Bool()
|
||||
trunc, _, _ := req.Option("truncate").Bool()
|
||||
|
||||
@ -375,7 +462,25 @@ Example:
|
||||
}
|
||||
}
|
||||
|
||||
offset, _, _ := req.Option("offset").Int()
|
||||
offset, _, err := req.Option("offset").Int()
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
if offset < 0 {
|
||||
res.SetError(fmt.Errorf("cannot have negative write offset"), cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
count, countfound, err := req.Option("count").Int()
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
if countfound && count < 0 {
|
||||
res.SetError(fmt.Errorf("cannot have negative byte count"), cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = fi.Seek(int64(offset), os.SEEK_SET)
|
||||
if err != nil {
|
||||
@ -390,6 +495,11 @@ Example:
|
||||
return
|
||||
}
|
||||
|
||||
var r io.Reader = input
|
||||
if countfound {
|
||||
r = io.LimitReader(r, int64(count))
|
||||
}
|
||||
|
||||
n, err := io.Copy(fi, input)
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
@ -411,7 +521,7 @@ Note: all paths must be absolute.
|
||||
Examples:
|
||||
|
||||
$ ipfs mfs mkdir /test/newdir
|
||||
$ ipfs mfs mkdir -p /test/does/not/exist/yet
|
||||
$ ipfs mfs mkdir -p /test/does/not/exist/yet
|
||||
`,
|
||||
},
|
||||
|
||||
@ -429,10 +539,9 @@ Examples:
|
||||
}
|
||||
|
||||
dashp, _, _ := req.Option("parents").Bool()
|
||||
dirtomake := req.Arguments()[0]
|
||||
|
||||
if dirtomake[0] != '/' {
|
||||
res.SetError(errors.New("paths must be absolute"), cmds.ErrNormal)
|
||||
dirtomake, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
@ -446,8 +555,17 @@ Examples:
|
||||
|
||||
var FilesRmCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "remove a file",
|
||||
ShortDescription: ``,
|
||||
Tagline: "remove a file",
|
||||
ShortDescription: `
|
||||
remove files or directories
|
||||
|
||||
$ ipfs files rm /foo
|
||||
$ ipfs files ls /bar
|
||||
cat
|
||||
dog
|
||||
fish
|
||||
$ ipfs files rm -r /bar
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
@ -463,7 +581,22 @@ var FilesRmCmd = &cmds.Command{
|
||||
return
|
||||
}
|
||||
|
||||
path := req.Arguments()[0]
|
||||
path, err := checkPath(req.Arguments()[0])
|
||||
if err != nil {
|
||||
res.SetError(err, cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
if path == "/" {
|
||||
res.SetError(fmt.Errorf("cannot delete root"), cmds.ErrNormal)
|
||||
return
|
||||
}
|
||||
|
||||
// 'rm a/b/c/' will fail unless we trim the slash at the end
|
||||
if path[len(path)-1] == '/' {
|
||||
path = path[:len(path)-1]
|
||||
}
|
||||
|
||||
dir, name := gopath.Split(path)
|
||||
parent, err := mfs.Lookup(nd.FilesRoot, dir)
|
||||
if err != nil {
|
||||
@ -546,11 +679,29 @@ func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// can unsafely cast, if it fails, that means programmer error
|
||||
return fsn.(*mfs.File), nil
|
||||
fi, ok := fsn.(*mfs.File)
|
||||
if !ok {
|
||||
return nil, errors.New("expected *mfs.File, didnt get it. This is likely a race condition")
|
||||
}
|
||||
return fi, nil
|
||||
|
||||
default:
|
||||
log.Error("GFH default")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// checkPath validates and normalizes an mfs path. The path must be
// non-empty and absolute; the returned path is cleaned, with a
// trailing slash preserved except on the root path itself.
func checkPath(p string) (string, error) {
	switch {
	case len(p) == 0:
		return "", fmt.Errorf("paths must not be empty")
	case p[0] != '/':
		return "", fmt.Errorf("paths must start with a leading slash")
	}

	out := gopath.Clean(p)
	// Clean strips a trailing slash; put it back so directory-style
	// paths keep their original form.
	if p != "/" && p[len(p)-1] == '/' {
		out += "/"
	}
	return out, nil
}
|
||||
|
||||
@ -57,7 +57,7 @@ import (
|
||||
pin "github.com/ipfs/go-ipfs/pin"
|
||||
repo "github.com/ipfs/go-ipfs/repo"
|
||||
config "github.com/ipfs/go-ipfs/repo/config"
|
||||
unixfs "github.com/ipfs/go-ipfs/unixfs"
|
||||
uio "github.com/ipfs/go-ipfs/unixfs/io"
|
||||
u "github.com/ipfs/go-ipfs/util"
|
||||
)
|
||||
|
||||
@ -472,7 +472,7 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.PeerInfo, error) {
|
||||
}
|
||||
|
||||
func (n *IpfsNode) loadFilesRoot() error {
|
||||
dsk := ds.NewKey("/filesroot")
|
||||
dsk := ds.NewKey("/local/filesroot")
|
||||
pf := func(ctx context.Context, k key.Key) error {
|
||||
return n.Repo.Datastore().Put(dsk, []byte(k))
|
||||
}
|
||||
@ -482,7 +482,7 @@ func (n *IpfsNode) loadFilesRoot() error {
|
||||
|
||||
switch {
|
||||
case err == ds.ErrNotFound || val == nil:
|
||||
nd = &merkledag.Node{Data: unixfs.FolderPBData()}
|
||||
nd = uio.NewEmptyDirectory()
|
||||
_, err := n.DAG.Add(nd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failure writing to dagstore: %s", err)
|
||||
|
||||
86
mfs/ops.go
86
mfs/ops.go
@ -14,11 +14,6 @@ import (
|
||||
func Mv(r *Root, src, dst string) error {
|
||||
srcDir, srcFname := gopath.Split(src)
|
||||
|
||||
srcObj, err := Lookup(r, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var dstDirStr string
|
||||
var filename string
|
||||
if dst[len(dst)-1] == '/' {
|
||||
@ -28,28 +23,46 @@ func Mv(r *Root, src, dst string) error {
|
||||
dstDirStr, filename = gopath.Split(dst)
|
||||
}
|
||||
|
||||
dstDiri, err := Lookup(r, dstDirStr)
|
||||
// get parent directories of both src and dest first
|
||||
dstDir, err := lookupDir(r, dstDirStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcDirObj, err := lookupDir(r, srcDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcObj, err := srcDirObj.Child(srcFname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstDir := dstDiri.(*Directory)
|
||||
nd, err := srcObj.GetNode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fsn, err := dstDir.Child(filename)
|
||||
if err == nil {
|
||||
switch n := fsn.(type) {
|
||||
case *File:
|
||||
_ = dstDir.Unlink(filename)
|
||||
case *Directory:
|
||||
dstDir = n
|
||||
default:
|
||||
return fmt.Errorf("unexpected type at path: %s", dst)
|
||||
}
|
||||
} else if err != os.ErrNotExist {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dstDir.AddChild(filename, nd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcDirObji, err := Lookup(r, srcDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcDirObj := srcDirObji.(*Directory)
|
||||
err = srcDirObj.Unlink(srcFname)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -58,18 +71,27 @@ func Mv(r *Root, src, dst string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupDir(r *Root, path string) (*Directory, error) {
|
||||
di, err := Lookup(r, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, ok := di.(*Directory)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s is not a directory", path)
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// PutNode inserts 'nd' at 'path' in the given mfs
|
||||
func PutNode(r *Root, path string, nd *dag.Node) error {
|
||||
dirp, filename := gopath.Split(path)
|
||||
|
||||
parent, err := Lookup(r, dirp)
|
||||
pdir, err := lookupDir(r, dirp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("lookup '%s' failed: %s", dirp, err)
|
||||
}
|
||||
|
||||
pdir, ok := parent.(*Directory)
|
||||
if !ok {
|
||||
return fmt.Errorf("%s did not point to directory", dirp)
|
||||
return err
|
||||
}
|
||||
|
||||
return pdir.AddChild(filename, nd)
|
||||
@ -83,17 +105,27 @@ func Mkdir(r *Root, path string, parents bool) error {
|
||||
parts = parts[1:]
|
||||
}
|
||||
|
||||
// allow 'mkdir /a/b/c/' to create c
|
||||
if parts[len(parts)-1] == "" {
|
||||
parts = parts[:len(parts)-1]
|
||||
}
|
||||
|
||||
if len(parts) == 0 {
|
||||
// this will only happen on 'mkdir /'
|
||||
return fmt.Errorf("cannot mkdir '%s'", path)
|
||||
}
|
||||
|
||||
cur := r.GetValue().(*Directory)
|
||||
for i, d := range parts[:len(parts)-1] {
|
||||
fsn, err := cur.Child(d)
|
||||
if err != nil {
|
||||
if err == os.ErrNotExist && parents {
|
||||
mkd, err := cur.Mkdir(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fsn = mkd
|
||||
if err == os.ErrNotExist && parents {
|
||||
mkd, err := cur.Mkdir(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fsn = mkd
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
next, ok := fsn.(*Directory)
|
||||
|
||||
@ -59,6 +59,19 @@ test_files_api() {
|
||||
verify_dir_contents /cats
|
||||
'
|
||||
|
||||
test_expect_success "check root hash" '
|
||||
ipfs files stat / | head -n1 > roothash
|
||||
'
|
||||
|
||||
test_expect_success "cannot mkdir /" '
|
||||
test_expect_code 1 ipfs files mkdir /
|
||||
'
|
||||
|
||||
test_expect_success "check root hash was not changed" '
|
||||
ipfs files stat / | head -n1 > roothashafter &&
|
||||
test_cmp roothash roothashafter
|
||||
'
|
||||
|
||||
test_expect_success "can put files into directory" '
|
||||
ipfs files cp /ipfs/$FILE1 /cats/file1
|
||||
'
|
||||
@ -73,7 +86,7 @@ test_files_api() {
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo foo > expected &&
|
||||
test_cmp file1out expected
|
||||
test_cmp expected file1out
|
||||
'
|
||||
|
||||
test_expect_success "can put another file into root" '
|
||||
@ -90,7 +103,7 @@ test_files_api() {
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo bar > expected &&
|
||||
test_cmp file2out expected
|
||||
test_cmp expected file2out
|
||||
'
|
||||
|
||||
test_expect_success "can make deep directory" '
|
||||
@ -116,7 +129,7 @@ test_files_api() {
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo baz > expected &&
|
||||
test_cmp output expected
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
test_expect_success "file shows up in dir" '
|
||||
@ -147,6 +160,19 @@ test_files_api() {
|
||||
verify_dir_contents / cats
|
||||
'
|
||||
|
||||
test_expect_success "check root hash" '
|
||||
ipfs files stat / | head -n1 > roothash
|
||||
'
|
||||
|
||||
test_expect_success "cannot remove root" '
|
||||
test_expect_code 1 ipfs files rm -r /
|
||||
'
|
||||
|
||||
test_expect_success "check root hash was not changed" '
|
||||
ipfs files stat / | head -n1 > roothashafter &&
|
||||
test_cmp roothash roothashafter
|
||||
'
|
||||
|
||||
# test read options
|
||||
|
||||
test_expect_success "read from offset works" '
|
||||
@ -155,7 +181,7 @@ test_files_api() {
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo oo > expected &&
|
||||
test_cmp output expected
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
test_expect_success "read with size works" '
|
||||
@ -164,7 +190,55 @@ test_files_api() {
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
printf fo > expected &&
|
||||
test_cmp output expected
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
test_expect_success "cannot read from negative offset" '
|
||||
test_expect_code 1 ipfs files read --offset -3 /cats/file1
|
||||
'
|
||||
|
||||
test_expect_success "read from offset 0 works" '
|
||||
ipfs files read --offset 0 /cats/file1 > output
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo foo > expected &&
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
test_expect_success "read last byte works" '
|
||||
ipfs files read --offset 2 /cats/file1 > output
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo o > expected &&
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
test_expect_success "offset past end of file fails" '
|
||||
test_expect_code 1 ipfs files read --offset 5 /cats/file1
|
||||
'
|
||||
|
||||
test_expect_success "cannot read negative count bytes" '
|
||||
test_expect_code 1 ipfs read --count -1 /cats/file1
|
||||
'
|
||||
|
||||
test_expect_success "reading zero bytes prints nothing" '
|
||||
ipfs files read --count 0 /cats/file1 > output
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
printf "" > expected &&
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
test_expect_success "count > len(file) prints entire file" '
|
||||
ipfs files read --count 200 /cats/file1 > output
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo foo > expected &&
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
# test write
|
||||
@ -189,7 +263,57 @@ test_files_api() {
|
||||
test_expect_success "file looks correct" '
|
||||
echo "ipfs is super cool" > expected &&
|
||||
ipfs files read /cats/ipfs > output &&
|
||||
test_cmp output expected
|
||||
test_cmp expected output
|
||||
'
|
||||
|
||||
test_expect_success "cant write to negative offset" '
|
||||
ipfs files stat /cats/ipfs | head -n1 > filehash &&
|
||||
test_expect_code 1 ipfs files write --offset -1 /cats/ipfs < output
|
||||
'
|
||||
|
||||
test_expect_success "verify file was not changed" '
|
||||
ipfs files stat /cats/ipfs | head -n1 > afterhash &&
|
||||
test_cmp filehash afterhash
|
||||
'
|
||||
|
||||
test_expect_success "write new file for testing" '
|
||||
echo foobar | ipfs files write --create /fun
|
||||
'
|
||||
|
||||
test_expect_success "write to offset past end works" '
|
||||
echo blah | ipfs files write --offset 50 /fun
|
||||
'
|
||||
|
||||
test_expect_success "can read file" '
|
||||
ipfs files read /fun > sparse_output
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo foobar > sparse_expected &&
|
||||
echo blah | dd of=sparse_expected bs=50 seek=1 &&
|
||||
test_cmp sparse_expected sparse_output
|
||||
'
|
||||
|
||||
test_expect_success "cleanup" '
|
||||
ipfs files rm /fun
|
||||
'
|
||||
|
||||
test_expect_success "cannot write to directory" '
|
||||
ipfs files stat /cats | head -n1 > dirhash &&
|
||||
test_expect_code 1 ipfs files write /cats < output
|
||||
'
|
||||
|
||||
test_expect_success "verify dir was not changed" '
|
||||
ipfs files stat /cats | head -n1 > afterdirhash &&
|
||||
test_cmp dirhash afterdirhash
|
||||
'
|
||||
|
||||
test_expect_success "cannot write to nonexistant path" '
|
||||
test_expect_code 1 ipfs files write /cats/bar/ < output
|
||||
'
|
||||
|
||||
test_expect_success "no new paths were created" '
|
||||
verify_dir_contents /cats file1 ipfs this
|
||||
'
|
||||
|
||||
# test mv
|
||||
|
||||
@ -368,19 +368,31 @@ func (dm *DagModifier) Seek(offset int64, whence int) (int64, error) {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
fisize, err := dm.Size()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var newoffset uint64
|
||||
switch whence {
|
||||
case os.SEEK_CUR:
|
||||
dm.curWrOff += uint64(offset)
|
||||
dm.writeStart = dm.curWrOff
|
||||
newoffset = dm.curWrOff + uint64(offset)
|
||||
case os.SEEK_SET:
|
||||
dm.curWrOff = uint64(offset)
|
||||
dm.writeStart = uint64(offset)
|
||||
newoffset = uint64(offset)
|
||||
case os.SEEK_END:
|
||||
return 0, ErrSeekEndNotImpl
|
||||
default:
|
||||
return 0, ErrUnrecognizedWhence
|
||||
}
|
||||
|
||||
if offset > fisize {
|
||||
if err := dm.expandSparse(offset - fisize); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
dm.curWrOff = newoffset
|
||||
dm.writeStart = newoffset
|
||||
|
||||
if dm.read != nil {
|
||||
_, err = dm.read.Seek(offset, whence)
|
||||
if err != nil {
|
||||
|
||||
@ -487,6 +487,53 @@ func TestSparseWrite(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeekPastEndWrite(t *testing.T) {
|
||||
dserv := getMockDagServ(t)
|
||||
_, n := getNode(t, dserv, 0)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 5000)
|
||||
u.NewTimeSeededRand().Read(buf[2500:])
|
||||
|
||||
nseek, err := dagmod.Seek(2500, os.SEEK_SET)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if nseek != 2500 {
|
||||
t.Fatal("failed to seek")
|
||||
}
|
||||
|
||||
wrote, err := dagmod.Write(buf[2500:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if wrote != 2500 {
|
||||
t.Fatal("incorrect write amount")
|
||||
}
|
||||
|
||||
_, err = dagmod.Seek(0, os.SEEK_SET)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := ioutil.ReadAll(dagmod)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = arrComp(out, buf); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDagmodWrite(b *testing.B) {
|
||||
b.StopTimer()
|
||||
dserv := getMockDagServ(b)
|
||||
|
||||
Loading…
Reference in New Issue
Block a user