Merge pull request #3743 from ipfs/kevina/cidv1-add

Add support for using CidV1 in 'ipfs add'
This commit is contained in:
Jeromy Johnson 2017-04-26 18:54:45 -07:00 committed by GitHub
commit e5529cd57b
17 changed files with 263 additions and 54 deletions

View File

@ -38,8 +38,11 @@ const (
rawLeavesOptionName = "raw-leaves"
noCopyOptionName = "nocopy"
fstoreCacheOptionName = "fscache"
cidVersionOptionName = "cid-version"
)
const adderOutChanSize = 8
var AddCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Add a file or directory to ipfs.",
@ -86,6 +89,7 @@ You can now refer to the added file in a gateway, like so:
cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. (experimental)"),
cmds.BoolOption(noCopyOptionName, "Add the file using filestore. (experimental)"),
cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
cmds.IntOption(cidVersionOptionName, "Cid version. Non-zero value will change default of 'raw-leaves' to true. (experimental)").Default(0),
},
PreRun: func(req cmds.Request) error {
quiet, _, _ := req.Option(quietOptionName).Bool()
@ -159,6 +163,7 @@ You can now refer to the added file in a gateway, like so:
rawblks, rbset, _ := req.Option(rawLeavesOptionName).Bool()
nocopy, _, _ := req.Option(noCopyOptionName).Bool()
fscache, _, _ := req.Option(fstoreCacheOptionName).Bool()
cidVer, _, _ := req.Option(cidVersionOptionName).Int()
if nocopy && !cfg.Experimental.FilestoreEnabled {
res.SetError(errors.New("filestore is not enabled, see https://git.io/vy4XN"),
@ -175,6 +180,16 @@ You can now refer to the added file in a gateway, like so:
return
}
if cidVer >= 1 && !rbset {
rawblks = true
}
prefix, err := dag.PrefixForCidVersion(cidVer)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
if hash {
nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{
//TODO: need this to be true or all files
@ -202,15 +217,15 @@ You can now refer to the added file in a gateway, like so:
bserv := blockservice.New(addblockstore, exch)
dserv := dag.NewDAGService(bserv)
outChan := make(chan interface{}, 8)
res.SetOutput((<-chan interface{})(outChan))
fileAdder, err := coreunix.NewAdder(req.Context(), n.Pinning, n.Blockstore, dserv)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
outChan := make(chan interface{}, adderOutChanSize)
res.SetOutput((<-chan interface{})(outChan))
fileAdder.Out = outChan
fileAdder.Chunker = chunker
fileAdder.Progress = progress
@ -221,6 +236,7 @@ You can now refer to the added file in a gateway, like so:
fileAdder.Silent = silent
fileAdder.RawLeaves = rawblks
fileAdder.NoCopy = nocopy
fileAdder.Prefix = &prefix
if hash {
md := dagtest.Mock()

View File

@ -69,13 +69,7 @@ type AddedObject struct {
}
func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCBlockstore, ds dag.DAGService) (*Adder, error) {
mr, err := mfs.NewRoot(ctx, ds, unixfs.EmptyDirNode(), nil)
if err != nil {
return nil, err
}
return &Adder{
mr: mr,
ctx: ctx,
pinning: p,
blockstore: bs,
@ -87,7 +81,6 @@ func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCBlockstore, ds dag.
Wrap: false,
Chunker: "",
}, nil
}
// Adder holds the switches passed to the `add` command.
@ -107,13 +100,29 @@ type Adder struct {
NoCopy bool
Chunker string
root node.Node
mr *mfs.Root
mroot *mfs.Root
unlocker bs.Unlocker
tempRoot *cid.Cid
Prefix *cid.Prefix
}
// mfsRoot lazily constructs (and caches) the MFS root used to stage
// added files, propagating the adder's CID prefix to the empty root
// directory node so children inherit the requested CID version.
func (adder *Adder) mfsRoot() (*mfs.Root, error) {
	if adder.mroot != nil {
		return adder.mroot, nil
	}
	rnode := unixfs.EmptyDirNode()
	rnode.SetPrefix(adder.Prefix)
	mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil)
	// check the error before touching mr: dereferencing the result of a
	// failed NewRoot would panic on a nil root
	if err != nil {
		return nil, err
	}
	mr.Prefix = adder.Prefix
	adder.mroot = mr
	return adder.mroot, nil
}
func (adder *Adder) SetMfsRoot(r *mfs.Root) {
adder.mr = r
adder.mroot = r
}
// Constructs a node from reader's data, and adds it. Doesn't pin.
@ -122,11 +131,13 @@ func (adder Adder) add(reader io.Reader) (node.Node, error) {
if err != nil {
return nil, err
}
params := ihelper.DagBuilderParams{
Dagserv: adder.dagService,
RawLeaves: adder.RawLeaves,
Maxlinks: ihelper.DefaultLinksPerBlock,
NoCopy: adder.NoCopy,
Prefix: adder.Prefix,
}
if adder.Trickle {
@ -142,7 +153,11 @@ func (adder *Adder) RootNode() (node.Node, error) {
return adder.root, nil
}
root, err := adder.mr.GetValue().GetNode()
mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
root, err := mr.GetValue().GetNode()
if err != nil {
return nil, err
}
@ -188,9 +203,13 @@ func (adder *Adder) PinRoot() error {
}
func (adder *Adder) Finalize() (node.Node, error) {
root := adder.mr.GetValue()
mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
root := mr.GetValue()
err := root.Flush()
err = root.Flush()
if err != nil {
return nil, err
}
@ -203,7 +222,12 @@ func (adder *Adder) Finalize() (node.Node, error) {
}
name = children[0]
dir, ok := adder.mr.GetValue().(*mfs.Directory)
mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
dir, ok := mr.GetValue().(*mfs.Directory)
if !ok {
return nil, fmt.Errorf("root is not a directory")
}
@ -219,7 +243,7 @@ func (adder *Adder) Finalize() (node.Node, error) {
return nil, err
}
err = adder.mr.Close()
err = mr.Close()
if err != nil {
return nil, err
}
@ -357,14 +381,18 @@ func (adder *Adder) addNode(node node.Node, path string) error {
node = pi.Node
}
mr, err := adder.mfsRoot()
if err != nil {
return err
}
dir := gopath.Dir(path)
if dir != "." {
if err := mfs.Mkdir(adder.mr, dir, true, false); err != nil {
if err := mfs.Mkdir(mr, dir, true, false); err != nil {
return err
}
}
if err := mfs.PutNode(adder.mr, path, node); err != nil {
if err := mfs.PutNode(mr, path, node); err != nil {
return err
}
@ -406,6 +434,7 @@ func (adder *Adder) addFile(file files.File) error {
}
dagnode := dag.NodeWithData(sdata)
dagnode.SetPrefix(adder.Prefix)
_, err = adder.dagService.Add(dagnode)
if err != nil {
return err
@ -439,7 +468,11 @@ func (adder *Adder) addFile(file files.File) error {
func (adder *Adder) addDir(dir files.File) error {
log.Infof("adding directory: %s", dir.FileName())
err := mfs.Mkdir(adder.mr, dir.FileName(), true, false)
mr, err := adder.mfsRoot()
if err != nil {
return err
}
err = mfs.Mkdir(mr, dir.FileName(), true, false)
if err != nil {
return err
}

View File

@ -13,7 +13,7 @@ func BalancedLayout(db *h.DagBuilderHelper) (node.Node, error) {
var root *h.UnixfsNode
for level := 0; !db.Done(); level++ {
nroot := h.NewUnixfsNode()
nroot := db.NewUnixfsNode()
db.SetPosInfo(nroot, 0)
// add our old root as a child of the new root.
@ -33,7 +33,7 @@ func BalancedLayout(db *h.DagBuilderHelper) (node.Node, error) {
}
if root == nil {
root = h.NewUnixfsNode()
root = db.NewUnixfsNode()
}
out, err := db.Add(root)
@ -72,7 +72,7 @@ func fillNodeRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int, offset u
// while we have room AND we're not done
for node.NumChildren() < db.Maxlinks() && !db.Done() {
child := h.NewUnixfsNode()
child := db.NewUnixfsNode()
db.SetPosInfo(child, offset)
err := fillNodeRec(db, child, depth-1, offset)

View File

@ -7,7 +7,9 @@ import (
"github.com/ipfs/go-ipfs/commands/files"
"github.com/ipfs/go-ipfs/importer/chunk"
dag "github.com/ipfs/go-ipfs/merkledag"
ft "github.com/ipfs/go-ipfs/unixfs"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
)
@ -23,6 +25,7 @@ type DagBuilderHelper struct {
batch *dag.Batch
fullPath string
stat os.FileInfo
prefix *cid.Prefix
}
type DagBuilderParams struct {
@ -33,6 +36,9 @@ type DagBuilderParams struct {
// instead of using the unixfs TRaw type
RawLeaves bool
// CID Prefix to use if set
Prefix *cid.Prefix
// DAGService to write blocks to (required)
Dagserv dag.DAGService
@ -48,6 +54,7 @@ func (dbp *DagBuilderParams) New(spl chunk.Splitter) *DagBuilderHelper {
dserv: dbp.Dagserv,
spl: spl,
rawLeaves: dbp.RawLeaves,
prefix: dbp.Prefix,
maxlinks: dbp.Maxlinks,
batch: dbp.Dagserv.Batch(),
}
@ -103,6 +110,26 @@ func (db *DagBuilderHelper) GetDagServ() dag.DAGService {
return db.dserv
}
// NewUnixfsNode creates a new Unixfs node to represent a file,
// stamped with the builder's CID prefix (nil prefix means CIDv0).
func (db *DagBuilderHelper) NewUnixfsNode() *UnixfsNode {
	un := new(UnixfsNode)
	un.node = new(dag.ProtoNode)
	un.ufmt = &ft.FSNode{Type: ft.TFile}
	un.SetPrefix(db.prefix)
	return un
}
// NewUnixfsBlock creates a new Unixfs node to represent a raw data
// block, stamped with the builder's CID prefix (nil prefix means CIDv0).
func (db *DagBuilderHelper) NewUnixfsBlock() *UnixfsNode {
	un := new(UnixfsNode)
	un.node = new(dag.ProtoNode)
	un.ufmt = &ft.FSNode{Type: ft.TRaw}
	un.SetPrefix(db.prefix)
	return un
}
// FillNodeLayer will add datanodes as children to the given node until
// at most db.indirSize nodes are added
//
@ -143,7 +170,7 @@ func (db *DagBuilderHelper) GetNextDataNode() (*UnixfsNode, error) {
raw: true,
}, nil
} else {
blk := NewUnixfsBlock()
blk := db.NewUnixfsBlock()
blk.SetData(data)
return blk, nil
}

View File

@ -10,6 +10,7 @@ import (
pi "github.com/ipfs/go-ipfs/thirdparty/posinfo"
ft "github.com/ipfs/go-ipfs/unixfs"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
)
@ -48,22 +49,6 @@ type UnixfsNode struct {
posInfo *pi.PosInfo
}
// NewUnixfsNode creates a new Unixfs node to represent a file
func NewUnixfsNode() *UnixfsNode {
return &UnixfsNode{
node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TFile},
}
}
// NewUnixfsBlock creates a new Unixfs node to represent a raw data block
func NewUnixfsBlock() *UnixfsNode {
return &UnixfsNode{
node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TRaw},
}
}
// NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node
func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) {
mb, err := ft.FSNodeFromBytes(nd.Data())
@ -77,6 +62,11 @@ func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) {
}, nil
}
// SetPrefix sets the CID Prefix on the underlying dag-pb node; a nil
// prefix resets it to the default (CIDv0) prefix.
func (n *UnixfsNode) SetPrefix(prefix *cid.Prefix) {
n.node.SetPrefix(prefix)
}
func (n *UnixfsNode) NumChildren() int {
return n.ufmt.NumChildren()
}

View File

@ -18,13 +18,13 @@ import (
const layerRepeat = 4
func TrickleLayout(db *h.DagBuilderHelper) (node.Node, error) {
root := h.NewUnixfsNode()
root := db.NewUnixfsNode()
if err := db.FillNodeLayer(root); err != nil {
return nil, err
}
for level := 1; !db.Done(); level++ {
for i := 0; i < layerRepeat && !db.Done(); i++ {
next := h.NewUnixfsNode()
next := db.NewUnixfsNode()
if err := fillTrickleRec(db, next, level); err != nil {
return nil, err
}
@ -54,7 +54,7 @@ func fillTrickleRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int) error
for i := 1; i < depth && !db.Done(); i++ {
for j := 0; j < layerRepeat && !db.Done(); j++ {
next := h.NewUnixfsNode()
next := db.NewUnixfsNode()
if err := fillTrickleRec(db, next, i); err != nil {
return err
}
@ -117,7 +117,7 @@ func TrickleAppend(ctx context.Context, basen node.Node, db *h.DagBuilderHelper)
// Now, continue filling out tree like normal
for i := n; !db.Done(); i++ {
for j := 0; j < layerRepeat && !db.Done(); j++ {
next := h.NewUnixfsNode()
next := db.NewUnixfsNode()
err := fillTrickleRec(db, next, i)
if err != nil {
return nil, err
@ -162,7 +162,7 @@ func appendFillLastChild(ctx context.Context, ufsn *h.UnixfsNode, depth int, lay
// Partially filled depth layer
if layerFill != 0 {
for ; layerFill < layerRepeat && !db.Done(); layerFill++ {
next := h.NewUnixfsNode()
next := db.NewUnixfsNode()
err := fillTrickleRec(db, next, depth)
if err != nil {
return err
@ -211,7 +211,7 @@ func trickleAppendRec(ctx context.Context, ufsn *h.UnixfsNode, db *h.DagBuilderH
// Now, continue filling out tree like normal
for i := n; i < depth && !db.Done(); i++ {
for j := 0; j < layerRepeat && !db.Done(); j++ {
next := h.NewUnixfsNode()
next := db.NewUnixfsNode()
if err := fillTrickleRec(db, next, i); err != nil {
return nil, err
}

View File

@ -86,7 +86,7 @@ func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) {
if n.cached == nil {
if n.Prefix.Codec == 0 { // unset
n.Prefix = defaultCidPrefix
n.Prefix = v0CidPrefix
}
c, err := n.Prefix.Sum(n.encoded)
if err != nil {

View File

@ -28,13 +28,45 @@ type ProtoNode struct {
Prefix cid.Prefix
}
var defaultCidPrefix = cid.Prefix{
var v0CidPrefix = cid.Prefix{
Codec: cid.DagProtobuf,
MhLength: -1,
MhType: mh.SHA2_256,
Version: 0,
}
var v1CidPrefix = cid.Prefix{
Codec: cid.DagProtobuf,
MhLength: -1,
MhType: mh.SHA2_256,
Version: 1,
}
// PrefixForCidVersion returns the Protobuf prefix for a given CID version.
// Only versions 0 and 1 exist; anything else is an error.
func PrefixForCidVersion(version int) (cid.Prefix, error) {
	if version == 0 {
		return v0CidPrefix, nil
	}
	if version == 1 {
		return v1CidPrefix, nil
	}
	return cid.Prefix{}, fmt.Errorf("unknown CID version: %d", version)
}
// SetPrefix sets the CID prefix if it is non nil; if prefix is nil it
// resets it to the default (CIDv0) value. The codec is forced to
// DagProtobuf since a ProtoNode always encodes as dag-pb. Any cached
// encoding/CID is invalidated so the next Cid() reflects the new prefix.
func (n *ProtoNode) SetPrefix(prefix *cid.Prefix) {
	if prefix == nil {
		n.Prefix = v0CidPrefix
	} else {
		n.Prefix = *prefix
		n.Prefix.Codec = cid.DagProtobuf
	}
	// invalidate in both branches: resetting to v0 with a stale cached
	// CID (computed under a previous v1 prefix) would return the wrong CID
	n.encoded = nil
	n.cached = nil
}
type LinkSlice []*node.Link
func (ls LinkSlice) Len() int { return len(ls) }
@ -158,6 +190,9 @@ func (n *ProtoNode) Copy() node.Node {
nnode.links = make([]*node.Link, len(n.links))
copy(nnode.links, n.links)
}
nnode.Prefix = n.Prefix
return nnode
}
@ -260,12 +295,13 @@ func (n *ProtoNode) Cid() *cid.Cid {
}
if n.Prefix.Codec == 0 {
n.Prefix = defaultCidPrefix
n.SetPrefix(nil)
}
c, err := n.Prefix.Sum(n.RawData())
if err != nil {
// programmer error
err = fmt.Errorf("invalid CID of length %d: %x: %v", len(n.RawData()), n.RawData(), err)
panic(err)
}

View File

@ -15,6 +15,7 @@ import (
uio "github.com/ipfs/go-ipfs/unixfs/io"
ufspb "github.com/ipfs/go-ipfs/unixfs/pb"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
)
@ -57,6 +58,11 @@ func NewDirectory(ctx context.Context, name string, node node.Node, parent child
}, nil
}
// SetPrefix sets the CID prefix used for nodes created under this
// directory, delegating to the underlying unixfs directory builder.
func (d *Directory) SetPrefix(prefix *cid.Prefix) {
d.dirbuilder.SetPrefix(prefix)
}
// closeChild updates the child by the given name to the dag node 'nd'
// and changes its own dag node
func (d *Directory) closeChild(name string, nd node.Node, sync bool) error {

View File

@ -134,6 +134,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
if err != nil {
return err
}
mkd.SetPrefix(r.Prefix)
fsn = mkd
} else if err != nil {
return err
@ -152,6 +153,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
return err
}
}
final.SetPrefix(r.Prefix)
if flush {
err := final.Flush()

View File

@ -61,6 +61,9 @@ type Root struct {
dserv dag.DAGService
Type string
// Prefix to use for any children created
Prefix *cid.Prefix
}
type PubFunc func(context.Context, *cid.Cid) error

View File

@ -100,7 +100,7 @@ test_add_cat_5MB() {
test_cmp sha1_expected sha1_actual
'
test_expect_success "'ipfs add bigfile' succeeds" '
test_expect_success "'ipfs add $ADD_FLAGS bigfile' succeeds" '
ipfs add $ADD_FLAGS mountdir/bigfile >actual ||
test_fsh cat daemon_err
'
@ -142,6 +142,9 @@ test_add_cat_raw() {
}
test_add_cat_expensive() {
ADD_FLAGS="$1"
HASH="$2"
test_expect_success EXPENSIVE "generate 100MB file using go-random" '
random 104857600 42 >mountdir/bigfile
'
@ -152,12 +155,11 @@ test_add_cat_expensive() {
test_cmp sha1_expected sha1_actual
'
test_expect_success EXPENSIVE "ipfs add bigfile succeeds" '
ipfs add mountdir/bigfile >actual
test_expect_success EXPENSIVE "ipfs add $ADD_FLAGS bigfile succeeds" '
ipfs add $ADD_FLAGS mountdir/bigfile >actual
'
test_expect_success EXPENSIVE "ipfs add bigfile output looks good" '
HASH="QmU9SWAPPmNEKZB8umYMmjYvN7VyHqABNvdA6GUi4MMEz3" &&
echo "added $HASH bigfile" >expected &&
test_cmp expected actual
'
@ -391,6 +393,16 @@ MARS="zb2rhZdTkQNawVajsTNiYc9cTPHqgLdJVvBRkZok9RjkgQYRU"
VENUS="zb2rhn6TGvnUaMAg4VV4y9HVx5W42HihcH4jsyrDv8mkepFqq"
add_directory '--raw-leaves'
PLANETS="zdj7Wnbun6P41Z5ddTkNvZaDTmQ8ZLdiKFcJrL9sV87rPScMP"
MARS="zb2rhZdTkQNawVajsTNiYc9cTPHqgLdJVvBRkZok9RjkgQYRU"
VENUS="zb2rhn6TGvnUaMAg4VV4y9HVx5W42HihcH4jsyrDv8mkepFqq"
add_directory '--cid-version=1'
PLANETS="zdj7WiC51v78BjBcmZR7uuBvmDWxSn5EDr5MiyTwE18e8qvb7"
MARS="zdj7WWx6fGNrNGkdpkuTAxCjKbQ1pPtarqA6VQhedhLTZu34J"
VENUS="zdj7WbB1BUF8WejmVpQCmMLd1RbPnxJtvAj1Lep6eTmXRFbrz"
add_directory '--cid-version=1 --raw-leaves=false'
test_expect_success "'ipfs add -rn' succeeds" '
mkdir -p mountdir/moons/jupiter &&
@ -425,7 +437,20 @@ test_add_cat_5MB "" "QmSr7FqYkxYWGoSfy8ZiaMWQ5vosb18DQGCzjwEQnVHkTb"
test_add_cat_5MB --raw-leaves "QmbdLHCmdi48eM8T7D67oXjA1S2Puo8eMfngdHhdPukFd6"
test_add_cat_expensive
# note: the specified hash implies that internal nodes are stored
# using CidV1 and leaves are stored using raw blocks
test_add_cat_5MB --cid-version=1 "zdj7WiiaedqVBXjX4SNqx3jfuZideDqdLYnDzCDJ66JDMK9o2"
# note: the specified hash implies that internal nodes are stored
# using CidV1 and leaves are stored using CidV1 but using the legacy
# format (i.e. not raw)
test_add_cat_5MB '--cid-version=1 --raw-leaves=false' "zdj7WfgEsj897BBZj2mcfsRLhaPZcCixPV2G7DkWgF1Wdr64P"
test_add_cat_expensive "" "QmU9SWAPPmNEKZB8umYMmjYvN7VyHqABNvdA6GUi4MMEz3"
# note: the specified hash implies that internal nodes are stored
# using CidV1 and leaves are stored using raw blocks
test_add_cat_expensive "--cid-version=1" "zdj7WcatQrtuE4WMkS4XsfsMixuQN2po4irkYhqxeJyW1wgCq"
test_add_named_pipe " Post http://$API_ADDR/api/v0/add?encoding=json&progress=true&r=true&stream-channels=true:"
@ -433,6 +458,12 @@ test_add_pwd_is_symlink
test_add_cat_raw
test_expect_success "ipfs add --cid-version=9 fails" '
echo "context" > afile.txt &&
test_must_fail ipfs add --cid-version=9 afile.txt 2>&1 | tee add_out &&
grep -q "unknown CID version" add_out
'
test_kill_ipfs_daemon
# should work offline

View File

@ -27,6 +27,14 @@ added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy
added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7
added QmNQoesMj1qp8ApE51NbtTjFYksyzkezPD4cat7V2kzbKN '
add_w_d1_v1='added zb2rhjXyHbbgwgtAUwHtpBd8iXLgK22ZjVmaiJSMNmqBTpXS3 _jo7/-s782qgs
added zb2rhi6PQqQFbS4QsvrV8sL9ue1fvFoqtLVqogNPCZri8rquN _jo7/15totauzkak-
added zb2rhjQthC6LgnNZztpsF9LcfPxznh3cJnmzUx8dnSqNqJ8Yz _jo7/galecuirrj4r
added zb2rhYh9hgDw1DpaZfLUU5MkKNezPWjPTkgGQPiTyLpZYu3jn _jo7/mzo50r-1xidf5zx
added zb2rhZK5xwEUhY4uskfj4sn979aCH27cnqseVVznYDn7NFWtt _jo7/wzvsihy
added zdj7WfNC8EZchqskczxsgrVEqwLVpksQ9B5kopf391jVbCGwv _jo7
added zdj7Wn5jf686mfYE8gUKWzY7aTjp5eAQcecD8q4ZtqLJbDNxe '
add_w_d2='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ gnz66h/1k0xpx34
added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke gnz66h/9cwudvacx
@ -133,6 +141,14 @@ test_add_w() {
test_sort_cmp expected actual
'
test_expect_success "ipfs add -w -r (dir) --cid-version=1 succeeds" '
ipfs add -r -w --cid-version=1 m/t_1wp-8a2/_jo7 >actual
'
test_expect_success "ipfs add -w -r (dir) --cid-version=1 is correct" '
echo "$add_w_d1_v1" >expected &&
test_sort_cmp expected actual
'
}
test_init_ipfs

View File

@ -27,6 +27,17 @@ test_add_symlinks() {
test_cmp filehash_exp filehash_out
'
test_expect_success "ipfs add --cid-version=1 files succeeds" '
ipfs add -q -r --cid-version=1 files >filehash_all &&
tail -n 1 filehash_all >filehash_out
'
test_expect_success "output looks good" '
# note this hash implies all internal nodes are stored using CidV1
echo zdj7WZDQ2xMmr4qn6aRZTsE9fCUs2KmvPigpHdpssqUobwcWK > filehash_exp &&
test_cmp filehash_exp filehash_out
'
test_expect_success "adding a symlink adds the link itself" '
ipfs add -q files/bar/baz > goodlink_out
'

View File

@ -55,4 +55,23 @@ test_expect_success "sharded and unsharded output look the same" '
test_cmp sharded_out unsharded_out
'
test_add_large_dir_v1() {
exphash="$1"
test_expect_success "ipfs add (CIDv1) on very large directory succeeds" '
ipfs add -r -q --cid-version=1 testdata | tail -n1 > sharddir_out &&
echo "$exphash" > sharddir_exp &&
test_cmp sharddir_exp sharddir_out
'
}
# this hash implies both the directory and the leaf entries are CIDv1
SHARDEDV1="zdj7WX91spg4DsnNpvoBLjyjXUGgcTTWavygBbSifpmJdgPUA"
test_add_large_dir_v1 "$SHARDEDV1"
test_launch_ipfs_daemon
test_add_large_dir_v1 "$SHARDEDV1"
test_kill_ipfs_daemon
test_done

View File

@ -31,6 +31,7 @@ import (
format "github.com/ipfs/go-ipfs/unixfs"
upb "github.com/ipfs/go-ipfs/unixfs/pb"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
"gx/ipfs/QmfJHywXQu98UeZtGJBQrPAR6AtmDjjbe3qjTo9piXHPnx/murmur3"
@ -50,6 +51,7 @@ type HamtShard struct {
tableSize int
tableSizeLg2 int
prefix *cid.Prefix
hashFunc uint64
prefixPadStr string
@ -123,9 +125,15 @@ func NewHamtFromDag(dserv dag.DAGService, nd node.Node) (*HamtShard, error) {
return ds, nil
}
// SetPrefix sets the CID Prefix applied when this shard is next
// serialized via Node().
func (ds *HamtShard) SetPrefix(prefix *cid.Prefix) {
ds.prefix = prefix
}
// Node serializes the HAMT structure into a merkledag node with unixfs formatting
func (ds *HamtShard) Node() (node.Node, error) {
out := new(dag.ProtoNode)
out.SetPrefix(ds.prefix)
// TODO: optimized 'for each set bit'
for i := 0; i < ds.tableSize; i++ {

View File

@ -8,6 +8,7 @@ import (
mdag "github.com/ipfs/go-ipfs/merkledag"
format "github.com/ipfs/go-ipfs/unixfs"
hamt "github.com/ipfs/go-ipfs/unixfs/hamt"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
)
@ -79,6 +80,16 @@ func NewDirectoryFromNode(dserv mdag.DAGService, nd node.Node) (*Directory, erro
}
}
// SetPrefix sets the prefix of the root node. Exactly one of dirnode
// (basic directory) or shard (HAMT directory) backs this Directory, so
// both are nil-guarded and the prefix is forwarded to whichever exists.
func (d *Directory) SetPrefix(prefix *cid.Prefix) {
if d.dirnode != nil {
d.dirnode.SetPrefix(prefix)
}
if d.shard != nil {
d.shard.SetPrefix(prefix)
}
}
// AddChild adds a (name, key)-pair to the root node.
func (d *Directory) AddChild(ctx context.Context, name string, nd node.Node) error {
if d.shard == nil {