Merge pull request #3097 from ipfs/test/unixfs/reader-cover

test: 80% coverage of unixfs/io [WIP]
This commit is contained in:
Jeromy Johnson 2016-09-12 17:34:36 -04:00 committed by GitHub
commit 87517b74e1
5 changed files with 452 additions and 137 deletions

View File

@ -68,9 +68,7 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*Dag
case ftpb.Data_Directory:
// Dont allow reading directories
return nil, ErrIsDir
case ftpb.Data_Raw:
fallthrough
case ftpb.Data_File:
case ftpb.Data_File, ftpb.Data_Raw:
return NewDataFileReader(ctx, n, pb, serv), nil
case ftpb.Data_Metadata:
if len(n.Links) == 0 {
@ -133,7 +131,7 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
dr.buf = NewRSNCFromBytes(pb.GetData())
return nil
case ftpb.Data_Metadata:
return errors.New("Shouldnt have had metadata object inside file")
return errors.New("shouldnt have had metadata object inside file")
case ftpb.Data_Symlink:
return errors.New("shouldnt have had symlink inside file")
default:

251
unixfs/io/dagreader_test.go Normal file
View File

@ -0,0 +1,251 @@
package io
import (
"bytes"
"io/ioutil"
"os"
"strings"
"testing"
mdag "github.com/ipfs/go-ipfs/merkledag"
"github.com/ipfs/go-ipfs/unixfs"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
testu "github.com/ipfs/go-ipfs/unixfs/test"
)
// TestBasicRead verifies that a DagReader streams back exactly the bytes
// the random node was built from.
func TestBasicRead(t *testing.T) {
	dserv := testu.GetDAGServ()
	inbuf, node := testu.GetRandomNode(t, dserv, 1024)
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	reader, err := NewDagReader(ctx, node, dserv)
	if err != nil {
		t.Fatal(err)
	}

	outbuf, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}

	if err := testu.ArrComp(inbuf, outbuf); err != nil {
		t.Fatal(err)
	}
}
// TestSeekAndRead seeks to every byte offset of a 256-byte node in reverse
// order and checks that Offset() tracks the seek, the byte read matches the
// position, and Offset() advances by one after the read.
func TestSeekAndRead(t *testing.T) {
	dserv := testu.GetDAGServ()
	inbuf := make([]byte, 256)
	for i := 0; i <= 255; i++ {
		inbuf[i] = byte(i)
	}

	node := testu.GetNode(t, dserv, inbuf)
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	reader, err := NewDagReader(ctx, node, dserv)
	if err != nil {
		t.Fatal(err)
	}

	for i := 255; i >= 0; i-- {
		// Check the Seek error instead of discarding it.
		if _, err := reader.Seek(int64(i), os.SEEK_SET); err != nil {
			t.Fatal(err)
		}
		if reader.Offset() != int64(i) {
			t.Fatalf("expected offset %d after seek, got %d", i, reader.Offset())
		}
		out := readByte(t, reader)
		if int(out) != i {
			t.Fatalf("read %d at index %d, expected %d", out, i, i)
		}
		if reader.Offset() != int64(i+1) {
			t.Fatal("expected offset to be increased by one after read")
		}
	}
}
// TestRelativeSeek walks a 1024-byte node forwards with SEEK_CUR strides of
// 4 (1 read + 3 seek) and then backwards from the end, checking the marker
// byte stored every 4 bytes at each position.
func TestRelativeSeek(t *testing.T) {
	dserv := testu.GetDAGServ()
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	inbuf := make([]byte, 1024)
	for i := 0; i < 256; i++ {
		inbuf[i*4] = byte(i)
	}
	inbuf[1023] = 1 // force the reader to be 1024 bytes

	node := testu.GetNode(t, dserv, inbuf)
	reader, err := NewDagReader(ctx, node, dserv)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 256; i++ {
		if reader.Offset() != int64(i*4) {
			t.Fatalf("offset should be %d, was %d", i*4, reader.Offset())
		}
		out := readByte(t, reader)
		if int(out) != i {
			t.Fatalf("expected to read: %d at %d, read %d", i, reader.Offset()-1, out)
		}
		if i != 255 {
			_, err := reader.Seek(3, os.SEEK_CUR)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	_, err = reader.Seek(4, os.SEEK_END)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 256; i++ {
		if reader.Offset() != int64(1020-i*4) {
			t.Fatalf("offset should be %d, was %d", 1020-i*4, reader.Offset())
		}
		out := readByte(t, reader)
		if int(out) != 255-i {
			t.Fatalf("expected to read: %d at %d, read %d", 255-i, reader.Offset()-1, out)
		}
		if i != 255 {
			// seek back 4 bytes of stride plus the 1 byte just read; skipped
			// on the last iteration where it would target a negative offset
			// (the original discarded that error)
			if _, err := reader.Seek(-5, os.SEEK_CUR); err != nil {
				t.Fatal(err)
			}
		}
	}
}
// TestTypeFailures checks that NewDagReader rejects node types that cannot
// be read as files: directories (ErrIsDir) and symlinks (ErrCantReadSymlinks).
func TestTypeFailures(t *testing.T) {
	dserv := testu.GetDAGServ()
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	node := unixfs.EmptyDirNode()
	if _, err := NewDagReader(ctx, node, dserv); err != ErrIsDir {
		// fixed "excepted" typo in the failure message
		t.Fatalf("expected to get %v, got %v", ErrIsDir, err)
	}

	data, err := unixfs.SymlinkData("/somelink")
	if err != nil {
		t.Fatal(err)
	}
	node = mdag.NodeWithData(data)

	if _, err := NewDagReader(ctx, node, dserv); err != ErrCantReadSymlinks {
		t.Fatalf("expected to get %v, got %v", ErrCantReadSymlinks, err)
	}
}
// TestBadPBData checks that NewDagReader returns an error for a node whose
// data is not valid unixfs protobuf.
func TestBadPBData(t *testing.T) {
	dserv := testu.GetDAGServ()
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	node := mdag.NodeWithData([]byte{42})
	_, err := NewDagReader(ctx, node, dserv)
	if err == nil {
		// fixed "excepted" typo in the failure message
		t.Fatal("expected error, got nil")
	}
}
// TestMetadataNode checks that a metadata node with no child link is rejected
// as incorrectly formatted, and that once a child is linked the reader
// returns the child's content.
func TestMetadataNode(t *testing.T) {
	dserv := testu.GetDAGServ()
	rdata, rnode := testu.GetRandomNode(t, dserv, 512)
	_, err := dserv.Add(rnode)
	if err != nil {
		t.Fatal(err)
	}

	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	data, err := unixfs.BytesForMetadata(&unixfs.Metadata{"text", 125})
	if err != nil {
		t.Fatal(err)
	}
	node := mdag.NodeWithData(data)

	_, err = NewDagReader(ctx, node, dserv)
	if err == nil {
		t.Fatal("expected an error")
	}
	if !strings.Contains(err.Error(), "incorrectly formatted") {
		t.Fatal("expected different error")
	}

	// check the AddNodeLink error instead of discarding it
	if err := node.AddNodeLink("", rnode); err != nil {
		t.Fatal(err)
	}

	reader, err := NewDagReader(ctx, node, dserv)
	if err != nil {
		t.Fatal(err)
	}
	readdata, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}
	if err := testu.ArrComp(rdata, readdata); err != nil {
		t.Fatal(err)
	}
}
// TestWriteTo verifies that DagReader.WriteTo copies the node's full content
// into the destination writer.
func TestWriteTo(t *testing.T) {
	dserv := testu.GetDAGServ()
	inbuf, node := testu.GetRandomNode(t, dserv, 1024)
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	reader, err := NewDagReader(ctx, node, dserv)
	if err != nil {
		t.Fatal(err)
	}

	outbuf := new(bytes.Buffer)
	// check the WriteTo error instead of discarding it
	if _, err := reader.WriteTo(outbuf); err != nil {
		t.Fatal(err)
	}

	if err := testu.ArrComp(inbuf, outbuf.Bytes()); err != nil {
		t.Fatal(err)
	}
}
// TestReaderSize checks that the reader reports the size of the underlying
// node. (Renamed from the misspelled TestReaderSzie; test functions have no
// callers, so this is safe.)
func TestReaderSize(t *testing.T) {
	dserv := testu.GetDAGServ()
	size := int64(1024)
	_, node := testu.GetRandomNode(t, dserv, size)
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	reader, err := NewDagReader(ctx, node, dserv)
	if err != nil {
		t.Fatal(err)
	}

	if reader.Size() != uint64(size) {
		t.Fatalf("expected reader size %d, got %d", size, reader.Size())
	}
}
// readByte reads exactly one byte from reader, failing the test if the read
// does not yield a single byte or reports an error.
func readByte(t testing.TB, reader *DagReader) byte {
	var buf [1]byte
	n, err := reader.Read(buf[:])
	if n != 1 {
		t.Fatal("reader should have read just one byte")
	}
	if err != nil {
		t.Fatal(err)
	}
	return buf[0]
}

View File

@ -0,0 +1,50 @@
package io
import (
"context"
"io/ioutil"
"testing"
testu "github.com/ipfs/go-ipfs/unixfs/test"
)
// TestEmptyNode checks that a freshly created empty directory node carries
// no links.
func TestEmptyNode(t *testing.T) {
	dir := NewEmptyDirectory()
	if count := len(dir.Links); count != 0 {
		t.Fatal("empty node should have 0 links")
	}
}
// TestDirBuilder adds a random-content child to a directory builder, looks
// the child up by name, and verifies its content round-trips intact.
func TestDirBuilder(t *testing.T) {
	dserv := testu.GetDAGServ()
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	inbuf, node := testu.GetRandomNode(t, dserv, 1024)
	key := node.Cid()

	b := NewDirectory(dserv)

	// check the AddChild error instead of discarding it
	if err := b.AddChild(ctx, "random", key); err != nil {
		t.Fatal(err)
	}

	dir := b.GetNode()
	outn, err := dir.GetLinkedNode(ctx, dserv, "random")
	if err != nil {
		t.Fatal(err)
	}

	reader, err := NewDagReader(ctx, outn, dserv)
	if err != nil {
		t.Fatal(err)
	}
	outbuf, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}

	if err := testu.ArrComp(inbuf, outbuf); err != nil {
		t.Fatal(err)
	}
}

View File

@ -2,7 +2,6 @@ package mod
import (
"fmt"
"io"
"io/ioutil"
"os"
"testing"
@ -10,13 +9,12 @@ import (
"github.com/ipfs/go-ipfs/blocks/blockstore"
bs "github.com/ipfs/go-ipfs/blockservice"
"github.com/ipfs/go-ipfs/exchange/offline"
imp "github.com/ipfs/go-ipfs/importer"
"github.com/ipfs/go-ipfs/importer/chunk"
h "github.com/ipfs/go-ipfs/importer/helpers"
trickle "github.com/ipfs/go-ipfs/importer/trickle"
mdag "github.com/ipfs/go-ipfs/merkledag"
ft "github.com/ipfs/go-ipfs/unixfs"
uio "github.com/ipfs/go-ipfs/unixfs/io"
testu "github.com/ipfs/go-ipfs/unixfs/test"
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
@ -24,14 +22,6 @@ import (
"gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)
func getMockDagServ(t testing.TB) mdag.DAGService {
dstore := ds.NewMapDatastore()
tsds := sync.MutexWrap(dstore)
bstore := blockstore.NewBlockstore(tsds)
bserv := bs.New(bstore, offline.Exchange(bstore))
return mdag.NewDAGService(bserv)
}
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlockstore) {
dstore := ds.NewMapDatastore()
tsds := sync.MutexWrap(dstore)
@ -41,26 +31,6 @@ func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlocks
return dserv, bstore
}
func getNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, *mdag.Node) {
in := io.LimitReader(u.NewTimeSeededRand(), size)
node, err := imp.BuildTrickleDagFromReader(dserv, sizeSplitterGen(500)(in))
if err != nil {
t.Fatal(err)
}
dr, err := uio.NewDagReader(context.Background(), node, dserv)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(dr)
if err != nil {
t.Fatal(err)
}
return b, node
}
func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) []byte {
newdata := make([]byte, size)
r := u.NewTimeSeededRand()
@ -100,26 +70,20 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier)
t.Fatal(err)
}
err = arrComp(after, orig)
err = testu.ArrComp(after, orig)
if err != nil {
t.Fatal(err)
}
return orig
}
func sizeSplitterGen(size int64) chunk.SplitterGen {
return func(r io.Reader) chunk.Splitter {
return chunk.NewSizeSplitter(r, size)
}
}
func TestDagModifierBasic(t *testing.T) {
dserv := getMockDagServ(t)
b, n := getNode(t, dserv, 50000)
dserv := testu.GetDAGServ()
b, n := testu.GetRandomNode(t, dserv, 50000)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -168,13 +132,13 @@ func TestDagModifierBasic(t *testing.T) {
}
func TestMultiWrite(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -214,20 +178,20 @@ func TestMultiWrite(t *testing.T) {
t.Fatal(err)
}
err = arrComp(rbuf, data)
err = testu.ArrComp(rbuf, data)
if err != nil {
t.Fatal(err)
}
}
func TestMultiWriteAndFlush(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -262,20 +226,20 @@ func TestMultiWriteAndFlush(t *testing.T) {
t.Fatal(err)
}
err = arrComp(rbuf, data)
err = testu.ArrComp(rbuf, data)
if err != nil {
t.Fatal(err)
}
}
func TestWriteNewFile(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -306,19 +270,19 @@ func TestWriteNewFile(t *testing.T) {
t.Fatal(err)
}
if err := arrComp(data, towrite); err != nil {
if err := testu.ArrComp(data, towrite); err != nil {
t.Fatal(err)
}
}
func TestMultiWriteCoal(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -351,20 +315,20 @@ func TestMultiWriteCoal(t *testing.T) {
t.Fatal(err)
}
err = arrComp(rbuf, data)
err = testu.ArrComp(rbuf, data)
if err != nil {
t.Fatal(err)
}
}
func TestLargeWriteChunks(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -390,19 +354,19 @@ func TestLargeWriteChunks(t *testing.T) {
t.Fatal(err)
}
if err = arrComp(out, data); err != nil {
if err = testu.ArrComp(out, data); err != nil {
t.Fatal(err)
}
}
func TestDagTruncate(t *testing.T) {
dserv := getMockDagServ(t)
b, n := getNode(t, dserv, 50000)
dserv := testu.GetDAGServ()
b, n := testu.GetRandomNode(t, dserv, 50000)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -430,7 +394,7 @@ func TestDagTruncate(t *testing.T) {
t.Fatal(err)
}
if err = arrComp(out, b[:12345]); err != nil {
if err = testu.ArrComp(out, b[:12345]); err != nil {
t.Fatal(err)
}
@ -464,12 +428,12 @@ func TestDagTruncate(t *testing.T) {
}
func TestSparseWrite(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -496,18 +460,18 @@ func TestSparseWrite(t *testing.T) {
t.Fatal(err)
}
if err = arrComp(out, buf); err != nil {
if err = testu.ArrComp(out, buf); err != nil {
t.Fatal(err)
}
}
func TestSeekPastEndWrite(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -543,18 +507,18 @@ func TestSeekPastEndWrite(t *testing.T) {
t.Fatal(err)
}
if err = arrComp(out, buf); err != nil {
if err = testu.ArrComp(out, buf); err != nil {
t.Fatal(err)
}
}
func TestRelativeSeek(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -579,13 +543,12 @@ func TestRelativeSeek(t *testing.T) {
}
func TestInvalidSeek(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -597,13 +560,13 @@ func TestInvalidSeek(t *testing.T) {
}
func TestEndSeek(t *testing.T) {
dserv := getMockDagServ(t)
dserv := testu.GetDAGServ()
_, n := getNode(t, dserv, 0)
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -630,13 +593,13 @@ func TestEndSeek(t *testing.T) {
}
func TestReadAndSeek(t *testing.T) {
dserv := getMockDagServ(t)
dserv := testu.GetDAGServ()
_, n := getNode(t, dserv, 0)
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -698,13 +661,13 @@ func TestReadAndSeek(t *testing.T) {
}
func TestCtxRead(t *testing.T) {
dserv := getMockDagServ(t)
dserv := testu.GetDAGServ()
_, n := getNode(t, dserv, 0)
n := testu.GetEmptyNode(t, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
t.Fatal(err)
}
@ -720,7 +683,7 @@ func TestCtxRead(t *testing.T) {
if err != nil {
t.Fatal(err)
}
err = arrComp(readBuf, []byte{0, 1, 2, 3})
err = testu.ArrComp(readBuf, []byte{0, 1, 2, 3})
if err != nil {
t.Fatal(err)
}
@ -730,14 +693,14 @@ func TestCtxRead(t *testing.T) {
func BenchmarkDagmodWrite(b *testing.B) {
b.StopTimer()
dserv := getMockDagServ(b)
_, n := getNode(b, dserv, 0)
dserv := testu.GetDAGServ()
n := testu.GetEmptyNode(b, dserv)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wrsize := 4096
dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
if err != nil {
b.Fatal(err)
}
@ -756,43 +719,3 @@ func BenchmarkDagmodWrite(b *testing.B) {
}
}
}
func arrComp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("Arrays differ in length. %d != %d", len(a), len(b))
}
for i, v := range a {
if v != b[i] {
return fmt.Errorf("Arrays differ at index: %d", i)
}
}
return nil
}
func printDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
pbd, err := ft.FromBytes(nd.Data())
if err != nil {
panic(err)
}
for i := 0; i < indent; i++ {
fmt.Print(" ")
}
fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
if len(nd.Links) > 0 {
fmt.Println()
}
for _, lnk := range nd.Links {
child, err := lnk.GetNode(context.Background(), ds)
if err != nil {
panic(err)
}
printDag(child, ds, indent+1)
}
if len(nd.Links) > 0 {
for i := 0; i < indent; i++ {
fmt.Print(" ")
}
}
fmt.Println("}")
}

93
unixfs/test/utils.go Normal file
View File

@ -0,0 +1,93 @@
package testu
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"testing"
imp "github.com/ipfs/go-ipfs/importer"
"github.com/ipfs/go-ipfs/importer/chunk"
mdag "github.com/ipfs/go-ipfs/merkledag"
mdagmock "github.com/ipfs/go-ipfs/merkledag/test"
ft "github.com/ipfs/go-ipfs/unixfs"
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)
// SizeSplitterGen returns a SplitterGen that chunks its input into pieces of
// the given byte size.
func SizeSplitterGen(size int64) chunk.SplitterGen {
	return func(rd io.Reader) chunk.Splitter {
		return chunk.NewSizeSplitter(rd, size)
	}
}
// GetDAGServ returns an in-memory mock DAGService for use in tests.
func GetDAGServ() mdag.DAGService {
	return mdagmock.Mock()
}
// GetNode builds a trickle DAG in dserv from data, chunked into 500-byte
// pieces, failing the test on any build error.
func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) *mdag.Node {
	nd, err := imp.BuildTrickleDagFromReader(dserv, SizeSplitterGen(500)(bytes.NewReader(data)))
	if err != nil {
		t.Fatal(err)
	}
	return nd
}
// GetEmptyNode builds a DAG node containing no data.
func GetEmptyNode(t testing.TB, dserv mdag.DAGService) *mdag.Node {
	// a nil slice reads identically to an empty one
	return GetNode(t, dserv, nil)
}
// GetRandomNode builds a DAG node from size bytes of time-seeded random data,
// returning both the raw bytes and the resulting node.
func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, *mdag.Node) {
	reader := io.LimitReader(u.NewTimeSeededRand(), size)
	buf, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}
	return buf, GetNode(t, dserv, buf)
}
// ArrComp compares two byte slices, returning nil when they are identical and
// otherwise an error describing the length mismatch or the index of the first
// differing byte.
func ArrComp(a, b []byte) error {
	if len(a) != len(b) {
		// error strings are lowercase without trailing punctuation,
		// per Go convention
		return fmt.Errorf("arrays differ in length: %d != %d", len(a), len(b))
	}
	for i, v := range a {
		if v != b[i] {
			return fmt.Errorf("arrays differ at index %d", i)
		}
	}
	return nil
}
// PrintDag writes a human-readable tree of the unixfs DAG rooted at nd to
// stdout, indenting each level by one space per depth. It is a debugging
// helper for tests and panics on malformed protobuf data or on failure to
// fetch a child node.
func PrintDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
	pbd, err := ft.FromBytes(nd.Data())
	if err != nil {
		panic(err)
	}

	// indent this node's opening line to its depth
	for i := 0; i < indent; i++ {
		fmt.Print(" ")
	}
	fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
	// only break the line when children will be printed below; a leaf
	// keeps "{...}" on one line
	if len(nd.Links) > 0 {
		fmt.Println()
	}
	for _, lnk := range nd.Links {
		child, err := lnk.GetNode(context.Background(), ds)
		if err != nil {
			panic(err)
		}
		PrintDag(child, ds, indent+1)
	}
	// when children were printed, re-indent before the closing brace so it
	// lines up with the opening line
	if len(nd.Links) > 0 {
		for i := 0; i < indent; i++ {
			fmt.Print(" ")
		}
	}
	fmt.Println("}")
}