address PR comments; remove commented/dead code

License: MIT
Signed-off-by: Zach Ramsay <zach.ramsay@gmail.com>
This commit is contained in:
zramsay 2017-05-31 23:41:26 -04:00
parent c5df8f0796
commit 096b3232d5
27 changed files with 22 additions and 302 deletions

View File

@ -102,10 +102,6 @@ func NewBlockstore(d ds.Batching) Blockstore {
type blockstore struct {
datastore ds.Batching
lk sync.RWMutex
gcreq int32
gcreqlk sync.Mutex
rehash bool
}
@ -246,9 +242,8 @@ func NewGCLocker() GCLocker {
}
type gclocker struct {
lk sync.RWMutex
gcreq int32
gcreqlk sync.Mutex
lk sync.RWMutex
gcreq int32
}
// Unlocker represents an object which can Unlock

View File

@ -37,17 +37,12 @@ import (
// log is the command logger
var log = logging.Logger("cmd/ipfs")
var (
// errUnexpectedApiOutput = errors.New("api returned unexpected output")
// errApiVersionMismatch = errors.New("api version mismatch")
errRequestCanceled = errors.New("request canceled")
)
var errRequestCanceled = errors.New("request canceled")
const (
EnvEnableProfiling = "IPFS_PROF"
cpuProfile = "ipfs.cpuprof"
heapProfile = "ipfs.memprof"
// errorFormat = "ERROR: %v\n\n"
)
type cmdInvocation struct {

View File

@ -10,7 +10,6 @@ import (
const (
multipartFormdataType = "multipart/form-data"
// multipartMixedType = "multipart/mixed"
applicationDirectory = "application/x-directory"
applicationSymlink = "application/symlink"

View File

@ -47,12 +47,9 @@ const (
extraContentLengthHeader = "X-Content-Length"
uaHeader = "User-Agent"
contentTypeHeader = "Content-Type"
// contentDispHeader = "Content-Disposition"
// transferEncodingHeader = "Transfer-Encoding"
applicationJson = "application/json"
applicationOctetStream = "application/octet-stream"
plainText = "text/plain"
// originHeader = "origin"
applicationJson = "application/json"
applicationOctetStream = "application/octet-stream"
plainText = "text/plain"
)
var AllowedExposedHeadersArr = []string{streamHeader, channelHeader, extraContentLengthHeader}

View File

@ -664,11 +664,6 @@ stat' on the file or any of its ancestors.
return
}
var r io.Reader = input
if countfound {
r = io.LimitReader(r, int64(count))
}
n, err := io.Copy(wfd, input)
if err != nil {
res.SetError(err, cmds.ErrNormal)

View File

@ -74,7 +74,6 @@ import (
const IpnsValidatorTag = "ipns"
// const kSizeBlockstoreWriteCache = 100
const kReprovideFrequency = time.Hour * 12
const discoveryConnTimeout = time.Second * 30
@ -84,8 +83,7 @@ type mode int
const (
// zero value is not a valid mode, must be explicitly set
invalidMode mode = iota
localMode
localMode mode = iota
offlineMode
onlineMode
)

View File

@ -18,12 +18,7 @@ import (
// default and 2) to avoid a circular dependency (it needs to be referenced in
// the core if it's going to be the default)
var (
// errHostMissing = errors.New("supernode routing client requires a Host component")
// errIdentityMissing = errors.New("supernode routing server requires a peer ID identity")
// errPeerstoreMissing = errors.New("supernode routing server requires a peerstore")
errServersMissing = errors.New("supernode routing client requires at least 1 server peer")
)
var errServersMissing = errors.New("supernode routing client requires at least 1 server peer")
// SupernodeServer returns a configuration for a routing server that stores
// routing records to the provided datastore. Only routing records are store in

View File

@ -37,9 +37,8 @@ const (
// TODO: if a 'non-nice' strategy is implemented, consider increasing this value
maxProvidersPerRequest = 3
providerRequestTimeout = time.Second * 10
// hasBlockTimeout = time.Second * 15
provideTimeout = time.Second * 15
sizeBatchRequestChan = 32
provideTimeout = time.Second * 15
sizeBatchRequestChan = 32
// kMaxPriority is the max priority as defined by the bitswap protocol
kMaxPriority = math.MaxInt32
)

View File

@ -601,14 +601,14 @@ func TestBitswapLedgerTwoWay(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
_, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
if err != nil {
t.Fatal(err)
}
ctx, cancel = context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
blk, err = instances[0].Exchange.GetBlock(ctx, blocks[1].Cid())
blk, err := instances[0].Exchange.GetBlock(ctx, blocks[1].Cid())
if err != nil {
t.Fatal(err)
}

View File

@ -27,9 +27,6 @@ type ledger struct {
// Accounting tracks bytes sent and received.
Accounting debtRatio
// firstExchange is the time of the first data exchange.
firstExchange time.Time
// lastExchange is the time of the last data exchange.
lastExchange time.Time

View File

@ -88,8 +88,6 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration {
// just a much better idea.
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance {
bsdelay := delay.Fixed(0)
// const bloomSize = 512
// const writeCacheElems = 100
adapter := net.Adapter(p)
dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))

View File

@ -55,16 +55,6 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana
}
}
/*type msgPair struct {
to peer.ID
msg bsmsg.BitSwapMessage
}
type cancellation struct {
who peer.ID
blk *cid.Cid
}*/
type msgQueue struct {
p peer.ID

View File

@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"strings"
"time"
core "github.com/ipfs/go-ipfs/core"
ipns "github.com/ipfs/go-ipfs/fuse/ipns"
@ -18,10 +17,6 @@ import (
var log = logging.Logger("node")
// amount of time to wait for mount errors
// TODO is this non-deterministic?
const mountTimeout = time.Second
// fuseNoDirectory used to check the returning fuse error
const fuseNoDirectory = "fusermount: failed to access mountpoint"

View File

@ -85,7 +85,6 @@ func (*Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
type Node struct {
Ipfs *core.IpfsNode
Nd *mdag.ProtoNode
fd *uio.DagReader
cached *ftpb.Data
}

View File

@ -12,7 +12,6 @@ import (
h "github.com/ipfs/go-ipfs/importer/helpers"
dag "github.com/ipfs/go-ipfs/merkledag"
mdtest "github.com/ipfs/go-ipfs/merkledag/test"
pin "github.com/ipfs/go-ipfs/pin"
uio "github.com/ipfs/go-ipfs/unixfs/io"
"context"
@ -124,11 +123,6 @@ func dagrArrComp(t *testing.T, r io.Reader, should []byte) {
}
}
type dagservAndPinner struct {
ds dag.DAGService
mp pin.Pinner
}
func TestIndirectBlocks(t *testing.T) {
ds := mdtest.Mock()
dag, buf := getTestDag(t, ds, 1024*1024, 512)

View File

@ -5,7 +5,6 @@ import (
"fmt"
"os"
chunk "github.com/ipfs/go-ipfs/importer/chunk"
dag "github.com/ipfs/go-ipfs/merkledag"
pi "github.com/ipfs/go-ipfs/thirdparty/posinfo"
ft "github.com/ipfs/go-ipfs/unixfs"
@ -18,7 +17,6 @@ import (
var BlockSizeLimit = 1048576 // 1 MB
// rough estimates on expected sizes
var roughDataBlockSize = chunk.DefaultBlockSize
var roughLinkBlockSize = 1 << 13 // 8KB
var roughLinkSize = 34 + 8 + 5 // sha256 multihash + size + no name + protobuf framing

View File

@ -13,7 +13,6 @@ import (
h "github.com/ipfs/go-ipfs/importer/helpers"
merkledag "github.com/ipfs/go-ipfs/merkledag"
mdtest "github.com/ipfs/go-ipfs/merkledag/test"
pin "github.com/ipfs/go-ipfs/pin"
ft "github.com/ipfs/go-ipfs/unixfs"
uio "github.com/ipfs/go-ipfs/unixfs/io"
@ -125,11 +124,6 @@ func arrComp(a, b []byte) error {
return nil
}
type dagservAndPinner struct {
ds merkledag.DAGService
mp pin.Pinner
}
func TestIndirectBlocks(t *testing.T) {
splitter := chunk.SizeSplitterGen(512)
nbytes := 1024 * 1024
@ -565,31 +559,3 @@ func TestAppendSingleBytesToEmpty(t *testing.T) {
t.Fatal(err)
}
}
func printDag(nd *merkledag.ProtoNode, ds merkledag.DAGService, indent int) {
pbd, err := ft.FromBytes(nd.Data())
if err != nil {
panic(err)
}
for i := 0; i < indent; i++ {
fmt.Print(" ")
}
fmt.Printf("{size = %d, type = %s, nc = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
if len(nd.Links()) > 0 {
fmt.Println()
}
for _, lnk := range nd.Links() {
child, err := lnk.GetNode(context.Background(), ds)
if err != nil {
panic(err)
}
printDag(child.(*merkledag.ProtoNode), ds, indent+1)
}
if len(nd.Links()) > 0 {
for i := 0; i < indent; i++ {
fmt.Print(" ")
}
}
fmt.Println("}")
}

View File

@ -160,17 +160,11 @@ func PutRecordToRouting(ctx context.Context, k ci.PrivKey, value path.Path, seqn
errs <- PublishPublicKey(ctx, r, namekey, k.GetPublic())
}()
err = waitOnErrChan(ctx, errs)
if err != nil {
if err := waitOnErrChan(ctx, errs); err != nil {
return err
}
err = waitOnErrChan(ctx, errs)
if err != nil {
return err
}
return nil
return waitOnErrChan(ctx, errs)
}
func waitOnErrChan(ctx context.Context, errs chan error) error {
@ -340,12 +334,7 @@ func InitializeKeyspace(ctx context.Context, ds dag.DAGService, pub Publisher, p
return err
}
err = pub.Publish(ctx, key, path.FromCid(nodek))
if err != nil {
return err
}
return nil
return pub.Publish(ctx, key, path.FromCid(nodek))
}
func IpnsKeysForID(id peer.ID) (name, ipns string) {

View File

@ -20,13 +20,6 @@ var DefaultSNRServers = []string{
"/ip4/178.62.61.185/tcp/4002/ipfs/QmVw6fGNqBixZE4bewRLT2VXX7fAHUHs8JyidDiJ1P7RUN",
}
func initSNRConfig() (*SupernodeClientConfig, error) {
// TODO perform validation
return &SupernodeClientConfig{
Servers: DefaultSNRServers,
}, nil
}
func (gcr *SupernodeClientConfig) ServerIPFSAddrs() ([]ipfsaddr.IPFSAddr, error) {
var addrs []ipfsaddr.IPFSAddr
for _, server := range gcr.Servers {

View File

@ -41,16 +41,6 @@ func openDefaultDatastore(r *FSRepo) (repo.Datastore, error) {
return nil, fmt.Errorf("unable to open flatfs datastore: %v", err)
}
// Add our PeerID to metrics paths to keep them unique
//
// As some tests just pass a zero-value Config to fsrepo.Init,
// cope with missing PeerID.
id := r.config.Identity.PeerID
if id == "" {
// the tests pass in a zero Config; cope with it
id = fmt.Sprintf("uninitialized_%p", r)
}
prefix := "ipfs.fsrepo.datastore."
metricsBlocks := measure.New(prefix+"blocks", blocksDS)
metricsLevelDB := measure.New(prefix+"leveldb", leveldbDS)

View File

@ -37,12 +37,6 @@ var RepoVersion = 5
var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md
Sorry for the inconvenience. In the future, these will run automatically.`
/*
var errIncorrectRepoFmt = `Repo has incorrect version: %s
Program version is: %s
Please run the ipfs migration tool before continuing.
` + migrationInstructions
*/
var programTooLowMessage = `Your programs version (%d) is lower than your repos (%d).
Please update ipfs to a version that supports the existing repo, or run
a migration in reverse.

View File

@ -27,7 +27,7 @@ func TestOfflineRouterStorage(t *testing.T) {
t.Fatal("OfflineRouter does not properly store")
}
val, err = offline.GetValue(ctx, "notHere")
_, err = offline.GetValue(ctx, "notHere")
if err == nil {
t.Fatal("Router should throw errors for unfound records")
}

View File

@ -65,7 +65,6 @@ func TestThreeLeggedCat100MBMacbookCoastToCoast(t *testing.T) {
func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// const numPeers = 3
// create network
mn := mocknet.New(ctx)

View File

@ -64,8 +64,8 @@ func run() error {
return err
}
repoPath := gopath.Join(cwd, config.DefaultPathName)
if err := ensureRepoInitialized(repoPath); err != nil {
}
_ = ensureRepoInitialized(repoPath)
repo, err := fsrepo.Open(repoPath)
if err != nil { // owned by node
return err
@ -233,26 +233,6 @@ func runFileCattingWorker(ctx context.Context, n *core.IpfsNode) error {
return nil
}
func toPeerInfos(bpeers []config.BootstrapPeer) ([]pstore.PeerInfo, error) {
var peers []pstore.PeerInfo
for _, bootstrap := range bpeers {
p, err := toPeerInfo(bootstrap)
if err != nil {
return nil, err
}
peers = append(peers, p)
}
return peers, nil
}
func toPeerInfo(bootstrap config.BootstrapPeer) (p pstore.PeerInfo, err error) {
p = pstore.PeerInfo{
ID: bootstrap.ID(),
Addrs: []ma.Multiaddr{bootstrap.Multiaddr()},
}
return p, nil
}
func cmdCtx(node *core.IpfsNode, repoPath string) commands.Context {
return commands.Context{
Online: true,

View File

@ -1,13 +1,10 @@
package hamt
import (
"bufio"
"context"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"time"
@ -189,104 +186,3 @@ func genOpSet(seed int64, keep, temp []string) []testOp {
}
}
}
// executes the given op set with a repl to allow easier debugging
func debugExecuteOpSet(ds dag.DAGService, width int, ops []testOp) (*HamtShard, error) {
s, err := NewHamtShard(ds, width)
if err != nil {
return nil, err
}
e := ft.EmptyDirNode()
ds.Add(e)
ctx := context.TODO()
run := 0
opnames := map[int]string{
opAdd: "add",
opDel: "del",
}
mainloop:
for i := 0; i < len(ops); i++ {
o := ops[i]
fmt.Printf("Op %d: %s %s\n", i, opnames[o.Op], o.Val)
for run == 0 {
cmd := readCommand()
parts := strings.Split(cmd, " ")
switch parts[0] {
case "":
run = 1
case "find":
_, err := s.Find(ctx, parts[1])
if err == nil {
fmt.Println("success")
} else {
fmt.Println(err)
}
case "run":
if len(parts) > 1 {
n, err := strconv.Atoi(parts[1])
if err != nil {
panic(err)
}
run = n
} else {
run = -1
}
case "lookop":
for k := 0; k < len(ops); k++ {
if ops[k].Val == parts[1] {
fmt.Printf(" Op %d: %s %s\n", k, opnames[ops[k].Op], parts[1])
}
}
case "restart":
var err error
s, err = NewHamtShard(ds, width)
if err != nil {
panic(err)
}
i = -1
continue mainloop
case "print":
nd, err := s.Node()
if err != nil {
panic(err)
}
printDag(ds, nd.(*dag.ProtoNode), 0)
}
}
run--
switch o.Op {
case opAdd:
err := s.Set(ctx, o.Val, e)
if err != nil {
return nil, fmt.Errorf("inserting %s: %s", o.Val, err)
}
case opDel:
fmt.Println("deleting: ", o.Val)
err := s.Remove(ctx, o.Val)
if err != nil {
return nil, fmt.Errorf("deleting %s: %s", o.Val, err)
}
case opFind:
_, err := s.Find(ctx, o.Val)
if err != nil {
return nil, fmt.Errorf("finding %s: %s", o.Val, err)
}
}
}
return s, nil
}
func readCommand() string {
fmt.Print("> ")
scan := bufio.NewScanner(os.Stdin)
scan.Scan()
return scan.Text()
}

View File

@ -6,7 +6,6 @@ import (
"math/rand"
"os"
"sort"
"strings"
"testing"
"time"
@ -138,7 +137,7 @@ func TestBasicSet(t *testing.T) {
func TestDirBuilding(t *testing.T) {
ds := mdtest.Mock()
s, _ := NewHamtShard(ds, 256)
_, _ = NewHamtShard(ds, 256)
_, s, err := makeDir(ds, 200)
if err != nil {
@ -161,7 +160,7 @@ func TestDirBuilding(t *testing.T) {
func TestShardReload(t *testing.T) {
ds := mdtest.Mock()
s, _ := NewHamtShard(ds, 256)
_, _ = NewHamtShard(ds, 256)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -494,21 +493,6 @@ func TestSetHamtChild(t *testing.T) {
}
}
func printDag(ds dag.DAGService, nd *dag.ProtoNode, depth int) {
padding := strings.Repeat(" ", depth)
fmt.Println("{")
for _, l := range nd.Links() {
fmt.Printf("%s%s: %s", padding, l.Name, l.Cid.String())
ch, err := ds.Get(context.Background(), l.Cid)
if err != nil {
panic(err)
}
printDag(ds, ch.(*dag.ProtoNode), depth+1)
}
fmt.Println(padding + "}")
}
func printDiff(ds dag.DAGService, a, b *dag.ProtoNode) {
diff, err := dagutils.Diff(context.TODO(), ds, a, b)
if err != nil {

View File

@ -1,36 +1,21 @@
package mod
import (
"context"
"fmt"
"io"
"io/ioutil"
"testing"
"github.com/ipfs/go-ipfs/blocks/blockstore"
bs "github.com/ipfs/go-ipfs/blockservice"
"github.com/ipfs/go-ipfs/exchange/offline"
h "github.com/ipfs/go-ipfs/importer/helpers"
trickle "github.com/ipfs/go-ipfs/importer/trickle"
mdag "github.com/ipfs/go-ipfs/merkledag"
ft "github.com/ipfs/go-ipfs/unixfs"
uio "github.com/ipfs/go-ipfs/unixfs/io"
testu "github.com/ipfs/go-ipfs/unixfs/test"
context "context"
ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore"
"gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync"
u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util"
)
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore) {
dstore := ds.NewMapDatastore()
tsds := sync.MutexWrap(dstore)
bstore := blockstore.NewBlockstore(tsds)
bserv := bs.New(bstore, offline.Exchange(bstore))
dserv := mdag.NewDAGService(bserv)
return dserv, bstore
}
func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) []byte {
newdata := make([]byte, size)
r := u.NewTimeSeededRand()
@ -112,7 +97,7 @@ func TestDagModifierBasic(t *testing.T) {
beg = uint64(len(b))
length = 3000
t.Log("Testing pure append")
b = testModWrite(t, beg, length, b, dagmod)
_ = testModWrite(t, beg, length, b, dagmod)
// Verify reported length
node, err := dagmod.GetNode()