Merge branch 'version/0.4.3-rc4'

License: MIT
Signed-off-by: Jeromy <why@ipfs.io>
This commit is contained in:
Jeromy 2016-09-20 19:01:59 -07:00
commit 5457a85cef
No known key found for this signature in database
GPG Key ID: 87E34DFE72DC2B6B
41 changed files with 606 additions and 263 deletions

View File

@ -1,5 +1,29 @@
# go-ipfs changelog
### 0.4.3-rc4 - 2016-09-09
This release candidate fixes issues in Bitswap and the `ipfs add` command, and improves testing.
We plan for this to be the last release candidate before the release of go-ipfs v0.4.3.
With this release candidate, we're also moving go-ipfs to Go 1.7, which we expect will yield improvements in runtime performance, memory usage, build time and size of the release binaries.
- Require Go 1.7. (@whyrusleeping, @Kubuxu, @lgierth, [ipfs/go-ipfs#3163](https://github.com/ipfs/go-ipfs/pull/3163))
- For this purpose, switch Docker image from Alpine 3.4 to Alpine Edge.
- Fix cancellation of Bitswap `wantlist` entries. (@whyrusleeping, [ipfs/go-ipfs#3182](https://github.com/ipfs/go-ipfs/pull/3182))
- Fix clearing of `active` state of Bitswap provider queries. (@whyrusleeping, [ipfs/go-ipfs#3169](https://github.com/ipfs/go-ipfs/pull/3169))
- Fix a panic in the DHT code. (@Kubuxu, [ipfs/go-ipfs#3200](https://github.com/ipfs/go-ipfs/pull/3200))
- Improve handling of `Identity` field in `ipfs config` command. (@Kubuxu, @whyrusleeping, [ipfs/go-ipfs#3141](https://github.com/ipfs/go-ipfs/pull/3141))
- Fix explicit adding of symlinked files and directories. (@kevina, [ipfs/go-ipfs#3135](https://github.com/ipfs/go-ipfs/pull/3135))
- Fix bash auto-completion of `ipfs daemon --unrestricted-api` option. (@lgierth, [ipfs/go-ipfs#3159](https://github.com/ipfs/go-ipfs/pull/3159))
- Introduce a new timeout tool for tests to avoid licensing issues. (@Kubuxu, [ipfs/go-ipfs#3152](https://github.com/ipfs/go-ipfs/pull/3152))
- Improve output for migrations of fs-repo. (@lgierth, [ipfs/go-ipfs#3158](https://github.com/ipfs/go-ipfs/pull/3158))
- Fix info notice of commands taking input from stdin. (@Kubuxu, [ipfs/go-ipfs#3134](https://github.com/ipfs/go-ipfs/pull/3134))
- Bring back a few tests for stdin handling of `ipfs cat` and `ipfs add`. (@Kubuxu, [ipfs/go-ipfs#3144](https://github.com/ipfs/go-ipfs/pull/3144))
- Improve sharness tests for `ipfs repo verify` command. (@whyrusleeping, [ipfs/go-ipfs#3148](https://github.com/ipfs/go-ipfs/pull/3148))
- Improve sharness tests for CORS headers on the gateway. (@Kubuxu, [ipfs/go-ipfs#3142](https://github.com/ipfs/go-ipfs/pull/3142))
- Improve tests for pinning within `ipfs files`. (@kevina, [ipfs/go-ipfs#3151](https://github.com/ipfs/go-ipfs/pull/3151))
- Improve tests for the automatic raising of file descriptor limits. (@whyrusleeping, [ipfs/go-ipfs#3149](https://github.com/ipfs/go-ipfs/pull/3149))
### 0.4.3-rc3 - 2016-08-09
This release candidate fixes a panic that occurs when input from stdin was

View File

@ -9,16 +9,16 @@ import (
// Next to each option is its approximate memory usage per unit
type CacheOpts struct {
HasBloomFilterSize int // 1 bit
HasBloomFilterSize int // 1 byte
HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers
HasARCCacheSize int // 32 bytes
}
func DefaultCacheOpts() CacheOpts {
return CacheOpts{
HasBloomFilterSize: 512 * 8 * 1024,
HasBloomFilterSize: 512 << 10,
HasBloomFilterHashes: 7,
HasARCCacheSize: 64 * 1024,
HasARCCacheSize: 64 << 10,
}
}
@ -41,7 +41,8 @@ func CachedBlockstore(bs GCBlockstore,
cbs, err = newARCCachedBS(ctx, cbs, opts.HasARCCacheSize)
}
if opts.HasBloomFilterSize != 0 {
cbs, err = bloomCached(cbs, ctx, opts.HasBloomFilterSize, opts.HasBloomFilterHashes)
// *8 because of bytes to bits conversion
cbs, err = bloomCached(cbs, ctx, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes)
}
return cbs, err

View File

@ -228,19 +228,25 @@ func daemonFunc(req cmds.Request, res cmds.Response) {
return
case fsrepo.ErrNeedMigration:
domigrate, found, _ := req.Option(migrateKwd).Bool()
fmt.Println("Found old repo version, migrations need to be run.")
fmt.Println("Found outdated fs-repo, migrations need to be run.")
if !found {
domigrate = YesNoPrompt("Run migrations automatically? [y/N]")
domigrate = YesNoPrompt("Run migrations now? [y/N]")
}
if !domigrate {
res.SetError(fmt.Errorf("please run the migrations manually"), cmds.ErrNormal)
fmt.Println("Not running migrations of fs-repo now.")
fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.io")
res.SetError(fmt.Errorf("fs-repo requires migration"), cmds.ErrNormal)
return
}
err = migrate.RunMigration(fsrepo.RepoVersion)
if err != nil {
fmt.Println("The migrations of fs-repo failed:")
fmt.Printf(" %s\n", err)
fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
fmt.Println(" https://github.com/ipfs/fs-repo-migrations")
res.SetError(err, cmds.ErrNormal)
return
}

View File

@ -30,12 +30,15 @@ func checkAndSetUlimit() error {
return fmt.Errorf("error getting rlimit: %s", err)
}
var setting bool
if rLimit.Cur < ipfsFileDescNum {
if rLimit.Max < ipfsFileDescNum {
log.Error("adjusting max")
rLimit.Max = ipfsFileDescNum
}
fmt.Printf("Adjusting current ulimit to %d.\n", ipfsFileDescNum)
fmt.Printf("Adjusting current ulimit to %d...\n", ipfsFileDescNum)
rLimit.Cur = ipfsFileDescNum
setting = true
}
err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit)
@ -43,5 +46,9 @@ func checkAndSetUlimit() error {
return fmt.Errorf("error setting ulimit: %s", err)
}
if setting {
fmt.Printf("Successfully raised file descriptor limit to %d.\n", ipfsFileDescNum)
}
return nil
}

View File

@ -251,7 +251,7 @@ func parseOpts(args []string, root *cmds.Command) (
return
}
const msgStdinInfo = "ipfs: Reading from %s; send Ctrl-d to stop.\n"
const msgStdinInfo = "ipfs: Reading from %s; send Ctrl-d to stop."
func parseArgs(inputs []string, stdin *os.File, argDefs []cmds.Argument, recursive, hidden bool, root *cmds.Command) ([]string, []files.File, error) {
// ignore stdin on Windows
@ -401,16 +401,14 @@ func appendFile(fpath string, argDef *cmds.Argument, recursive, hidden bool) (fi
if err != nil {
return nil, err
}
cwd, err = filepath.EvalSymlinks(cwd)
if err != nil {
return nil, err
}
fpath = cwd
}
fpath = filepath.Clean(fpath)
fpath, err := filepath.EvalSymlinks(fpath)
if err != nil {
return nil, err
}
// Repeat ToSlash after EvalSymlinks as it turns path to platform specific
fpath = filepath.ToSlash(fpath)
fpath = filepath.ToSlash(filepath.Clean(fpath))
stat, err := os.Lstat(fpath)
if err != nil {
@ -469,6 +467,7 @@ func newMessageReader(r io.ReadCloser, msg string) io.ReadCloser {
func (r *messageReader) Read(b []byte) (int, error) {
if !r.done {
fmt.Fprintln(os.Stderr, r.message)
r.done = true
}
return r.r.Read(b)

View File

@ -15,6 +15,7 @@ import (
repo "github.com/ipfs/go-ipfs/repo"
config "github.com/ipfs/go-ipfs/repo/config"
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
)
@ -162,14 +163,12 @@ included in the output of this command.
return
}
idmap, ok := cfg["Identity"].(map[string]interface{})
if !ok {
res.SetError(fmt.Errorf("config has no identity"), cmds.ErrNormal)
err = scrubValue(cfg, []string{config.IdentityTag, config.PrivKeyTag})
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
delete(idmap, "PrivKey")
output, err := config.HumanOutput(cfg)
if err != nil {
res.SetError(err, cmds.ErrNormal)
@ -180,6 +179,47 @@ included in the output of this command.
},
}
// scrubValue removes the entry addressed by the key path from the nested
// config map m. Each path segment is located case-insensitively, but every
// segment except the last must match the stored key's case exactly; only
// the final segment may differ in case from the stored key.
func scrubValue(m map[string]interface{}, key []string) error {
	// lookup returns the stored key name, its value, and whether a
	// case-insensitive match for want exists in node.
	lookup := func(node map[string]interface{}, want string) (string, interface{}, bool) {
		target := strings.ToLower(want)
		for name, v := range node {
			if strings.ToLower(name) == target {
				return name, v, true
			}
		}
		return "", nil, false
	}

	node := m
	// Descend through every segment except the last; each must resolve to
	// a nested map.
	for _, seg := range key[:len(key)-1] {
		name, v, ok := lookup(node, seg)
		switch {
		case !ok:
			return fmt.Errorf("failed to find specified key")
		case name != seg:
			// case mismatch, calling this an error
			return fmt.Errorf("case mismatch in config, expected %q but got %q", seg, name)
		}
		child, isMap := v.(map[string]interface{})
		if !isMap {
			return fmt.Errorf("%s was not a map", name)
		}
		node = child
	}

	// Resolve and delete the final segment under whatever case it is stored.
	name, _, ok := lookup(node, key[len(key)-1])
	if !ok {
		return fmt.Errorf("%s, not found", strings.Join(key, "."))
	}
	delete(node, name)
	return nil
}
var configEditCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Open the config file for editing in $EDITOR.",
@ -250,19 +290,10 @@ func getConfig(r repo.Repo, key string) (*ConfigField, error) {
}
func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) {
keyF, err := getConfig(r, "Identity.PrivKey")
if err != nil {
return nil, errors.New("failed to get PrivKey")
}
privkey := keyF.Value
err = r.SetConfigKey(key, value)
err := r.SetConfigKey(key, value)
if err != nil {
return nil, fmt.Errorf("failed to set config value: %s (maybe use --json?)", err)
}
err = r.SetConfigKey("Identity.PrivKey", privkey)
if err != nil {
return nil, errors.New("failed to set PrivKey")
}
return getConfig(r, key)
}
@ -286,7 +317,7 @@ func replaceConfig(r repo.Repo, file io.Reader) error {
return errors.New("setting private key with API is not supported")
}
keyF, err := getConfig(r, "Identity.PrivKey")
keyF, err := getConfig(r, config.PrivKeySelector)
if err != nil {
return fmt.Errorf("Failed to get PrivKey")
}

View File

@ -101,7 +101,7 @@ Default: `false`
A boolean value. If set to true, all block reads from disk will be hashed and verified. This will cause increased CPU utilization.
- `BloomFilterSize`
A number representing the size in bits of the blockstore's bloom filter. A value of zero represents the feature being disabled.
A number representing the size in bytes of the blockstore's bloom filter. A value of zero represents the feature being disabled.
Default: `0`

View File

@ -12,7 +12,7 @@ will be allowed up to release day.
- [ ] all tests pass (no exceptions)
- [ ] webui works (for most definitions of 'works')
- [ ] CHANGELOG.md has been updated
- use `LAST=v0.4.2 for n in $(git log --oneline --merges --reverse -n-1 $LAST...master | cut -d'#' -f2 | cut -d' ' -f1); do echo https://github.com/ipfs/go-ipfs/pull/$n; done`
- use `LAST=v0.4.2 ; for n in $(git log --oneline --merges --reverse -n-1 $LAST...master | cut -d'#' -f2 | cut -d' ' -f1); do echo https://github.com/ipfs/go-ipfs/pull/$n; done`
- [ ] version string in `repo/config/version.go` has been updated
- [ ] tag commit with vX.Y.Z
- [ ] bump version string in `repo/config/version.go` to `vX.Y.Z-dev`

View File

@ -17,7 +17,6 @@ import (
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications"
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
flags "github.com/ipfs/go-ipfs/flags"
"github.com/ipfs/go-ipfs/thirdparty/delay"
loggables "gx/ipfs/QmYrv4LgCC8FhG2Ab4bwuq5DqBdwMtx3hMb3KKJDZcr2d7/go-libp2p-loggables"
@ -89,7 +88,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
notifications: notif,
engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method
network: network,
findKeys: make(chan *wantlist.Entry, sizeBatchRequestChan),
findKeys: make(chan *blockRequest, sizeBatchRequestChan),
process: px,
newBlocks: make(chan blocks.Block, HasBlockBufferSize),
provideKeys: make(chan key.Key, provideKeysBufferSize),
@ -132,7 +131,7 @@ type Bitswap struct {
notifications notifications.PubSub
// send keys to a worker to find and connect to providers for them
findKeys chan *wantlist.Entry
findKeys chan *blockRequest
engine *decision.Engine
@ -149,8 +148,8 @@ type Bitswap struct {
}
type blockRequest struct {
key key.Key
ctx context.Context
Key key.Key
Ctx context.Context
}
// GetBlock attempts to retrieve a particular block from peers within the
@ -240,13 +239,50 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks
// NB: Optimization. Assumes that providers of key[0] are likely to
// be able to provide for all keys. This currently holds true in most
// every situation. Later, this assumption may not hold as true.
req := &wantlist.Entry{
req := &blockRequest{
Key: keys[0],
Ctx: ctx,
}
remaining := make(map[key.Key]struct{})
for _, k := range keys {
remaining[k] = struct{}{}
}
out := make(chan blocks.Block)
go func() {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
defer close(out)
defer func() {
var toCancel []key.Key
for k, _ := range remaining {
toCancel = append(toCancel, k)
}
bs.CancelWants(toCancel)
}()
for {
select {
case blk, ok := <-promise:
if !ok {
return
}
delete(remaining, blk.Key())
select {
case out <- blk:
case <-ctx.Done():
return
}
case <-ctx.Done():
return
}
}
}()
select {
case bs.findKeys <- req:
return promise, nil
return out, nil
case <-ctx.Done():
return nil, ctx.Err()
}

View File

@ -341,7 +341,6 @@ func TestDoubleGet(t *testing.T) {
blocks := bg.Blocks(1)
ctx1, cancel1 := context.WithCancel(context.Background())
blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []key.Key{blocks[0].Key()})
if err != nil {
t.Fatal(err)
@ -369,11 +368,15 @@ func TestDoubleGet(t *testing.T) {
t.Fatal(err)
}
blk, ok := <-blkch2
if !ok {
t.Fatal("expected to get the block here")
select {
case blk, ok := <-blkch2:
if !ok {
t.Fatal("expected to get the block here")
}
t.Log(blk)
case <-time.After(time.Second * 5):
t.Fatal("timed out waiting on block")
}
t.Log(blk)
for _, inst := range instances {
err := inst.Exchange.Close()
@ -382,3 +385,68 @@ func TestDoubleGet(t *testing.T) {
}
}
}
// TestWantlistCleanup verifies that wantlist entries are removed once the
// requests that created them time out or are cancelled, and that entries
// belonging to still-active requests remain in the wantlist.
func TestWantlistCleanup(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	instances := sg.Instances(1)[0]
	bswap := instances.Exchange
	blocks := bg.Blocks(20)

	var keys []key.Key
	for _, b := range blocks {
		keys = append(keys, b.Key())
	}

	// A GetBlock that times out must clean up its wantlist entry.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
	defer cancel()
	_, err := bswap.GetBlock(ctx, keys[0])
	if err != context.DeadlineExceeded {
		t.Fatal("shouldn't have fetched any blocks")
	}

	// Give the cancellation a moment to propagate to the wantlist.
	time.Sleep(time.Millisecond * 50)

	if len(bswap.GetWantlist()) > 0 {
		t.Fatal("should not have anything in wantlist")
	}

	// A GetBlocks over several keys that times out must do the same.
	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)
	defer cancel()
	_, err = bswap.GetBlocks(ctx, keys[:10])
	if err != nil {
		t.Fatal(err)
	}

	<-ctx.Done()
	time.Sleep(time.Millisecond * 50)

	if len(bswap.GetWantlist()) > 0 {
		t.Fatal("should not have anything in wantlist")
	}

	// One request that stays active for the remainder of the test...
	_, err = bswap.GetBlocks(context.Background(), keys[:1])
	if err != nil {
		t.Fatal(err)
	}

	// ...and one that will be cancelled below.
	ctx, cancel = context.WithCancel(context.Background())
	_, err = bswap.GetBlocks(ctx, keys[10:])
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 50)
	if len(bswap.GetWantlist()) != 11 {
		t.Fatal("should have 11 keys in wantlist")
	}

	cancel()
	time.Sleep(time.Millisecond * 50)

	// Only the key from the uncancelled request should remain.
	if !(len(bswap.GetWantlist()) == 1 && bswap.GetWantlist()[0] == keys[0]) {
		t.Fatal("should only have keys[0] in wantlist")
	}
}

View File

@ -21,6 +21,6 @@ func BenchmarkTaskQueuePush(b *testing.B) {
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
q.Push(wantlist.Entry{Key: key.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)])
q.Push(&wantlist.Entry{Key: key.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)])
}
}

View File

@ -104,7 +104,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine {
return e
}
func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) {
func (e *Engine) WantlistForPeer(p peer.ID) (out []*wl.Entry) {
e.lock.Lock()
partner, ok := e.ledgerMap[p]
if ok {
@ -233,7 +233,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error {
for _, entry := range m.Wantlist() {
if entry.Cancel {
log.Debugf("cancel %s", entry.Key)
log.Debugf("%s cancel %s", p, entry.Key)
l.CancelWant(entry.Key)
e.peerRequestQueue.Remove(entry.Key, p)
} else {

View File

@ -87,7 +87,7 @@ func (l *ledger) CancelWant(k key.Key) {
l.wantList.Remove(k)
}
func (l *ledger) WantListContains(k key.Key) (wl.Entry, bool) {
func (l *ledger) WantListContains(k key.Key) (*wl.Entry, bool) {
return l.wantList.Contains(k)
}

View File

@ -13,7 +13,7 @@ import (
type peerRequestQueue interface {
// Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty.
Pop() *peerRequestTask
Push(entry wantlist.Entry, to peer.ID)
Push(entry *wantlist.Entry, to peer.ID)
Remove(k key.Key, p peer.ID)
// NB: cannot simply expose taskQueue.Len because trashed elements
@ -45,7 +45,7 @@ type prq struct {
}
// Push currently adds a new peerRequestTask to the end of the list
func (tl *prq) Push(entry wantlist.Entry, to peer.ID) {
func (tl *prq) Push(entry *wantlist.Entry, to peer.ID) {
tl.lock.Lock()
defer tl.lock.Unlock()
partner, ok := tl.partners[to]
@ -166,7 +166,7 @@ func (tl *prq) thawRound() {
}
type peerRequestTask struct {
Entry wantlist.Entry
Entry *wantlist.Entry
Target peer.ID
// A callback to signal that this task has been completed

View File

@ -41,7 +41,7 @@ func TestPushPop(t *testing.T) {
for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters
letter := alphabet[index]
t.Log(partner.String())
prq.Push(wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner)
prq.Push(&wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner)
}
for _, consonant := range consonants {
prq.Remove(key.Key(consonant), partner)
@ -78,10 +78,10 @@ func TestPeerRepeats(t *testing.T) {
// Have each push some blocks
for i := 0; i < 5; i++ {
prq.Push(wantlist.Entry{Key: key.Key(i)}, a)
prq.Push(wantlist.Entry{Key: key.Key(i)}, b)
prq.Push(wantlist.Entry{Key: key.Key(i)}, c)
prq.Push(wantlist.Entry{Key: key.Key(i)}, d)
prq.Push(&wantlist.Entry{Key: key.Key(i)}, a)
prq.Push(&wantlist.Entry{Key: key.Key(i)}, b)
prq.Push(&wantlist.Entry{Key: key.Key(i)}, c)
prq.Push(&wantlist.Entry{Key: key.Key(i)}, d)
}
// now, pop off four entries, there should be one from each

View File

@ -64,7 +64,7 @@ func newMsg(full bool) *impl {
}
type Entry struct {
wantlist.Entry
*wantlist.Entry
Cancel bool
}
@ -120,7 +120,7 @@ func (m *impl) addEntry(k key.Key, priority int, cancel bool) {
e.Cancel = cancel
} else {
m.wantlist[k] = Entry{
Entry: wantlist.Entry{
Entry: &wantlist.Entry{
Key: k,
Priority: priority,
},

View File

@ -7,8 +7,6 @@ import (
"sync"
key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key"
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)
type ThreadSafe struct {
@ -18,19 +16,17 @@ type ThreadSafe struct {
// not threadsafe
type Wantlist struct {
set map[key.Key]Entry
set map[key.Key]*Entry
}
type Entry struct {
Key key.Key
Priority int
Ctx context.Context
cancel func()
RefCnt int
}
type entrySlice []Entry
type entrySlice []*Entry
func (es entrySlice) Len() int { return len(es) }
func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
@ -44,41 +40,41 @@ func NewThreadSafe() *ThreadSafe {
func New() *Wantlist {
return &Wantlist{
set: make(map[key.Key]Entry),
set: make(map[key.Key]*Entry),
}
}
func (w *ThreadSafe) Add(k key.Key, priority int) {
func (w *ThreadSafe) Add(k key.Key, priority int) bool {
w.lk.Lock()
defer w.lk.Unlock()
w.Wantlist.Add(k, priority)
return w.Wantlist.Add(k, priority)
}
func (w *ThreadSafe) AddEntry(e Entry) {
func (w *ThreadSafe) AddEntry(e *Entry) bool {
w.lk.Lock()
defer w.lk.Unlock()
w.Wantlist.AddEntry(e)
return w.Wantlist.AddEntry(e)
}
func (w *ThreadSafe) Remove(k key.Key) {
func (w *ThreadSafe) Remove(k key.Key) bool {
w.lk.Lock()
defer w.lk.Unlock()
w.Wantlist.Remove(k)
return w.Wantlist.Remove(k)
}
func (w *ThreadSafe) Contains(k key.Key) (Entry, bool) {
func (w *ThreadSafe) Contains(k key.Key) (*Entry, bool) {
w.lk.RLock()
defer w.lk.RUnlock()
return w.Wantlist.Contains(k)
}
func (w *ThreadSafe) Entries() []Entry {
func (w *ThreadSafe) Entries() []*Entry {
w.lk.RLock()
defer w.lk.RUnlock()
return w.Wantlist.Entries()
}
func (w *ThreadSafe) SortedEntries() []Entry {
func (w *ThreadSafe) SortedEntries() []*Entry {
w.lk.RLock()
defer w.lk.RUnlock()
return w.Wantlist.SortedEntries()
@ -94,50 +90,50 @@ func (w *Wantlist) Len() int {
return len(w.set)
}
func (w *Wantlist) Add(k key.Key, priority int) {
func (w *Wantlist) Add(k key.Key, priority int) bool {
if e, ok := w.set[k]; ok {
e.RefCnt++
return
return false
}
ctx, cancel := context.WithCancel(context.Background())
w.set[k] = Entry{
w.set[k] = &Entry{
Key: k,
Priority: priority,
Ctx: ctx,
cancel: cancel,
RefCnt: 1,
}
return true
}
func (w *Wantlist) AddEntry(e Entry) {
if _, ok := w.set[e.Key]; ok {
return
func (w *Wantlist) AddEntry(e *Entry) bool {
if ex, ok := w.set[e.Key]; ok {
ex.RefCnt++
return false
}
w.set[e.Key] = e
return true
}
func (w *Wantlist) Remove(k key.Key) {
func (w *Wantlist) Remove(k key.Key) bool {
e, ok := w.set[k]
if !ok {
return
return false
}
e.RefCnt--
if e.RefCnt <= 0 {
delete(w.set, k)
if e.cancel != nil {
e.cancel()
}
return true
}
return false
}
func (w *Wantlist) Contains(k key.Key) (Entry, bool) {
func (w *Wantlist) Contains(k key.Key) (*Entry, bool) {
e, ok := w.set[k]
return e, ok
}
func (w *Wantlist) Entries() []Entry {
func (w *Wantlist) Entries() []*Entry {
var es entrySlice
for _, e := range w.set {
es = append(es, e)
@ -145,7 +141,7 @@ func (w *Wantlist) Entries() []Entry {
return es
}
func (w *Wantlist) SortedEntries() []Entry {
func (w *Wantlist) SortedEntries() []*Entry {
var es entrySlice
for _, e := range w.set {
es = append(es, e)

View File

@ -75,6 +75,7 @@ func (pm *WantManager) WantBlocks(ctx context.Context, ks []key.Key) {
}
func (pm *WantManager) CancelWants(ks []key.Key) {
log.Infof("cancel wants: %s", ks)
pm.addEntries(context.TODO(), ks, true)
}
@ -83,16 +84,17 @@ func (pm *WantManager) addEntries(ctx context.Context, ks []key.Key, cancel bool
for i, k := range ks {
entries = append(entries, &bsmsg.Entry{
Cancel: cancel,
Entry: wantlist.Entry{
Entry: &wantlist.Entry{
Key: k,
Priority: kMaxPriority - i,
Ctx: ctx,
RefCnt: 1,
},
})
}
select {
case pm.incoming <- entries:
case <-pm.ctx.Done():
case <-ctx.Done():
}
}
@ -241,33 +243,31 @@ func (pm *WantManager) Run() {
case entries := <-pm.incoming:
// add changes to our wantlist
var filtered []*bsmsg.Entry
for _, e := range entries {
if e.Cancel {
pm.wl.Remove(e.Key)
if pm.wl.Remove(e.Key) {
filtered = append(filtered, e)
}
} else {
pm.wl.AddEntry(e.Entry)
if pm.wl.AddEntry(e.Entry) {
filtered = append(filtered, e)
}
}
}
// broadcast those wantlist changes
for _, p := range pm.peers {
p.addMessage(entries)
p.addMessage(filtered)
}
case <-tock.C:
// resend entire wantlist every so often (REALLY SHOULDNT BE NECESSARY)
var es []*bsmsg.Entry
for _, e := range pm.wl.Entries() {
select {
case <-e.Ctx.Done():
// entry has been cancelled
// simply continue, the entry will be removed from the
// wantlist soon enough
continue
default:
}
es = append(es, &bsmsg.Entry{Entry: e})
}
for _, p := range pm.peers {
p.outlk.Lock()
p.out = bsmsg.New(true)

View File

@ -1,16 +1,15 @@
package bitswap
import (
"math/rand"
"sync"
"time"
process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer"
context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key"
)
@ -173,9 +172,17 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
}
case <-broadcastSignal.C: // resend unfulfilled wantlist keys
log.Event(ctx, "Bitswap.Rebroadcast.active")
for _, e := range bs.wm.wl.Entries() {
e := e
bs.findKeys <- &e
entries := bs.wm.wl.Entries()
if len(entries) == 0 {
continue
}
// TODO: come up with a better strategy for determining when to search
// for new providers for blocks.
i := rand.Intn(len(entries))
bs.findKeys <- &blockRequest{
Key: entries[i].Key,
Ctx: ctx,
}
case <-parent.Done():
return
@ -185,33 +192,37 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
func (bs *Bitswap) providerQueryManager(ctx context.Context) {
var activeLk sync.Mutex
active := make(map[key.Key]*wantlist.Entry)
kset := key.NewKeySet()
for {
select {
case e := <-bs.findKeys:
activeLk.Lock()
if _, ok := active[e.Key]; ok {
if kset.Has(e.Key) {
activeLk.Unlock()
continue
}
active[e.Key] = e
kset.Add(e.Key)
activeLk.Unlock()
go func(e *wantlist.Entry) {
go func(e *blockRequest) {
child, cancel := context.WithTimeout(e.Ctx, providerRequestTimeout)
defer cancel()
providers := bs.network.FindProvidersAsync(child, e.Key, maxProvidersPerRequest)
wg := &sync.WaitGroup{}
for p := range providers {
wg.Add(1)
go func(p peer.ID) {
defer wg.Done()
err := bs.network.ConnectTo(child, p)
if err != nil {
log.Debug("failed to connect to provider %s: %s", p, err)
}
}(p)
}
wg.Wait()
activeLk.Lock()
delete(active, e.Key)
kset.Remove(e.Key)
activeLk.Unlock()
}(e)

View File

@ -104,7 +104,7 @@ _ipfs_config_show()
_ipfs_daemon()
{
_ipfs_comp "--init --routing= --mount --writable --mount-ipfs= \
--mount-ipns= --disable-transport-encryption \
--mount-ipns= --unrestricted-api --disable-transport-encryption \
--help"
}

View File

@ -256,6 +256,12 @@
"hash": "QmYvLYkYiVEi5LBHP2uFqiUaHqH7zWnEuRqoNEuGLNG6JB",
"name": "go-libp2p-kad-dht",
"version": "1.1.0"
},
{
"author": "whyrusleeping",
"hash": "QmdCL8M8DXJdSRnwhpDhukX5r8ydjxnzPJpaKrFudDA8yn",
"name": "hang-fds",
"version": "0.0.0"
}
],
"gxVersion": "0.4.0",

View File

@ -5,6 +5,10 @@ import (
ic "gx/ipfs/QmVoi5es8D5fNHZDqoW6DgDAEPEV5hQp8GBz161vZXiwpQ/go-libp2p-crypto"
)
const IdentityTag = "Identity"
const PrivKeyTag = "PrivKey"
const PrivKeySelector = IdentityTag + "." + PrivKeyTag
// Identity tracks the configuration of the local node's identity.
type Identity struct {
PeerID string

View File

@ -482,6 +482,14 @@ func (r *FSRepo) SetConfigKey(key string, value interface{}) error {
return err
}
// Load private key to guard against it being overwritten.
// NOTE: this is a temporary measure to secure this field until we move
// keys out of the config file.
pkval, err := common.MapGetKV(mapconf, config.PrivKeySelector)
if err != nil {
return err
}
// Get the type of the value associated with the key
oldValue, err := common.MapGetKV(mapconf, key)
ok := true
@ -523,6 +531,11 @@ func (r *FSRepo) SetConfigKey(key string, value interface{}) error {
return err
}
// replace private key, in case it was overwritten.
if err := common.MapSetKV(mapconf, config.PrivKeySelector, pkval); err != nil {
return err
}
// This step doubles as to validate the map against the struct
// before serialization
conf, err := config.FromMap(mapconf)

View File

@ -37,7 +37,7 @@ func migrationsBinName() string {
func RunMigration(newv int) error {
migrateBin := migrationsBinName()
fmt.Println(" => checking for migrations binary...")
fmt.Println(" => Looking for suitable fs-repo-migrations binary.")
var err error
migrateBin, err = exec.LookPath(migrateBin)
@ -47,15 +47,17 @@ func RunMigration(newv int) error {
}
if err != nil {
fmt.Println(" => usable migrations not found on system, fetching...")
fmt.Println(" => None found, downloading.")
loc, err := GetMigrations()
if err != nil {
fmt.Println(" => Failed to download fs-repo-migrations.")
return err
}
err = verifyMigrationSupportsVersion(loc, newv)
if err != nil {
return fmt.Errorf("no migration binary found that supports version %d - %s", newv, err)
return fmt.Errorf("no fs-repo-migration binary found for version %d: %s", newv, err)
}
migrateBin = loc
@ -65,14 +67,15 @@ func RunMigration(newv int) error {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
fmt.Printf(" => running migration: '%s -to %d -y'\n\n", migrateBin, newv)
fmt.Printf(" => Running: %s -to %d -y\n", migrateBin, newv)
err = cmd.Run()
if err != nil {
fmt.Printf(" => Failed: %s -to %d -y\n", migrateBin, newv)
return fmt.Errorf("migration failed: %s", err)
}
fmt.Println(" => migrations binary completed successfully")
fmt.Printf(" => Success: fs-repo has been migrated to version %d.\n", newv)
return nil
}
@ -80,21 +83,19 @@ func RunMigration(newv int) error {
func GetMigrations() (string, error) {
latest, err := GetLatestVersion(DistPath, migrations)
if err != nil {
return "", fmt.Errorf("getting latest version of fs-repo-migrations: %s", err)
return "", fmt.Errorf("failed to find latest fs-repo-migrations: %s", err)
}
dir, err := ioutil.TempDir("", "go-ipfs-migrate")
if err != nil {
return "", fmt.Errorf("tempdir: %s", err)
return "", fmt.Errorf("failed to create fs-repo-migrations tempdir: %s", err)
}
out := filepath.Join(dir, migrationsBinName())
err = GetBinaryForVersion(migrations, migrations, DistPath, latest, out)
if err != nil {
fmt.Printf(" => error getting migrations binary: %s\n", err)
fmt.Println(" => could not find or install fs-repo-migrations, please manually install it")
return "", fmt.Errorf("failed to find migrations binary")
return "", fmt.Errorf("failed to download latest fs-repo-migrations: %s", err)
}
err = os.Chmod(out, 0755)
@ -184,7 +185,6 @@ func httpGet(url string) (*http.Response, error) {
}
func httpFetch(url string) (io.ReadCloser, error) {
fmt.Printf("fetching url: %s\n", url)
resp, err := httpGet(url)
if err != nil {
return nil, err
@ -196,7 +196,7 @@ func httpFetch(url string) (io.ReadCloser, error) {
return nil, fmt.Errorf("error reading error body: %s", err)
}
return nil, fmt.Errorf("%s: %s", resp.Status, string(mes))
return nil, fmt.Errorf("GET %s error: %s: %s", url, resp.Status, string(mes))
}
return resp.Body, nil

View File

@ -1,11 +1,13 @@
BINS = bin/random bin/multihash bin/ipfs bin/pollEndpoint bin/iptb bin/go-sleep
BINS += bin/go-timeout
IPFS_ROOT = ../
IPFS_CMD = ../cmd/ipfs
RANDOM_SRC = ../Godeps/_workspace/src/github.com/jbenet/go-random
RANDOM_FILES_SRC = ../Godeps/_workspace/src/github.com/jbenet/go-random-files
POLLENDPOINT_SRC= ../thirdparty/pollEndpoint
GOSLEEP_SRC = ./dependencies/go-sleep
GOTIMEOUT_SRC = ./dependencies/go-timeout
export PATH := ../bin:${PATH}
@ -49,16 +51,28 @@ bin/go-sleep: $(call find_go_files, $(GOSLEEP_SRC)) IPFS-BUILD-OPTIONS
@echo "*** installing $@ ***"
go build $(GOFLAGS) -o bin/go-sleep $(GOSLEEP_SRC)
bin/go-timeout: $(call find_go_files, $(GOTIMEOUT_SRC)) IPFS-BUILD-OPTIONS
@echo "*** installing $@ ***"
go build $(GOFLAGS) -o bin/go-timeout $(GOTIMEOUT_SRC)
# gx dependencies
multihash_src:
$(eval MULTIHASH_HASH := $(shell cd .. && bin/gx deps find go-multihash))
$(eval MULTIHASH_SRC := gx/ipfs/$(MULTIHASH_HASH)/go-multihash)
hang-fds_src:
$(eval HANG_FDS_HASH := $(shell cd .. && bin/gx deps find hang-fds))
$(eval HANG_FDS_SRC := gx/ipfs/$(HANG_FDS_HASH)/hang-fds)
bin/multihash: multihash_src $(call find_go_files, $(MULTIHASH_SRC)) IPFS-BUILD-OPTIONS
@echo "*** installing $@ ***"
go build $(GOFLAGS) -o bin/multihash $(MULTIHASH_SRC)/multihash
bin/hang-fds: hang-fds_src $(call find_go_files, $(HANG_FDS_SRC)) IPFS-BUILD-OPTIONS
@echo "*** installing $@ ***"
go build $(GOFLAGS) -o bin/hang-fds $(HANG_FDS_SRC)
iptb_src:
$(eval IPTB_HASH := $(shell cd .. && bin/gx deps find iptb))
$(eval IPTB_SRC := gx/ipfs/$(IPTB_HASH)/iptb)

1
test/bin/.gitignore vendored
View File

@ -8,4 +8,3 @@
!checkflags
!continueyn
!verify-go-fmt.sh
!time-out

View File

@ -1,83 +0,0 @@
#!/bin/bash
#
# The Bash shell script executes a command with a time-out.
# Upon time-out expiration SIGTERM (15) is sent to the process. If the signal
# is blocked, then the subsequent SIGKILL (9) terminates it.
#
# How it works: a background subshell polls this script's PID ($$) and posts
# the signals when the timeout elapses; `exec "$@"` then replaces this shell
# with the target command, so the command inherits the watched PID.
#
# Based on the Bash documentation example.
scriptName="${0##*/}"
declare -i DEFAULT_TIMEOUT=9
declare -i DEFAULT_INTERVAL=1
declare -i DEFAULT_DELAY=1
# Timeout.
declare -i timeout=DEFAULT_TIMEOUT
# Interval between checks if the process is still alive.
declare -i interval=DEFAULT_INTERVAL
# Delay between posting the SIGTERM signal and destroying the process by SIGKILL.
declare -i delay=DEFAULT_DELAY
# Print usage/help text for the -t/-i/-d options to stdout.
function printUsage() {
    cat <<EOF
Synopsis
    $scriptName [-t timeout] [-i interval] [-d delay] command
    Execute a command with a time-out.
    Upon time-out expiration SIGTERM (15) is sent to the process. If SIGTERM
    signal is blocked, then the subsequent SIGKILL (9) terminates it.
    -t timeout
        Number of seconds to wait for command completion.
        Default value: $DEFAULT_TIMEOUT seconds.
    -i interval
        Interval between checks if the process is still alive.
        Positive integer, default value: $DEFAULT_INTERVAL seconds.
    -d delay
        Delay between posting the SIGTERM signal and destroying the
        process by SIGKILL. Default value: $DEFAULT_DELAY seconds.
As of today, Bash does not support floating point arithmetic (sleep does),
therefore all delay/time values must be integers.
EOF
}
# Options.
while getopts ":t:i:d:" option; do
    case "$option" in
        t) timeout=$OPTARG ;;
        i) interval=$OPTARG ;;
        d) delay=$OPTARG ;;
        *) printUsage; exit 1 ;;
    esac
done
shift $((OPTIND - 1))
# $# should be at least 1 (the command to execute), however it may be strictly
# greater than 1 if the command itself has options.
if (($# == 0 || interval <= 0)); then
    printUsage
    exit 1
fi
# kill -0 pid   Exit code indicates if a signal may be sent to $pid process.
# Watchdog subshell: runs in the background and counts down while the
# command (which will share PID $$ after the exec below) is still alive.
(
    ((t = timeout))
    while ((t > 0)); do
        sleep $interval
        # Exit the watchdog quietly if the process already finished.
        kill -0 $$ || exit 0
        ((t -= interval))
    done
    # Be nice, post SIGTERM first.
    # The 'exit 0' below will be executed if any preceding command fails,
    # i.e. when the process is already gone.
    kill -s SIGTERM $$ && kill -0 $$ || exit 0
    sleep $delay
    kill -s SIGKILL $$
) 2> /dev/null &
# Replace this shell with the command so it keeps PID $$ (the one the
# watchdog signals). Any output redirection errors go to the command itself.
exec "$@"

View File

@ -0,0 +1 @@
go-timeout

View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2016 Jakub "Kubuxu" Sztandera <k.sztandera@protonmail.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,55 @@
package main
import (
"context"
"fmt"
"os"
"os/exec"
"strconv"
"syscall"
"time"
)
// main runs a command with a time limit, mirroring the behavior of the
// coreutils `timeout` tool: usage is `go-timeout <timeout-in-sec> <command ...>`.
// It exits 124 when the deadline is exceeded, propagates the command's own
// exit status on normal termination, and uses 255 for internal errors.
func main() {
	if len(os.Args) < 3 {
		fmt.Fprintf(os.Stderr,
			"Usage: %s <timeout-in-sec> <command ...>\n", os.Args[0])
		os.Exit(1)
	}
	timeout, err := strconv.ParseUint(os.Args[1], 10, 32)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	// Keep the cancel function and defer it so the context's timer is
	// released; discarding it is flagged by `go vet` (lostcancel).
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctx, os.Args[2], os.Args[3:]...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		// Fix: previously the error was printed but execution fell
		// through to Wait() on a process that never started.
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	err = cmd.Wait()
	if err == nil {
		os.Exit(0)
	}
	if ctx.Err() != nil {
		// Deadline exceeded: 124 matches coreutils `timeout`.
		os.Exit(124)
	}
	exitErr, ok := err.(*exec.ExitError)
	if !ok {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(255)
	}
	waits, ok := exitErr.Sys().(syscall.WaitStatus)
	if !ok {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(255)
	}
	// Forward the child's exit status unchanged.
	os.Exit(waits.ExitStatus())
}

View File

@ -8,7 +8,7 @@
T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh))
BINS = bin/random bin/multihash bin/ipfs bin/pollEndpoint \
bin/iptb bin/go-sleep bin/random-files
bin/iptb bin/go-sleep bin/random-files bin/go-timeout bin/hang-fds
SHARNESS = lib/sharness/sharness.sh
IPFS_ROOT = ../..

View File

@ -111,10 +111,10 @@ test_config_cmd() {
test_expect_success "'ipfs config replace' injects privkey back" '
ipfs config replace show_config &&
grep "\"PrivKey\":" "$IPFS_PATH/config" | grep -e ": \".\+\"" >/dev/null
grep "\"PrivKey\":" "$IPFS_PATH/config" | grep -e ": \".\+\"" >/dev/null
'
test_expect_success "'ipfs config replace' with privkey erors out" '
test_expect_success "'ipfs config replace' with privkey errors out" '
cp "$IPFS_PATH/config" real_config &&
test_expect_code 1 ipfs config replace - < real_config 2> replace_out
'
@ -124,6 +124,16 @@ test_config_cmd() {
test_cmp replace_out replace_expected
'
test_expect_success "'ipfs config replace' with lower case privkey errors out" '
cp "$IPFS_PATH/config" real_config &&
sed -i -e '\''s/PrivKey/privkey/'\'' real_config &&
test_expect_code 1 ipfs config replace - < real_config 2> replace_out
'
test_expect_success "output looks good" '
echo "Error: setting private key with API is not supported" > replace_expected
test_cmp replace_out replace_expected
'
}
test_init_ipfs

View File

@ -189,6 +189,18 @@ test_add_named_pipe() {
'
}
test_add_pwd_is_symlink() {
test_expect_success "ipfs add -r adds directory content when ./ is symlink" '
mkdir hellodir &&
echo "World" > hellodir/world &&
ln -s hellodir hellolink &&
( cd hellolink &&
ipfs add -r . > ../actual ) &&
grep "added Qma9CyFdG5ffrZCcYSin2uAETygB25cswVwEYYzwfQuhTe" actual &&
rm -r hellodir
'
}
test_launch_ipfs_daemon_and_mount
test_expect_success "'ipfs add --help' succeeds" '
@ -344,6 +356,25 @@ test_expect_success "ipfs cat output looks good" '
test_cmp expected actual
'
test_expect_success "ipfs cat with both arg and stdin" '
echo "$MARS" | ipfs cat "$VENUS" >actual
'
test_expect_success "ipfs cat output looks good" '
cat mountdir/planets/venus.txt >expected &&
test_cmp expected actual
'
test_expect_success "ipfs cat with two args and stdin" '
echo "$MARS" | ipfs cat "$VENUS" "$VENUS" >actual
'
test_expect_success "ipfs cat output looks good" '
cat mountdir/planets/venus.txt mountdir/planets/venus.txt >expected &&
test_cmp expected actual
'
test_expect_success "go-random is installed" '
type random
'
@ -354,6 +385,8 @@ test_add_cat_expensive
test_add_named_pipe " Post http://$API_ADDR/api/v0/add?encoding=json&progress=true&r=true&stream-channels=true:"
test_add_pwd_is_symlink
test_kill_ipfs_daemon
# should work offline
@ -371,6 +404,8 @@ test_expect_success "ipfs cat file fails" '
test_add_named_pipe ""
test_add_pwd_is_symlink
# Test daemon in offline mode
test_launch_ipfs_daemon --offline

View File

@ -11,13 +11,9 @@ test_description="Test add -w"
test_expect_success "creating files succeeds" '
mkdir -p files/foo &&
mkdir -p files/bar &&
mkdir -p files/badin
echo "some text" > files/foo/baz &&
ln -s ../foo/baz files/bar/baz &&
ln -s files/does/not/exist files/badin/bad &&
mkdir -p files2/a/b/c &&
echo "some other text" > files2/a/b/c/foo &&
ln -s b files2/a/d
ln -s files/foo/baz files/bar/baz &&
ln -s files/does/not/exist files/bad
'
test_add_symlinks() {
@ -27,21 +23,21 @@ test_add_symlinks() {
'
test_expect_success "output looks good" '
echo QmQRgZT6xVFKJLVVpJDu3WcPkw2iqQ1jqK1F9jmdeq9zAv > filehash_exp &&
echo QmWdiHKoeSW8G1u7ATCgpx4yMoUhYaJBQGkyPLkS9goYZ8 > filehash_exp &&
test_cmp filehash_exp filehash_out
'
test_expect_success "adding a symlink adds the file itself" '
test_expect_success "adding a symlink adds the link itself" '
ipfs add -q files/bar/baz > goodlink_out
'
test_expect_success "output looks good" '
echo QmcPNXE5zjkWkM24xQ7Bi3VAm8fRxiaNp88jFsij7kSQF1 > goodlink_exp &&
echo "QmdocmZeF7qwPT9Z8SiVhMSyKA2KKoA2J7jToW6z6WBmxR" > goodlink_exp &&
test_cmp goodlink_exp goodlink_out
'
test_expect_success "adding a broken symlink works" '
ipfs add -qr files/badin | head -1 > badlink_out
ipfs add -q files/bad > badlink_out
'
test_expect_success "output looks good" '
@ -51,6 +47,9 @@ test_add_symlinks() {
test_expect_success "adding with symlink in middle of path is same as\
adding with no symlink" '
mkdir -p files2/a/b/c &&
echo "some other text" > files2/a/b/c/foo &&
ln -s b files2/a/d
ipfs add -rq files2/a/b/c > no_sym &&
ipfs add -rq files2/a/d/c > sym &&
test_cmp no_sym sym

View File

@ -121,12 +121,20 @@ test_expect_success "daemon with pipe eventually becomes live" '
test_fsh cat stdin_daemon_out || test_fsh cat stdin_daemon_err || test_fsh cat stdin_poll_apiout || test_fsh cat stdin_poll_apierr
'
ulimit -n 512
ulimit -S -n 512
TEST_ULIMIT_PRESET=1
test_launch_ipfs_daemon
test_expect_success "daemon raised its fd limit" '
grep "ulimit" actual_daemon > /dev/null
grep "raised file descriptor limit to 1024." actual_daemon > /dev/null
'
test_expect_success "daemon actually can handle 1024 file descriptors" '
hang-fds -hold=2s 1000 '$API_MADDR'
'
test_expect_success "daemon didnt throw any errors" '
test_expect_code 1 grep "too many open files" daemon_err
'
test_kill_ipfs_daemon

View File

@ -23,11 +23,11 @@ test_expect_success "manually reset repo version to 3" '
'
test_expect_success "ipfs daemon --migrate=false fails" '
test_expect_code 1 ipfs daemon --migrate=false 2> false_out
test_expect_code 1 ipfs daemon --migrate=false > false_out
'
test_expect_success "output looks good" '
grep "please run the migrations manually" false_out
grep "Please get fs-repo-migrations from https://dist.ipfs.io" false_out
'
test_expect_success "ipfs daemon --migrate=true runs migration" '
@ -35,8 +35,8 @@ test_expect_success "ipfs daemon --migrate=true runs migration" '
'
test_expect_success "output looks good" '
grep "running migration" true_out > /dev/null &&
grep "binary completed successfully" true_out > /dev/null
grep "Running: " true_out > /dev/null &&
grep "Success: fs-repo has been migrated to version 4." true_out > /dev/null
'
test_expect_success "'ipfs daemon' prompts to auto migrate" '
@ -44,9 +44,9 @@ test_expect_success "'ipfs daemon' prompts to auto migrate" '
'
test_expect_success "output looks good" '
grep "Found old repo version" daemon_out > /dev/null &&
grep "Run migrations automatically?" daemon_out > /dev/null &&
grep "please run the migrations manually" daemon_err > /dev/null
grep "Found outdated fs-repo" daemon_out > /dev/null &&
grep "Run migrations now?" daemon_out > /dev/null &&
grep "Please get fs-repo-migrations from https://dist.ipfs.io" daemon_out > /dev/null
'
test_done

View File

@ -0,0 +1,60 @@
#!/bin/sh
#
# Copyright (c) 2016 Jeromy Johnson
# MIT Licensed; see the LICENSE file in this repository.
#
test_description="Test ipfs repo fsck"
. lib/test-lib.sh
test_init_ipfs
sort_rand() {
case `uname` in
Linux)
sort -R
;;
Darwin)
ruby -e 'puts STDIN.readlines.shuffle'
;;
*)
echo "unsupported system: $(uname)"
esac
}
check_random_corruption() {
to_break=$(find "$IPFS_PATH/blocks" -type f | sort_rand | head -n 1)
test_expect_success "back up file and overwrite it" '
cp "$to_break" backup_file &&
echo "this is super broken" > "$to_break"
'
test_expect_success "repo verify detects failure" '
test_expect_code 1 ipfs repo verify
'
test_expect_success "replace the object" '
cp backup_file "$to_break"
'
test_expect_success "ipfs repo verify passes just fine now" '
ipfs repo verify
'
}
test_expect_success "create some files" '
random-files -depth=3 -dirs=4 -files=10 foobar > /dev/null
'
test_expect_success "add them all" '
ipfs add -r -q foobar > /dev/null
'
for i in `seq 20`
do
check_random_corruption
done
test_done

View File

@ -28,10 +28,12 @@ thash='QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn'
test_expect_success "GET to Gateway succeeds" '
curl -svX GET "http://127.0.0.1:$gwport/ipfs/$thash" 2>curl_output
'
cat curl_output
# GET Response from Gateway should contain CORS headers
test_expect_success "GET response for Gateway resource looks good" '
grep "Access-Control-Allow-Origin:" curl_output &&
grep "Access-Control-Allow-Methods:" curl_output &&
grep "Access-Control-Allow-Origin:" curl_output | grep "\*" &&
grep "Access-Control-Allow-Methods:" curl_output | grep " GET\b" &&
grep "Access-Control-Allow-Headers:" curl_output
'
@ -41,8 +43,8 @@ test_expect_success "OPTIONS to Gateway succeeds" '
'
# OPTION Response from Gateway should contain CORS headers
test_expect_success "OPTIONS response for Gateway resource looks good" '
grep "Access-Control-Allow-Origin:" curl_output &&
grep "Access-Control-Allow-Methods:" curl_output &&
grep "Access-Control-Allow-Origin:" curl_output | grep "\*" &&
grep "Access-Control-Allow-Methods:" curl_output | grep " GET\b" &&
grep "Access-Control-Allow-Headers:" curl_output
'

View File

@ -11,13 +11,6 @@ test_description="test bitswap commands"
test_init_ipfs
test_launch_ipfs_daemon
test_expect_success "'ipfs block get' adds hash to wantlist" '
export NONEXIST=QmeXxaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa &&
test_expect_code 1 ipfs block get $NONEXIST --timeout=10ms &&
ipfs bitswap wantlist >wantlist_out &&
grep $NONEXIST wantlist_out
'
test_expect_success "'ipfs bitswap stat' succeeds" '
ipfs bitswap stat >stat_out
'
@ -29,8 +22,7 @@ bitswap status
blocks received: 0
dup blocks received: 0
dup data received: 0 B
wantlist [1 keys]
$NONEXIST
wantlist [0 keys]
partners [0]
EOF
test_cmp expected stat_out
@ -45,12 +37,8 @@ test_expect_success "'ipfs bitswap wantlist -p' works" '
ipfs bitswap wantlist -p "$PEERID" >wantlist_p_out
'
test_expect_failure "'ipfs bitswap wantlist -p' output looks good" '
test_cmp wantlist_out wantlist_p_out
'
test_expect_success "'ipfs bitswap unwant' succeeds" '
ipfs bitswap unwant $NONEXIST
test_expect_success "'ipfs bitswap wantlist -p' output looks good" '
test_must_be_empty wantlist_p_out
'
test_expect_success "hash was removed from wantlist" '

View File

@ -11,13 +11,15 @@ test_description="test how the unix files api interacts with the gc"
test_init_ipfs
test_expect_success "object not removed after gc" '
echo "hello world" | ipfs files write --create /hello.txt &&
echo "hello world" > hello.txt &&
cat hello.txt | ipfs files write --create /hello.txt &&
ipfs repo gc &&
ipfs cat QmVib14uvPnCP73XaCDpwugRuwfTsVbGyWbatHAmLSdZUS
'
test_expect_success "/hello.txt still accessible after gc" '
ipfs files read /hello.txt
ipfs files read /hello.txt > hello-actual &&
test_cmp hello.txt hello-actual
'
ADIR_HASH=QmbCgoMYVuZq8m1vK31JQx9DorwQdLMF1M3sJ7kygLLqnW
@ -27,7 +29,6 @@ test_expect_success "gc okay after adding incomplete node -- prep" '
ipfs files mkdir /adir &&
echo "file1" | ipfs files write --create /adir/file1 &&
echo "file2" | ipfs files write --create /adir/file2 &&
ipfs cat $FILE1_HASH &&
ipfs pin add --recursive=false $ADIR_HASH &&
ipfs files rm -r /adir &&
ipfs repo gc && # will remove /adir/file1 and /adir/file2 but not /adir
@ -42,4 +43,34 @@ test_expect_success "gc okay after adding incomplete node" '
ipfs refs $ADIR_HASH
'
test_expect_success "add directory with direct pin" '
mkdir mydir/ &&
echo "hello world!" > mydir/hello.txt &&
FILE_UNPINNED=$(ipfs add --pin=false -q -r mydir/hello.txt) &&
DIR_PINNED=$(ipfs add --pin=false -q -r mydir | tail -n1) &&
ipfs add --pin=false -r mydir &&
ipfs pin add --recursive=false $DIR_PINNED &&
ipfs cat $FILE_UNPINNED
'
test_expect_success "run gc and make sure directory contents are removed" '
ipfs repo gc &&
test_must_fail ipfs cat $FILE_UNPINNED
'
test_expect_success "add incomplete directory and make sure gc is okay" '
ipfs files cp /ipfs/$DIR_PINNED /mydir &&
ipfs repo gc &&
test_must_fail ipfs cat $FILE_UNPINNED
'
test_expect_success "add back directory contents and run gc" '
ipfs add --pin=false mydir/hello.txt &&
ipfs repo gc
'
test_expect_success "make sure directory contents are not removed" '
ipfs cat $FILE_UNPINNED
'
test_done

View File

@ -8,17 +8,17 @@ test_description="Tests for various fixed issues and regressions."
test_expect_success "ipfs init with occupied input works - #2748" '
export IPFS_PATH="ipfs_path"
echo "" | time-out ipfs init &&
echo "" | go-timeout 10 ipfs init &&
rm -rf ipfs_path
'
test_init_ipfs
test_expect_success "ipfs cat --help succeeds with no input" '
time-out ipfs cat --help
test_expect_success "ipfs cat --help succeeds when input remains open" '
yes | go-timeout 1 ipfs cat --help
'
test_expect_success "ipfs pin ls --help succeeds with no input" '
time-out ipfs pin ls --help
test_expect_success "ipfs pin ls --help succeeds when input remains open" '
yes | go-timeout 1 ipfs pin ls --help
'
test_expect_success "ipfs add on 1MB from stdin woks" '