Mirror of https://github.com/ipfs/kubo.git, synced 2026-02-23 03:17:43 +08:00
Commit 7e887b9191
.gitignore (vendored, 2 changes)
@@ -8,6 +8,6 @@
 *.test
 *.orig
 *~
-.go-ipfs
 
 /test/bin
+.ipfs
Godeps/Godeps.json (generated, 6 changes)
@@ -141,7 +141,7 @@
 	},
 	{
 		"ImportPath": "github.com/jbenet/go-datastore",
-		"Rev": "35738aceb35505bd3c77c2a618fb1947ca3f72da"
+		"Rev": "f1a0a0fd88f23b67589957f02b7500372aca186f"
 	},
 	{
 		"ImportPath": "github.com/jbenet/go-detect-race",
@@ -159,6 +159,10 @@
 		"ImportPath": "github.com/jbenet/go-logging",
 		"Rev": "74bec4b83f6d45d1402c1e9d94c0c29e39f6e0ea"
 	},
 	{
+		"ImportPath": "github.com/jbenet/go-migrate",
+		"Rev": "593be6b4b24a87e4d380e54339721ad4b4c6543c"
+	},
+	{
 		"ImportPath": "github.com/jbenet/go-msgio",
 		"Rev": "dbae89193876910c736b2ce1291fa8bbcf299d77"
Godeps/_workspace/src/github.com/jbenet/go-datastore/Godeps/Godeps.json (generated, vendored, 4 changes)
@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/jbenet/go-datastore",
-	"GoVersion": "go1.3",
+	"GoVersion": "go1.4",
 	"Packages": [
 		"./..."
 	],
@@ -20,7 +20,7 @@
 	},
 	{
 		"ImportPath": "github.com/jbenet/goprocess",
-		"Rev": "b4b4178efcf2404ce9db72438c9c49db2fb399d8"
+		"Rev": "5b02f8d275a2dd882fb06f8bbdf74347795ff3b1"
 	},
 	{
 		"ImportPath": "github.com/mattbaird/elastigo/api",
Godeps/_workspace/src/github.com/jbenet/go-datastore/Makefile (generated, vendored, 4 changes)
@@ -1,8 +1,8 @@
 build:
 	go build
 
-test:
-	go test ./...
+test: build
+	go test -race -cpu=5 -v ./...
 
 # saves/vendors third-party dependencies to Godeps/_workspace
 # -r flag rewrites import paths to use the vendored path
Godeps/_workspace/src/github.com/jbenet/go-datastore/README.md (generated, vendored, 2 changes)
@@ -8,7 +8,7 @@ Based on [datastore.py](https://github.com/datastore/datastore).
 
 ### Documentation
 
-https://godoc.org/github.com/datastore/go-datastore
+https://godoc.org/github.com/jbenet/go-datastore
 
 ### License
 
Godeps/_workspace/src/github.com/jbenet/go-datastore/basic_ds.go (generated, vendored, 3 changes)
@@ -45,6 +45,9 @@ func (d *MapDatastore) Has(key Key) (exists bool, err error) {
 
 // Delete implements Datastore.Delete
 func (d *MapDatastore) Delete(key Key) (err error) {
+	if _, found := d.values[key]; !found {
+		return ErrNotFound
+	}
 	delete(d.values, key)
 	return nil
 }
Godeps/_workspace/src/github.com/jbenet/go-datastore/callback/callback.go (generated, vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
package callback

import (
	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
)

type Datastore struct {
	D ds.Datastore
	F func()
}

func Wrap(ds ds.Datastore, f func()) ds.Datastore {
	return &Datastore{ds, f}
}

func (c *Datastore) SetFunc(f func()) { c.F = f }

func (c *Datastore) Put(key ds.Key, value interface{}) (err error) {
	c.F()
	return c.D.Put(key, value)
}

func (c *Datastore) Get(key ds.Key) (value interface{}, err error) {
	c.F()
	return c.D.Get(key)
}

func (c *Datastore) Has(key ds.Key) (exists bool, err error) {
	c.F()
	return c.D.Has(key)
}

func (c *Datastore) Delete(key ds.Key) (err error) {
	c.F()
	return c.D.Delete(key)
}

func (c *Datastore) Query(q dsq.Query) (dsq.Results, error) {
	c.F()
	return c.D.Query(q)
}
Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go (generated, vendored, new file, 126 lines)
@@ -0,0 +1,126 @@
|
||||
package coalesce
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
var (
	// parent keys
	putKey    = "put"
	getKey    = "get"
	hasKey    = "has"
	deleteKey = "delete"
)
|
||||
|
||||
type keySync struct {
|
||||
op string
|
||||
k ds.Key
|
||||
value interface{}
|
||||
}
|
||||
|
||||
type valSync struct {
|
||||
val interface{}
|
||||
err error
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// Datastore uses golang-lru for internal storage.
|
||||
type datastore struct {
|
||||
child ds.Datastore
|
||||
|
||||
reqmu sync.Mutex
|
||||
req map[keySync]*valSync
|
||||
}
|
||||
|
||||
// Wrap wraps a given datastore with a coalescing datastore.
|
||||
// All simultaenous requests which have the same keys will
|
||||
// yield the exact same result. Note that this shares
|
||||
// memory. It is not possible to copy a generic interface{}
|
||||
func Wrap(d ds.Datastore) ds.Datastore {
|
||||
return &datastore{child: d, req: make(map[keySync]*valSync)}
|
||||
}
|
||||
|
||||
// sync synchronizes requests for a given key.
|
||||
func (d *datastore) sync(k keySync) (vs *valSync, found bool) {
|
||||
d.reqmu.Lock()
|
||||
vs, found = d.req[k]
|
||||
if !found {
|
||||
vs = &valSync{done: make(chan struct{})}
|
||||
d.req[k] = vs
|
||||
}
|
||||
d.reqmu.Unlock()
|
||||
|
||||
// if we did find one, wait till it's done.
|
||||
if found {
|
||||
<-vs.done
|
||||
}
|
||||
return vs, found
|
||||
}
|
||||
|
||||
// sync synchronizes requests for a given key.
|
||||
func (d *datastore) syncDone(k keySync) {
|
||||
|
||||
d.reqmu.Lock()
|
||||
vs, found := d.req[k]
|
||||
if !found {
|
||||
panic("attempt to syncDone non-existent request")
|
||||
}
|
||||
delete(d.req, k)
|
||||
d.reqmu.Unlock()
|
||||
|
||||
// release all the waiters.
|
||||
close(vs.done)
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
ks := keySync{putKey, key, value}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.err = d.child.Put(key, value)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
ks := keySync{getKey, key, nil}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.val, vs.err = d.child.Get(key)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return vs.val, vs.err
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
ks := keySync{hasKey, key, nil}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.val, vs.err = d.child.Has(key)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return vs.val.(bool), vs.err
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d *datastore) Delete(key ds.Key) (err error) {
|
||||
ks := keySync{deleteKey, key, nil}
|
||||
vs, found := d.sync(ks)
|
||||
if !found {
|
||||
vs.err = d.child.Delete(key)
|
||||
d.syncDone(ks)
|
||||
}
|
||||
return vs.err
|
||||
}
|
||||
|
||||
// Query returns a list of keys in the datastore
|
||||
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
// query not coalesced yet.
|
||||
return d.child.Query(q)
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce_test.go (generated, vendored, new file, 299 lines)
@@ -0,0 +1,299 @@
|
||||
package coalesce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
dscb "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/callback"
|
||||
dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
|
||||
)
|
||||
|
||||
type mock struct {
|
||||
sync.Mutex
|
||||
|
||||
inside int
|
||||
outside int
|
||||
ds ds.Datastore
|
||||
}
|
||||
|
||||
func setup() *mock {
|
||||
m := &mock{}
|
||||
|
||||
mp := ds.NewMapDatastore()
|
||||
ts := dssync.MutexWrap(mp)
|
||||
cb1 := dscb.Wrap(ts, func() {
|
||||
m.Lock()
|
||||
m.inside++
|
||||
m.Unlock()
|
||||
<-time.After(20 * time.Millisecond)
|
||||
})
|
||||
cd := Wrap(cb1)
|
||||
cb2 := dscb.Wrap(cd, func() {
|
||||
m.Lock()
|
||||
m.outside++
|
||||
m.Unlock()
|
||||
})
|
||||
|
||||
m.ds = cb2
|
||||
return m
|
||||
}
|
||||
|
||||
func TestCoalesceSamePut(t *testing.T) {
|
||||
m := setup()
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
m.ds.Put(ds.NewKey("foo"), "bar")
|
||||
done <- struct{}{}
|
||||
}()
|
||||
go func() {
|
||||
m.ds.Put(ds.NewKey("foo"), "bar")
|
||||
done <- struct{}{}
|
||||
}()
|
||||
go func() {
|
||||
m.ds.Put(ds.NewKey("foo"), "bar")
|
||||
done <- struct{}{}
|
||||
}()
|
||||
|
||||
<-done
|
||||
<-done
|
||||
<-done
|
||||
|
||||
if m.inside != 1 {
|
||||
t.Error("incalls should be 1", m.inside)
|
||||
}
|
||||
|
||||
if m.outside != 3 {
|
||||
t.Error("outcalls should be 3", m.outside)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoalesceSamePutDiffPut(t *testing.T) {
|
||||
m := setup()
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
m.ds.Put(ds.NewKey("foo"), "bar")
|
||||
done <- struct{}{}
|
||||
}()
|
||||
go func() {
|
||||
m.ds.Put(ds.NewKey("foo"), "bar")
|
||||
done <- struct{}{}
|
||||
}()
|
||||
go func() {
|
||||
m.ds.Put(ds.NewKey("foo"), "bar2")
|
||||
done <- struct{}{}
|
||||
}()
|
||||
go func() {
|
||||
m.ds.Put(ds.NewKey("foo"), "bar3")
|
||||
done <- struct{}{}
|
||||
}()
|
||||
|
||||
<-done
|
||||
<-done
|
||||
<-done
|
||||
<-done
|
||||
|
||||
if m.inside != 3 {
|
||||
t.Error("incalls should be 3", m.inside)
|
||||
}
|
||||
|
||||
if m.outside != 4 {
|
||||
t.Error("outcalls should be 4", m.outside)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoalesceSameGet(t *testing.T) {
|
||||
m := setup()
|
||||
done := make(chan struct{})
|
||||
errs := make(chan error, 30)
|
||||
|
||||
m.ds.Put(ds.NewKey("foo1"), "bar")
|
||||
m.ds.Put(ds.NewKey("foo2"), "baz")
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
v, err := m.ds.Get(ds.NewKey("foo1"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if v != "bar" {
|
||||
errs <- fmt.Errorf("v is not bar", v)
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
v, err := m.ds.Get(ds.NewKey("foo2"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if v != "baz" {
|
||||
errs <- fmt.Errorf("v is not baz", v)
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
_, err := m.ds.Get(ds.NewKey("foo3"))
|
||||
if err == nil {
|
||||
errs <- fmt.Errorf("no error")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < 30; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
if m.inside != 5 {
|
||||
t.Error("incalls should be 3", m.inside)
|
||||
}
|
||||
|
||||
if m.outside != 32 {
|
||||
t.Error("outcalls should be 30", m.outside)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoalesceHas(t *testing.T) {
|
||||
m := setup()
|
||||
done := make(chan struct{})
|
||||
errs := make(chan error, 30)
|
||||
|
||||
m.ds.Put(ds.NewKey("foo1"), "bar")
|
||||
m.ds.Put(ds.NewKey("foo2"), "baz")
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
v, err := m.ds.Has(ds.NewKey("foo1"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if !v {
|
||||
errs <- fmt.Errorf("should have foo1")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
v, err := m.ds.Has(ds.NewKey("foo2"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if !v {
|
||||
errs <- fmt.Errorf("should have foo2")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
v, err := m.ds.Has(ds.NewKey("foo3"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if v {
|
||||
errs <- fmt.Errorf("should not have foo3")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < 30; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
if m.inside != 5 {
|
||||
t.Error("incalls should be 3", m.inside)
|
||||
}
|
||||
|
||||
if m.outside != 32 {
|
||||
t.Error("outcalls should be 30", m.outside)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoalesceDelete(t *testing.T) {
|
||||
m := setup()
|
||||
done := make(chan struct{})
|
||||
errs := make(chan error, 30)
|
||||
|
||||
m.ds.Put(ds.NewKey("foo1"), "bar1")
|
||||
m.ds.Put(ds.NewKey("foo2"), "bar2")
|
||||
m.ds.Put(ds.NewKey("foo3"), "bar3")
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
err := m.ds.Delete(ds.NewKey("foo1"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
has, err := m.ds.Has(ds.NewKey("foo1"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if has {
|
||||
t.Error("still have it after deleting")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
err := m.ds.Delete(ds.NewKey("foo2"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
has, err := m.ds.Has(ds.NewKey("foo2"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if has {
|
||||
t.Error("still have it after deleting")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
has, err := m.ds.Has(ds.NewKey("foo3"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if !has {
|
||||
t.Error("should still have foo3")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
has, err := m.ds.Has(ds.NewKey("foo4"))
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
if has {
|
||||
t.Error("should not have foo4")
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < 40; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
if m.inside != 9 {
|
||||
t.Error("incalls should be 9", m.inside)
|
||||
}
|
||||
|
||||
if m.outside != 63 {
|
||||
t.Error("outcalls should be 63", m.outside)
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/datastore.go (generated, vendored, 4 changes)
@@ -39,7 +39,7 @@ type Datastore interface {
 	// Ultimately, the lowest-level datastore will need to do some value checking
 	// or risk getting incorrect values. It may also be useful to expose a more
 	// type-safe interface to your application, and do the checking up-front.
-	Put(key Key, value interface{}) (err error)
+	Put(key Key, value interface{}) error
 
 	// Get retrieves the object `value` named by `key`.
 	// Get will return ErrNotFound if the key is not mapped to a value.
@@ -52,7 +52,7 @@ type Datastore interface {
 	Has(key Key) (exists bool, err error)
 
 	// Delete removes the value for given `key`.
-	Delete(key Key) (err error)
+	Delete(key Key) error
 
 	// Query searches the datastore and returns a query result. This function
 	// may return before the query actually runs. To wait for the query:
Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go (generated, vendored, new file, 250 lines)
@@ -0,0 +1,250 @@
|
||||
// Package flatfs is a Datastore implementation that stores all
|
||||
// objects in a two-level directory structure in the local file
|
||||
// system, regardless of the hierarchy of the keys.
|
||||
package flatfs
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
const (
|
||||
extension = ".data"
|
||||
maxPrefixLen = 16
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBadPrefixLen = errors.New("bad prefix length")
|
||||
)
|
||||
|
||||
type Datastore struct {
|
||||
path string
|
||||
// length of the dir splay prefix, in bytes of hex digits
|
||||
hexPrefixLen int
|
||||
}
|
||||
|
||||
var _ datastore.Datastore = (*Datastore)(nil)
|
||||
|
||||
func New(path string, prefixLen int) (*Datastore, error) {
|
||||
if prefixLen <= 0 || prefixLen > maxPrefixLen {
|
||||
return nil, ErrBadPrefixLen
|
||||
}
|
||||
fs := &Datastore{
|
||||
path: path,
|
||||
// convert from binary bytes to bytes of hex encoding
|
||||
hexPrefixLen: prefixLen * hex.EncodedLen(1),
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
var padding = strings.Repeat("_", maxPrefixLen*hex.EncodedLen(1))
|
||||
|
||||
func (fs *Datastore) encode(key datastore.Key) (dir, file string) {
|
||||
safe := hex.EncodeToString(key.Bytes()[1:])
|
||||
prefix := (safe + padding)[:fs.hexPrefixLen]
|
||||
dir = path.Join(fs.path, prefix)
|
||||
file = path.Join(dir, safe+extension)
|
||||
return dir, file
|
||||
}
|
||||
|
||||
func (fs *Datastore) decode(file string) (key datastore.Key, ok bool) {
|
||||
if path.Ext(file) != extension {
|
||||
return datastore.Key{}, false
|
||||
}
|
||||
name := file[:len(file)-len(extension)]
|
||||
k, err := hex.DecodeString(name)
|
||||
if err != nil {
|
||||
return datastore.Key{}, false
|
||||
}
|
||||
return datastore.NewKey(string(k)), true
|
||||
}
|
||||
|
||||
func (fs *Datastore) makePrefixDir(dir string) error {
|
||||
if err := os.Mkdir(dir, 0777); err != nil {
|
||||
// EEXIST is safe to ignore here, that just means the prefix
|
||||
// directory already existed.
|
||||
if !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// In theory, if we create a new prefix dir and add a file to
|
||||
// it, the creation of the prefix dir itself might not be
|
||||
// durable yet. Sync the root dir after a successful mkdir of
|
||||
// a prefix dir, just to be paranoid.
|
||||
f, err := os.Open(fs.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
if err := f.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) Put(key datastore.Key, value interface{}) error {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return datastore.ErrInvalidType
|
||||
}
|
||||
|
||||
dir, path := fs.encode(key)
|
||||
if err := fs.makePrefixDir(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dirF, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dirF.Close()
|
||||
|
||||
tmp, err := ioutil.TempFile(dir, "put-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
closed := false
|
||||
removed := false
|
||||
defer func() {
|
||||
if !closed {
|
||||
// silence errcheck
|
||||
_ = tmp.Close()
|
||||
}
|
||||
if !removed {
|
||||
// silence errcheck
|
||||
_ = os.Remove(tmp.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err := tmp.Write(val); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tmp.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tmp.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
closed = true
|
||||
|
||||
err = os.Rename(tmp.Name(), path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
removed = true
|
||||
|
||||
if err := dirF.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) Get(key datastore.Key) (value interface{}, err error) {
|
||||
_, path := fs.encode(key)
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, datastore.ErrNotFound
|
||||
}
|
||||
// no specific error to return, so just pass it through
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) Has(key datastore.Key) (exists bool, err error) {
|
||||
_, path := fs.encode(key)
|
||||
switch _, err := os.Stat(path); {
|
||||
case err == nil:
|
||||
return true, nil
|
||||
case os.IsNotExist(err):
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *Datastore) Delete(key datastore.Key) error {
|
||||
_, path := fs.encode(key)
|
||||
switch err := os.Remove(path); {
|
||||
case err == nil:
|
||||
return nil
|
||||
case os.IsNotExist(err):
|
||||
return datastore.ErrNotFound
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *Datastore) Query(q query.Query) (query.Results, error) {
|
||||
if (q.Prefix != "" && q.Prefix != "/") ||
|
||||
len(q.Filters) > 0 ||
|
||||
len(q.Orders) > 0 ||
|
||||
q.Limit > 0 ||
|
||||
q.Offset > 0 ||
|
||||
!q.KeysOnly {
|
||||
// TODO this is overly simplistic, but the only caller is
|
||||
// `ipfs refs local` for now, and this gets us moving.
|
||||
return nil, errors.New("flatfs only supports listing all keys in random order")
|
||||
}
|
||||
|
||||
// TODO this dumb implementation gathers all keys into a single slice.
|
||||
root, err := os.Open(fs.path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer root.Close()
|
||||
|
||||
var res []query.Entry
|
||||
prefixes, err := root.Readdir(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, fi := range prefixes {
|
||||
var err error
|
||||
res, err = fs.enumerateKeys(fi, res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return query.ResultsWithEntries(q, res), nil
|
||||
}
|
||||
|
||||
func (fs *Datastore) enumerateKeys(fi os.FileInfo, res []query.Entry) ([]query.Entry, error) {
|
||||
if !fi.IsDir() || fi.Name()[0] == '.' {
|
||||
return res, nil
|
||||
}
|
||||
child, err := os.Open(path.Join(fs.path, fi.Name()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer child.Close()
|
||||
objs, err := child.Readdir(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, fi := range objs {
|
||||
if !fi.Mode().IsRegular() || fi.Name()[0] == '.' {
|
||||
return res, nil
|
||||
}
|
||||
key, ok := fs.decode(fi.Name())
|
||||
if !ok {
|
||||
return res, nil
|
||||
}
|
||||
res = append(res, query.Entry{Key: key.String()})
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
var _ datastore.ThreadSafeDatastore = (*Datastore)(nil)
|
||||
|
||||
func (*Datastore) IsThreadSafe() {}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go (generated, vendored, new file, 315 lines)
@@ -0,0 +1,315 @@
|
||||
package flatfs_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
func tempdir(t testing.TB) (path string, cleanup func()) {
|
||||
path, err := ioutil.TempDir("", "test-datastore-flatfs-")
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create temp directory: %v", err)
|
||||
}
|
||||
|
||||
cleanup = func() {
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
t.Errorf("tempdir cleanup failed: %v", err)
|
||||
}
|
||||
}
|
||||
return path, cleanup
|
||||
}
|
||||
|
||||
func TestBadPrefixLen(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
for i := 0; i > -3; i-- {
|
||||
_, err := flatfs.New(temp, 0)
|
||||
if g, e := err, flatfs.ErrBadPrefixLen; g != e {
|
||||
t.Errorf("expected ErrBadPrefixLen, got: %v", g)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutBadValueType(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
err = fs.Put(datastore.NewKey("quux"), 22)
|
||||
if g, e := err, datastore.ErrInvalidType; g != e {
|
||||
t.Fatalf("expected ErrInvalidType, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPut(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
err = fs.Put(datastore.NewKey("quux"), []byte("foobar"))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
const input = "foobar"
|
||||
err = fs.Put(datastore.NewKey("quux"), []byte(input))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
|
||||
data, err := fs.Get(datastore.NewKey("quux"))
|
||||
if err != nil {
|
||||
t.Fatalf("Get failed: %v", err)
|
||||
}
|
||||
buf, ok := data.([]byte)
|
||||
if !ok {
|
||||
t.Fatalf("expected []byte from Get, got %T: %v", data, data)
|
||||
}
|
||||
if g, e := string(buf), input; g != e {
|
||||
t.Fatalf("Get gave wrong content: %q != %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutOverwrite(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
const (
|
||||
loser = "foobar"
|
||||
winner = "xyzzy"
|
||||
)
|
||||
err = fs.Put(datastore.NewKey("quux"), []byte(loser))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
|
||||
err = fs.Put(datastore.NewKey("quux"), []byte(winner))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
|
||||
data, err := fs.Get(datastore.NewKey("quux"))
|
||||
if err != nil {
|
||||
t.Fatalf("Get failed: %v", err)
|
||||
}
|
||||
if g, e := string(data.([]byte)), winner; g != e {
|
||||
t.Fatalf("Get gave wrong content: %q != %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNotFoundError(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
_, err = fs.Get(datastore.NewKey("quux"))
|
||||
if g, e := err, datastore.ErrNotFound; g != e {
|
||||
t.Fatalf("expected ErrNotFound, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorage(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
const prefixLen = 2
|
||||
const prefix = "2f71"
|
||||
const target = prefix + "/2f71757578.data"
|
||||
fs, err := flatfs.New(temp, prefixLen)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
err = fs.Put(datastore.NewKey("quux"), []byte("foobar"))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
|
||||
seen := false
|
||||
walk := func(absPath string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path, err := filepath.Rel(temp, absPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch path {
|
||||
case ".", "..":
|
||||
// ignore
|
||||
case prefix:
|
||||
if !fi.IsDir() {
|
||||
t.Errorf("prefix directory is not a file? %v", fi.Mode())
|
||||
}
|
||||
// we know it's there if we see the file, nothing more to
|
||||
// do here
|
||||
case target:
|
||||
seen = true
|
||||
if !fi.Mode().IsRegular() {
|
||||
t.Errorf("expected a regular file, mode: %04o", fi.Mode())
|
||||
}
|
||||
if g, e := fi.Mode()&os.ModePerm&0007, os.FileMode(0000); g != e {
|
||||
t.Errorf("file should not be world accessible: %04o", fi.Mode())
|
||||
}
|
||||
default:
|
||||
t.Errorf("saw unexpected directory entry: %q %v", path, fi.Mode())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := filepath.Walk(temp, walk); err != nil {
|
||||
t.Fatal("walk: %v", err)
|
||||
}
|
||||
if !seen {
|
||||
t.Error("did not see the data file")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasNotFound(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
found, err := fs.Has(datastore.NewKey("quux"))
|
||||
if err != nil {
|
||||
t.Fatalf("Has fail: %v\n", err)
|
||||
}
|
||||
if g, e := found, false; g != e {
|
||||
t.Fatalf("wrong Has: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasFound(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
err = fs.Put(datastore.NewKey("quux"), []byte("foobar"))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
|
||||
found, err := fs.Has(datastore.NewKey("quux"))
|
||||
if err != nil {
|
||||
t.Fatalf("Has fail: %v\n", err)
|
||||
}
|
||||
if g, e := found, true; g != e {
|
||||
t.Fatalf("wrong Has: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteNotFound(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
|
||||
err = fs.Delete(datastore.NewKey("quux"))
|
||||
if g, e := err, datastore.ErrNotFound; g != e {
|
||||
t.Fatalf("expected ErrNotFound, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteFound(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
err = fs.Put(datastore.NewKey("quux"), []byte("foobar"))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
|
||||
err = fs.Delete(datastore.NewKey("quux"))
|
||||
if err != nil {
|
||||
t.Fatalf("Delete fail: %v\n", err)
|
||||
}
|
||||
|
||||
// check that it's gone
|
||||
_, err = fs.Get(datastore.NewKey("quux"))
|
||||
if g, e := err, datastore.ErrNotFound; g != e {
|
||||
t.Fatalf("expected Get after Delete to give ErrNotFound, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuerySimple(t *testing.T) {
|
||||
temp, cleanup := tempdir(t)
|
||||
defer cleanup()
|
||||
|
||||
fs, err := flatfs.New(temp, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("New fail: %v\n", err)
|
||||
}
|
||||
const myKey = "quux"
|
||||
err = fs.Put(datastore.NewKey(myKey), []byte("foobar"))
|
||||
if err != nil {
|
||||
t.Fatalf("Put fail: %v\n", err)
|
||||
}
|
||||
|
||||
res, err := fs.Query(query.Query{KeysOnly: true})
|
||||
if err != nil {
|
||||
t.Fatalf("Query fail: %v\n", err)
|
||||
}
|
||||
entries, err := res.Rest()
|
||||
if err != nil {
|
||||
t.Fatalf("Query Results.Rest fail: %v\n", err)
|
||||
}
|
||||
seen := false
|
||||
for _, e := range entries {
|
||||
switch e.Key {
|
||||
case datastore.NewKey(myKey).String():
|
||||
seen = true
|
||||
default:
|
||||
t.Errorf("saw unexpected key: %q", e.Key)
|
||||
}
|
||||
}
|
||||
if !seen {
|
||||
t.Errorf("did not see wanted key %q in %+v", myKey, entries)
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/fs/fs.go (generated, vendored, 19 changes)
@@ -1,3 +1,20 @@
+// Package fs is a simple Datastore implementation that stores keys
+// are directories and files, mirroring the key. That is, the key
+// "/foo/bar" is stored as file "PATH/foo/bar/.dsobject".
+//
+// This means key some segments will not work. For example, the
+// following keys will result in unwanted behavior:
+//
+// - "/foo/./bar"
+// - "/foo/../bar"
+// - "/foo\x00bar"
+//
+// Keys that only differ in case may be confused with each other on
+// case insensitive file systems, for example in OS X.
+//
+// This package is intended for exploratory use, where the user would
+// examine the file system manually, and should only be used with
+// human-friendly, trusted keys. You have been warned.
 package fs
 
 import (
@@ -13,7 +30,7 @@ import (
 
 var ObjectKeySuffix = ".dsobject"
 
-// Datastore uses a standard Go map for internal storage.
+// Datastore uses a uses a file per key to store values.
 type Datastore struct {
 	path string
 }
Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go (generated, vendored, 1 change)
@@ -9,7 +9,6 @@ import (
 	. "gopkg.in/check.v1"
 )
 
-// Hook up gocheck into the "go test" runner.
 func Test(t *testing.T) { TestingT(t) }
 
 type DSSuite struct{}
Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount.go (generated, vendored, new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
// Package mount provides a Datastore that has other Datastores
|
||||
// mounted at various key prefixes.
|
||||
package mount
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/keytransform"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoMount = errors.New("no datastore mounted for this key")
|
||||
)
|
||||
|
||||
type Mount struct {
|
||||
Prefix datastore.Key
|
||||
Datastore datastore.Datastore
|
||||
}
|
||||
|
||||
func New(mounts []Mount) *Datastore {
|
||||
// make a copy so we're sure it doesn't mutate
|
||||
m := make([]Mount, len(mounts))
|
||||
for i, v := range mounts {
|
||||
m[i] = v
|
||||
}
|
||||
return &Datastore{mounts: m}
|
||||
}
|
||||
|
||||
type Datastore struct {
|
||||
mounts []Mount
|
||||
}
|
||||
|
||||
var _ datastore.Datastore = (*Datastore)(nil)
|
||||
|
||||
func (d *Datastore) lookup(key datastore.Key) (ds datastore.Datastore, mountpoint, rest datastore.Key) {
|
||||
for _, m := range d.mounts {
|
||||
if m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {
|
||||
s := strings.TrimPrefix(key.String(), m.Prefix.String())
|
||||
k := datastore.NewKey(s)
|
||||
return m.Datastore, m.Prefix, k
|
||||
}
|
||||
}
|
||||
return nil, datastore.NewKey("/"), key
|
||||
}
|
||||
|
||||
func (d *Datastore) Put(key datastore.Key, value interface{}) error {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return ErrNoMount
|
||||
}
|
||||
return ds.Put(k, value)
|
||||
}
|
||||
|
||||
func (d *Datastore) Get(key datastore.Key) (value interface{}, err error) {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return nil, datastore.ErrNotFound
|
||||
}
|
||||
return ds.Get(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Has(key datastore.Key) (exists bool, err error) {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return false, nil
|
||||
}
|
||||
return ds.Has(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Delete(key datastore.Key) error {
|
||||
ds, _, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return datastore.ErrNotFound
|
||||
}
|
||||
return ds.Delete(k)
|
||||
}
|
||||
|
||||
func (d *Datastore) Query(q query.Query) (query.Results, error) {
|
||||
if len(q.Filters) > 0 ||
|
||||
len(q.Orders) > 0 ||
|
||||
q.Limit > 0 ||
|
||||
q.Offset > 0 {
|
||||
// TODO this is overly simplistic, but the only caller is
|
||||
// `ipfs refs local` for now, and this gets us moving.
|
||||
return nil, errors.New("mount only supports listing all prefixed keys in random order")
|
||||
}
|
||||
key := datastore.NewKey(q.Prefix)
|
||||
ds, mount, k := d.lookup(key)
|
||||
if ds == nil {
|
||||
return nil, errors.New("mount only supports listing a mount point")
|
||||
}
|
||||
// TODO support listing cross mount points too
|
||||
|
||||
// delegate the query to the mounted datastore, while adjusting
|
||||
// keys in and out
|
||||
q2 := q
|
||||
q2.Prefix = k.String()
|
||||
wrapDS := keytransform.Wrap(ds, &keytransform.Pair{
|
||||
Convert: func(datastore.Key) datastore.Key {
|
||||
panic("this should never be called")
|
||||
},
|
||||
Invert: func(k datastore.Key) datastore.Key {
|
||||
return mount.Child(k)
|
||||
},
|
||||
})
|
||||
|
||||
r, err := wrapDS.Query(q2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r = query.ResultsReplaceQuery(r, q)
|
||||
return r, nil
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/mount/mount_test.go (generated, vendored, new file, 241 lines)
@@ -0,0 +1,241 @@
|
||||
package mount_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
func TestPutBadNothing(t *testing.T) {
|
||||
m := mount.New(nil)
|
||||
|
||||
err := m.Put(datastore.NewKey("quux"), []byte("foobar"))
|
||||
if g, e := err, mount.ErrNoMount; g != e {
|
||||
t.Fatalf("Put got wrong error: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutBadNoMount(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/redherring"), Datastore: mapds},
|
||||
})
|
||||
|
||||
err := m.Put(datastore.NewKey("/quux/thud"), []byte("foobar"))
|
||||
if g, e := err, mount.ErrNoMount; g != e {
|
||||
t.Fatalf("expected ErrNoMount, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPut(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
if err := m.Put(datastore.NewKey("/quux/thud"), []byte("foobar")); err != nil {
|
||||
t.Fatalf("Put error: %v", err)
|
||||
}
|
||||
|
||||
val, err := mapds.Get(datastore.NewKey("/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Get error: %v", err)
|
||||
}
|
||||
buf, ok := val.([]byte)
|
||||
if !ok {
|
||||
t.Fatalf("Get value is not []byte: %T %v", val, val)
|
||||
}
|
||||
if g, e := string(buf), "foobar"; g != e {
|
||||
t.Errorf("wrong value: %q != %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBadNothing(t *testing.T) {
|
||||
m := mount.New([]mount.Mount{})
|
||||
|
||||
_, err := m.Get(datastore.NewKey("/quux/thud"))
|
||||
if g, e := err, datastore.ErrNotFound; g != e {
|
||||
t.Fatalf("expected ErrNotFound, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBadNoMount(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/redherring"), Datastore: mapds},
|
||||
})
|
||||
|
||||
_, err := m.Get(datastore.NewKey("/quux/thud"))
|
||||
if g, e := err, datastore.ErrNotFound; g != e {
|
||||
t.Fatalf("expected ErrNotFound, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNotFound(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
_, err := m.Get(datastore.NewKey("/quux/thud"))
|
||||
if g, e := err, datastore.ErrNotFound; g != e {
|
||||
t.Fatalf("expected ErrNotFound, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
if err := mapds.Put(datastore.NewKey("/thud"), []byte("foobar")); err != nil {
|
||||
t.Fatalf("Get error: %v", err)
|
||||
}
|
||||
|
||||
val, err := m.Get(datastore.NewKey("/quux/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Put error: %v", err)
|
||||
}
|
||||
|
||||
buf, ok := val.([]byte)
|
||||
if !ok {
|
||||
t.Fatalf("Get value is not []byte: %T %v", val, val)
|
||||
}
|
||||
if g, e := string(buf), "foobar"; g != e {
|
||||
t.Errorf("wrong value: %q != %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasBadNothing(t *testing.T) {
|
||||
m := mount.New([]mount.Mount{})
|
||||
|
||||
found, err := m.Has(datastore.NewKey("/quux/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Has error: %v", err)
|
||||
}
|
||||
if g, e := found, false; g != e {
|
||||
t.Fatalf("wrong value: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasBadNoMount(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/redherring"), Datastore: mapds},
|
||||
})
|
||||
|
||||
found, err := m.Has(datastore.NewKey("/quux/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Has error: %v", err)
|
||||
}
|
||||
if g, e := found, false; g != e {
|
||||
t.Fatalf("wrong value: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasNotFound(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
found, err := m.Has(datastore.NewKey("/quux/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Has error: %v", err)
|
||||
}
|
||||
if g, e := found, false; g != e {
|
||||
t.Fatalf("wrong value: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHas(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
if err := mapds.Put(datastore.NewKey("/thud"), []byte("foobar")); err != nil {
|
||||
t.Fatalf("Put error: %v", err)
|
||||
}
|
||||
|
||||
found, err := m.Has(datastore.NewKey("/quux/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Has error: %v", err)
|
||||
}
|
||||
if g, e := found, true; g != e {
|
||||
t.Fatalf("wrong value: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteNotFound(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
err := m.Delete(datastore.NewKey("/quux/thud"))
|
||||
if g, e := err, datastore.ErrNotFound; g != e {
|
||||
t.Fatalf("expected ErrNotFound, got: %v\n", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
if err := mapds.Put(datastore.NewKey("/thud"), []byte("foobar")); err != nil {
|
||||
t.Fatalf("Put error: %v", err)
|
||||
}
|
||||
|
||||
err := m.Delete(datastore.NewKey("/quux/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Delete error: %v", err)
|
||||
}
|
||||
|
||||
// make sure it disappeared
|
||||
found, err := mapds.Has(datastore.NewKey("/thud"))
|
||||
if err != nil {
|
||||
t.Fatalf("Has error: %v", err)
|
||||
}
|
||||
if g, e := found, false; g != e {
|
||||
t.Fatalf("wrong value: %v != %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuerySimple(t *testing.T) {
|
||||
mapds := datastore.NewMapDatastore()
|
||||
m := mount.New([]mount.Mount{
|
||||
{Prefix: datastore.NewKey("/quux"), Datastore: mapds},
|
||||
})
|
||||
|
||||
const myKey = "/quux/thud"
|
||||
if err := m.Put(datastore.NewKey(myKey), []byte("foobar")); err != nil {
|
||||
t.Fatalf("Put error: %v", err)
|
||||
}
|
||||
|
||||
res, err := m.Query(query.Query{Prefix: "/quux"})
|
||||
if err != nil {
|
||||
t.Fatalf("Query fail: %v\n", err)
|
||||
}
|
||||
entries, err := res.Rest()
|
||||
if err != nil {
|
||||
t.Fatalf("Query Results.Rest fail: %v\n", err)
|
||||
}
|
||||
seen := false
|
||||
for _, e := range entries {
|
||||
switch e.Key {
|
||||
case datastore.NewKey(myKey).String():
|
||||
seen = true
|
||||
default:
|
||||
t.Errorf("saw unexpected key: %q", e.Key)
|
||||
}
|
||||
}
|
||||
if !seen {
|
||||
t.Errorf("did not see wanted key %q in %+v", myKey, entries)
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered.go (generated, vendored, new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
package tiered
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
type tiered []ds.Datastore
|
||||
|
||||
// New returns a tiered datastore. Puts and Deletes will write-through to
|
||||
// all datastores, Has and Get will try each datastore sequentially, and
|
||||
// Query will always try the last one (most complete) first.
|
||||
func New(dses ...ds.Datastore) ds.Datastore {
|
||||
return tiered(dses)
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d tiered) Put(key ds.Key, value interface{}) (err error) {
|
||||
errs := make(chan error, len(d))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, cd := range d {
|
||||
wg.Add(1)
|
||||
go func(cd ds.Datastore) {
|
||||
defer wg.Done()
|
||||
if err := cd.Put(key, value); err != nil {
|
||||
errs <- err
|
||||
}
|
||||
}(cd)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
close(errs)
|
||||
for err := range errs {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d tiered) Get(key ds.Key) (value interface{}, err error) {
|
||||
err = fmt.Errorf("no datastores")
|
||||
for _, cd := range d {
|
||||
value, err = cd.Get(key)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d tiered) Has(key ds.Key) (exists bool, err error) {
|
||||
err = fmt.Errorf("no datastores")
|
||||
for _, cd := range d {
|
||||
exists, err = cd.Has(key)
|
||||
if err == nil && exists {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d tiered) Delete(key ds.Key) (err error) {
|
||||
errs := make(chan error, len(d))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, cd := range d {
|
||||
wg.Add(1)
|
||||
go func(cd ds.Datastore) {
|
||||
defer wg.Done()
|
||||
if err := cd.Delete(key); err != nil {
|
||||
errs <- err
|
||||
}
|
||||
}(cd)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
close(errs)
|
||||
for err := range errs {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query returns a list of keys in the datastore
|
||||
func (d tiered) Query(q dsq.Query) (dsq.Results, error) {
|
||||
// query always the last (most complete) one
|
||||
return d[len(d)-1].Query(q)
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/tiered/tiered_test.go (generated, vendored, new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
package tiered
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
dscb "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/callback"
|
||||
dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
func testHas(t *testing.T, dses []ds.Datastore, k ds.Key, v interface{}) {
|
||||
// all under should have it
|
||||
for _, d := range dses {
|
||||
if v2, err := d.Get(k); err != nil {
|
||||
t.Error(err)
|
||||
} else if v2 != v {
|
||||
t.Error("value incorrect", d, k, v, v2)
|
||||
}
|
||||
|
||||
if has, err := d.Has(k); err != nil {
|
||||
t.Error(err)
|
||||
} else if !has {
|
||||
t.Error("should have it", d, k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testNotHas(t *testing.T, dses []ds.Datastore, k ds.Key) {
|
||||
// all under should not have it
|
||||
for _, d := range dses {
|
||||
if _, err := d.Get(k); err == nil {
|
||||
t.Error("should not have it", d, k)
|
||||
}
|
||||
|
||||
if has, err := d.Has(k); err != nil {
|
||||
t.Error(err)
|
||||
} else if has {
|
||||
t.Error("should not have it", d, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTiered(t *testing.T) {
|
||||
d1 := ds.NewMapDatastore()
|
||||
d2 := ds.NewMapDatastore()
|
||||
d3 := ds.NewMapDatastore()
|
||||
d4 := ds.NewMapDatastore()
|
||||
|
||||
td := New(d1, d2, d3, d4)
|
||||
td.Put(ds.NewKey("foo"), "bar")
|
||||
testHas(t, []ds.Datastore{td}, ds.NewKey("foo"), "bar")
|
||||
testHas(t, td.(tiered), ds.NewKey("foo"), "bar") // all children
|
||||
|
||||
// remove it from, say, caches.
|
||||
d1.Delete(ds.NewKey("foo"))
|
||||
d2.Delete(ds.NewKey("foo"))
|
||||
testHas(t, []ds.Datastore{td}, ds.NewKey("foo"), "bar")
|
||||
testHas(t, td.(tiered)[2:], ds.NewKey("foo"), "bar")
|
||||
testNotHas(t, td.(tiered)[:2], ds.NewKey("foo"))
|
||||
|
||||
// write it again.
|
||||
td.Put(ds.NewKey("foo"), "bar2")
|
||||
testHas(t, []ds.Datastore{td}, ds.NewKey("foo"), "bar2")
|
||||
testHas(t, td.(tiered), ds.NewKey("foo"), "bar2")
|
||||
}
|
||||
|
||||
func TestQueryCallsLast(t *testing.T) {
|
||||
var d1n, d2n, d3n int
|
||||
d1 := dscb.Wrap(ds.NewMapDatastore(), func() { d1n++ })
|
||||
d2 := dscb.Wrap(ds.NewMapDatastore(), func() { d2n++ })
|
||||
d3 := dscb.Wrap(ds.NewMapDatastore(), func() { d3n++ })
|
||||
|
||||
td := New(d1, d2, d3)
|
||||
|
||||
td.Query(dsq.Query{})
|
||||
if d3n < 1 {
|
||||
t.Error("should call last")
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go (generated, vendored, new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
dsq "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
|
||||
)
|
||||
|
||||
var (
	// op keys
	putKey    = "put"
	getKey    = "get"
	hasKey    = "has"
	deleteKey = "delete"
)
|
||||
|
||||
type datastore struct {
|
||||
cache ds.Datastore
|
||||
ttl time.Duration
|
||||
|
||||
ttlmu sync.Mutex
|
||||
ttls map[ds.Key]time.Time
|
||||
}
|
||||
|
||||
func WithTTL(ttl time.Duration) ds.Datastore {
|
||||
return WithCache(ds.NewMapDatastore(), ttl)
|
||||
}
|
||||
|
||||
// WithCache wraps a given datastore as a timecache.
|
||||
// Get + Has requests are considered expired after a TTL.
|
||||
func WithCache(d ds.Datastore, ttl time.Duration) ds.Datastore {
|
||||
return &datastore{cache: d, ttl: ttl, ttls: make(map[ds.Key]time.Time)}
|
||||
}
|
||||
|
||||
func (d *datastore) gc() {
|
||||
var now = time.Now()
|
||||
var del []ds.Key
|
||||
|
||||
// remove all expired ttls.
|
||||
d.ttlmu.Lock()
|
||||
for k, ttl := range d.ttls {
|
||||
if now.After(ttl) {
|
||||
delete(d.ttls, k)
|
||||
del = append(del, k)
|
||||
}
|
||||
}
|
||||
d.ttlmu.Unlock()
|
||||
|
||||
for _, k := range del {
|
||||
d.cache.Delete(k)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *datastore) ttlPut(key ds.Key) {
|
||||
d.ttlmu.Lock()
|
||||
d.ttls[key] = time.Now().Add(d.ttl)
|
||||
d.ttlmu.Unlock()
|
||||
}
|
||||
|
||||
func (d *datastore) ttlDelete(key ds.Key) {
|
||||
d.ttlmu.Lock()
|
||||
delete(d.ttls, key)
|
||||
d.ttlmu.Unlock()
|
||||
}
|
||||
|
||||
// Put stores the object `value` named by `key`.
|
||||
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
|
||||
err = d.cache.Put(key, value)
|
||||
d.ttlPut(key)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
|
||||
d.gc()
|
||||
return d.cache.Get(key)
|
||||
}
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
d.gc()
|
||||
return d.cache.Has(key)
|
||||
}
|
||||
|
||||
// Delete removes the value for given `key`.
|
||||
func (d *datastore) Delete(key ds.Key) (err error) {
|
||||
d.ttlDelete(key)
|
||||
return d.cache.Delete(key)
|
||||
}
|
||||
|
||||
// Query returns a list of keys in the datastore
|
||||
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
return d.cache.Query(q)
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache_test.go (generated, vendored, new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
)
|
||||
|
||||
func testHas(t *testing.T, d ds.Datastore, k ds.Key, v interface{}) {
|
||||
if v2, err := d.Get(k); err != nil {
|
||||
t.Error(err)
|
||||
} else if v2 != v {
|
||||
t.Error("value incorrect", d, k, v, v2)
|
||||
}
|
||||
|
||||
if has, err := d.Has(k); err != nil {
|
||||
t.Error(err)
|
||||
} else if !has {
|
||||
t.Error("should have it", d, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func testNotHas(t *testing.T, d ds.Datastore, k ds.Key) {
|
||||
if _, err := d.Get(k); err == nil {
|
||||
t.Error("should not have it", d, k)
|
||||
}
|
||||
|
||||
if has, err := d.Has(k); err != nil {
|
||||
t.Error(err)
|
||||
} else if has {
|
||||
t.Error("should not have it", d, k)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeCache(t *testing.T) {
|
||||
ttl := time.Millisecond * 100
|
||||
cache := WithTTL(ttl)
|
||||
cache.Put(ds.NewKey("foo1"), "bar1")
|
||||
cache.Put(ds.NewKey("foo2"), "bar2")
|
||||
|
||||
<-time.After(ttl / 2)
|
||||
cache.Put(ds.NewKey("foo3"), "bar3")
|
||||
cache.Put(ds.NewKey("foo4"), "bar4")
|
||||
testHas(t, cache, ds.NewKey("foo1"), "bar1")
|
||||
testHas(t, cache, ds.NewKey("foo2"), "bar2")
|
||||
testHas(t, cache, ds.NewKey("foo3"), "bar3")
|
||||
testHas(t, cache, ds.NewKey("foo4"), "bar4")
|
||||
|
||||
<-time.After(ttl / 2)
|
||||
testNotHas(t, cache, ds.NewKey("foo1"))
|
||||
testNotHas(t, cache, ds.NewKey("foo2"))
|
||||
testHas(t, cache, ds.NewKey("foo3"), "bar3")
|
||||
testHas(t, cache, ds.NewKey("foo4"), "bar4")
|
||||
|
||||
cache.Delete(ds.NewKey("foo3"))
|
||||
testNotHas(t, cache, ds.NewKey("foo3"))
|
||||
|
||||
<-time.After(ttl / 2)
|
||||
testNotHas(t, cache, ds.NewKey("foo1"))
|
||||
testNotHas(t, cache, ds.NewKey("foo2"))
|
||||
testNotHas(t, cache, ds.NewKey("foo3"))
|
||||
testNotHas(t, cache, ds.NewKey("foo4"))
|
||||
}
|
||||
Godeps/_workspace/src/github.com/jbenet/go-migrate/LICENSE (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Juan Batiz-Benet

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
11
Godeps/_workspace/src/github.com/jbenet/go-migrate/README.md
generated
vendored
Normal file
11
Godeps/_workspace/src/github.com/jbenet/go-migrate/README.md
generated
vendored
Normal file
@ -0,0 +1,11 @@
# go-migrate

This is a very simple migration framework. See "Migrations" in https://github.com/jbenet/random-ideas/issues/33

This package includes:

- `migrate` package -- lib to write migration programs

## The model

The idea here is that we have some thing -- usually a directory -- that needs to be migrated between different representation versions. This may be because there has been an upgrade.
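To make the model concrete, a migration program built on this package could look roughly like the sketch below. It is only an illustration: the `renameDatastore` type, the directory names, and the `1-to-2` version string are invented here, and the un-vendored import path `github.com/jbenet/go-migrate` is assumed; the `Migration` interface and `migrate.Main` are defined in `migrate.go` and `cli.go` further down in this commit.

```go
package main

import (
	"os"
	"path"

	migrate "github.com/jbenet/go-migrate"
)

// renameDatastore is a hypothetical 1-to-2 migration that renames a
// directory inside the repo pointed at by the -path flag.
type renameDatastore struct{}

func (m renameDatastore) Versions() string { return "1-to-2" }
func (m renameDatastore) Reversible() bool { return true }

func (m renameDatastore) Apply(opts migrate.Options) error {
	// opts.Path is the Flags.Path value parsed from -path.
	return os.Rename(path.Join(opts.Path, "datastore"), path.Join(opts.Path, "leveldb"))
}

func (m renameDatastore) Revert(opts migrate.Options) error {
	return os.Rename(path.Join(opts.Path, "leveldb"), path.Join(opts.Path, "datastore"))
}

func main() {
	// Main parses -f, -revert and -path, then runs Apply or Revert.
	migrate.Main(renameDatastore{})
}
```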
51
Godeps/_workspace/src/github.com/jbenet/go-migrate/cli.go
generated
vendored
Normal file
51
Godeps/_workspace/src/github.com/jbenet/go-migrate/cli.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
package migrate

import (
"flag"
"fmt"
"os"
)

type Flags struct {
Force bool
Revert bool
Path string // file path to migrate for fs based migrations
}

func (f *Flags) Setup() {
flag.BoolVar(&f.Force, "f", false, "whether to force a migration (ignores warnings)")
flag.BoolVar(&f.Revert, "revert", false, "whether to apply the migration backwards")
flag.StringVar(&f.Path, "path", "", "file path to migrate for fs based migrations")
}

func (f *Flags) Parse() {
flag.Parse()
}

func Run(m Migration) error {
f := Flags{}
f.Setup()
f.Parse()

if !m.Reversible() {
if f.Revert {
return fmt.Errorf("migration %s is irreversible", m.Versions())
}
if !f.Force {
return fmt.Errorf("migration %s is irreversible (use -f to proceed)", m.Versions())
}
}

if f.Revert {
return m.Revert(Options{f})
} else {
return m.Apply(Options{f})
}
}

func Main(m Migration) {
if err := Run(m); err != nil {
fmt.Fprintf(os.Stderr, "error: %s\n", err)
os.Exit(1)
}
}
2
Godeps/_workspace/src/github.com/jbenet/go-migrate/doc.go
generated
vendored
Normal file
2
Godeps/_workspace/src/github.com/jbenet/go-migrate/doc.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
// Package migrate is used to write migrations between representations of things.
package migrate
37
Godeps/_workspace/src/github.com/jbenet/go-migrate/migrate.go
generated
vendored
Normal file
37
Godeps/_workspace/src/github.com/jbenet/go-migrate/migrate.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
package migrate

import (
"fmt"
)

// Options are migration options. For now all flags are options.
type Options struct {
Flags
}

// Migration represents a single migration between two representation versions.
type Migration interface {

// Versions is the "v-to-v" version string.
Versions() string

// Reversible returns whether this migration can be reverted.
// Endeavor to make them all reversible. This is here only to warn users
// in case this is not the case.
Reversible() bool

// Apply applies the migration in question.
Apply(Options) error

// Revert un-applies the migration in question. This should be best-effort.
// Some migrations are definitively one-way. If so, return an error.
Revert(Options) error
}

func SplitVersion(s string) (from int, to int) {
_, err := fmt.Sscanf(s, "%d-to-%d", &from, &to)
if err != nil {
panic(err.Error())
}
return
}
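For illustration, `SplitVersion` recovers the numeric endpoints from a `Versions()` string following the `"%d-to-%d"` convention; a tiny, hypothetical usage example (same un-vendored import path assumption as above):

```go
package main

import (
	"fmt"

	migrate "github.com/jbenet/go-migrate"
)

func main() {
	from, to := migrate.SplitVersion("1-to-2")
	fmt.Printf("from %d to %d\n", from, to) // prints: from 1 to 2
}
```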
@ -145,7 +145,7 @@ accomplished with the following command:
### Troubleshooting
If you have previously installed ipfs before and you are running into
problems getting a newer version to work, try deleting (or backing up somewhere
else) your ipfs config directory (~/.go-ipfs by default) and rerunning `ipfs init`.
else) your ipfs config directory (~/.ipfs by default) and rerunning `ipfs init`.
This will reinitialize the config file to its defaults and clear out the local
datastore of any bad entries.

@ -18,7 +18,7 @@ import (
var log = eventlog.Logger("blockstore")

// BlockPrefix namespaces blockstore datastores
var BlockPrefix = ds.NewKey("b")
var BlockPrefix = ds.NewKey("blocks")

var ValueTypeMismatch = errors.New("The retrieved value is not a Block")

@ -89,6 +89,8 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan u.Key, error) {

// KeysOnly, because that would be _a lot_ of data.
q := dsq.Query{KeysOnly: true}
// datastore/namespace does *NOT* fix up Query.Prefix
q.Prefix = BlockPrefix.String()
res, err := bs.datastore.Query(q)
if err != nil {
return nil, err

@ -103,23 +103,18 @@ func daemonFunc(req cmds.Request, res cmds.Response) {
}
}

// To ensure that IPFS has been initialized, fetch the config. Do this
// _before_ acquiring the daemon lock so the user gets an appropriate error
// message.
// NB: It's safe to read the config without the daemon lock, but not safe
// to write.
ctx := req.Context()
cfg, err := ctx.GetConfig()
// acquire the repo lock _before_ constructing a node. we need to make
// sure we are permitted to access the resources (datastore, etc.)
repo, err := fsrepo.Open(req.Context().ConfigRoot)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}

// acquire the repo lock _before_ constructing a node. we need to make
// sure we are permitted to access the resources (datastore, etc.)
repo, err := fsrepo.Open(req.Context().ConfigRoot)
ctx := req.Context()
cfg, err := ctx.GetConfig()
if err != nil {
res.SetError(fmt.Errorf("Couldn't obtain lock. Is another daemon already running?"), cmds.ErrNormal)
res.SetError(err, cmds.ErrNormal)
return
}

@ -49,7 +49,7 @@ Get the value of the 'datastore.path' key:

Set the value of the 'datastore.path' key:

ipfs config datastore.path ~/.go-ipfs/datastore
ipfs config datastore.path ~/.ipfs/datastore
`,
},

@ -1,6 +1,6 @@
# FUSE

As a golang project, `go-ipfs` is easily downloaded and installed with `go get github.com/ipfs/go-ipfs`. All data is stored in a leveldb data store in `~/.go-ipfs/datastore`. If, however, you would like to mount the datastore (`ipfs mount /ipfs`) and use it as you would a normal filesystem, you will need to install fuse.
As a golang project, `go-ipfs` is easily downloaded and installed with `go get github.com/ipfs/go-ipfs`. All data is stored in a leveldb data store in `~/.ipfs/datastore`. If, however, you would like to mount the datastore (`ipfs mount /ipfs`) and use it as you would a normal filesystem, you will need to install fuse.

As a precursor, you will have to create the `/ipfs` and `/ipns` directories explicitly. Note that modifying root requires sudo permissions.

@ -8,6 +8,6 @@ make clean
make test
make save_logs

docker cp dockertest_server_1:/root/.go-ipfs/logs/events.log $(PWD)/build/server-events.log
docker cp dockertest_bootstrap_1:/root/.go-ipfs/logs/events.log $(PWD)/build/bootstrap-events.log
docker cp dockertest_client_1:/root/.go-ipfs/logs/events.log $(PWD)/build/client-events.log
docker cp dockertest_server_1:/root/.ipfs/logs/events.log $(PWD)/build/server-events.log
docker cp dockertest_bootstrap_1:/root/.ipfs/logs/events.log $(PWD)/build/bootstrap-events.log
docker cp dockertest_client_1:/root/.ipfs/logs/events.log $(PWD)/build/client-events.log

@ -29,8 +29,10 @@ type Config struct {
}

const (
// DefaultPathName is the default config dir name
DefaultPathName = ".ipfs"
// DefaultPathRoot is the path to the default config dir location.
DefaultPathRoot = "~/.go-ipfs"
DefaultPathRoot = "~/" + DefaultPathName
// DefaultConfigFile is the filename of the configuration file
DefaultConfigFile = "config"
// EnvDir is the environment variable used to change the path root.

@ -2,7 +2,7 @@
//
// TODO explain the package roadmap...
//
// .go-ipfs/
// .ipfs/
// ├── client/
// |   ├── client.lock <------ protects client/ + signals its own pid
// │   ├── ipfs-client.cpuprof
@ -7,15 +7,19 @@ import (
"os"
"path"
"strconv"
"strings"
"sync"

ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs"
levelds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount"
ldbopts "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
repo "github.com/ipfs/go-ipfs/repo"
"github.com/ipfs/go-ipfs/repo/common"
config "github.com/ipfs/go-ipfs/repo/config"
lockfile "github.com/ipfs/go-ipfs/repo/fsrepo/lock"
mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
serialize "github.com/ipfs/go-ipfs/repo/fsrepo/serialize"
dir "github.com/ipfs/go-ipfs/thirdparty/dir"
"github.com/ipfs/go-ipfs/thirdparty/eventlog"
@ -24,8 +28,26 @@ import (
ds2 "github.com/ipfs/go-ipfs/util/datastore2"
)

// version number that we are currently expecting to see
var RepoVersion = "2"

var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md
Sorry for the inconvenience. In the future, these will run automatically.`

var errIncorrectRepoFmt = `Repo has incorrect version: %s
Program version is: %s
Please run the ipfs migration tool before continuing.
` + migrationInstructions

var (
ErrNoRepo = errors.New("no ipfs repo found. please run: ipfs init")
ErrNoVersion = errors.New("no version file found, please run 0-to-1 migration tool.\n" + migrationInstructions)
ErrOldRepo = errors.New("ipfs repo found in old '~/.go-ipfs' location, please run migration tool.\n" + migrationInstructions)
)

const (
defaultDataStoreDirectory = "datastore"
leveldbDirectory = "datastore"
flatfsDirectory = "blocks"
)

var (
@ -60,8 +82,9 @@ type FSRepo struct {
// the same fsrepo path concurrently
lockfile io.Closer
config *config.Config
// ds is set on Open
ds ds2.ThreadSafeDatastoreCloser
ds ds.ThreadSafeDatastore
// tracked separately for use in Close; do not use directly.
leveldbDS levelds.Datastore
}

var _ repo.Repo = (*FSRepo)(nil)
@ -79,13 +102,14 @@ func open(repoPath string) (repo.Repo, error) {
packageLock.Lock()
defer packageLock.Unlock()

expPath, err := u.TildeExpansion(path.Clean(repoPath))
r, err := newFSRepo(repoPath)
if err != nil {
return nil, err
}

r := &FSRepo{
path: expPath,
// Check if its initialized
if err := checkInitialized(r.path); err != nil {
return nil, err
}

r.lockfile, err = lockfile.Lock(r.path)
@ -100,12 +124,20 @@ func open(repoPath string) (repo.Repo, error) {
}
}()

if !isInitializedUnsynced(r.path) {
return nil, errors.New("ipfs not initialized, please run 'ipfs init'")
// Check version, and error out if not matching
ver, err := mfsr.RepoPath(r.path).Version()
if err != nil {
if os.IsNotExist(err) {
return nil, ErrNoVersion
}
return nil, err
}

if ver != RepoVersion {
return nil, fmt.Errorf(errIncorrectRepoFmt, ver, RepoVersion)
}

// check repo path, then check all constituent parts.
// TODO acquire repo lock
// TODO if err := initCheckDir(logpath); err != nil { // }
if err := dir.Writable(r.path); err != nil {
return nil, err
}
@ -118,13 +150,33 @@ func open(repoPath string) (repo.Repo, error) {
return nil, err
}

// log.Debugf("writing eventlogs to ...", c.path)
// setup eventlogger
configureEventLoggerAtRepoPath(r.config, r.path)

keepLocked = true
return r, nil
}

func newFSRepo(rpath string) (*FSRepo, error) {
expPath, err := u.TildeExpansion(path.Clean(rpath))
if err != nil {
return nil, err
}

return &FSRepo{path: expPath}, nil
}

func checkInitialized(path string) error {
if !isInitializedUnsynced(path) {
alt := strings.Replace(path, ".ipfs", ".go-ipfs", 1)
if isInitializedUnsynced(alt) {
return ErrOldRepo
}
return ErrNoRepo
}
return nil
}

// ConfigAt returns an error if the FSRepo at the given path is not
// initialized. This function allows callers to read the config file even when
// another process is running and holding the lock.
@ -190,8 +242,13 @@ func Init(repoPath string, conf *config.Config) error {

// The actual datastore contents are initialized lazily when Opened.
// During Init, we merely check that the directory is writeable.
p := path.Join(repoPath, defaultDataStoreDirectory)
if err := dir.Writable(p); err != nil {
leveldbPath := path.Join(repoPath, leveldbDirectory)
if err := dir.Writable(leveldbPath); err != nil {
return fmt.Errorf("datastore: %s", err)
}

flatfsPath := path.Join(repoPath, flatfsDirectory)
if err := dir.Writable(flatfsPath); err != nil {
return fmt.Errorf("datastore: %s", err)
}

@ -199,6 +256,10 @@ func Init(repoPath string, conf *config.Config) error {
return err
}

if err := mfsr.RepoPath(repoPath).WriteVersion(RepoVersion); err != nil {
return err
}

return nil
}

@ -236,14 +297,41 @@ func (r *FSRepo) openConfig() error {

// openDatastore returns an error if the config file is not present.
func (r *FSRepo) openDatastore() error {
dsPath := path.Join(r.path, defaultDataStoreDirectory)
ds, err := levelds.NewDatastore(dsPath, &levelds.Options{
leveldbPath := path.Join(r.path, leveldbDirectory)
var err error
// save leveldb reference so it can be neatly closed afterward
r.leveldbDS, err = levelds.NewDatastore(leveldbPath, &levelds.Options{
Compression: ldbopts.NoCompression,
})
if err != nil {
return errors.New("unable to open leveldb datastore")
}
r.ds = ds

// 4TB of 256kB objects ~=17M objects, splitting that 256-way
// leads to ~66k objects per dir, splitting 256*256-way leads to
// only 256.
//
// The keys seen by the block store have predictable prefixes,
// including "/" from datastore.Key and 2 bytes from multihash. To
// reach a uniform 256-way split, we need approximately 4 bytes of
// prefix.
blocksDS, err := flatfs.New(path.Join(r.path, flatfsDirectory), 4)
if err != nil {
return errors.New("unable to open flatfs datastore")
}

mountDS := mount.New([]mount.Mount{
{Prefix: ds.NewKey("/blocks"), Datastore: blocksDS},
{Prefix: ds.NewKey("/"), Datastore: r.leveldbDS},
})
// Make sure it's ok to claim the virtual datastore from mount as
// threadsafe. There's no clean way to make mount itself provide
// this information without copy-pasting the code into two
// variants. This is the same dilemma as the `[].byte` attempt at
// introducing const types to Go.
var _ ds.ThreadSafeDatastore = blocksDS
var _ ds.ThreadSafeDatastore = r.leveldbDS
r.ds = ds2.ClaimThreadSafe{mountDS}
return nil
}
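For reference, the arithmetic behind the sharding comment in `openDatastore` above: 4 TB of 256 kB objects is 2^42 B / 2^18 B = 2^24 ≈ 16.8M objects; a 256-way split leaves roughly 2^24 / 2^8 ≈ 66k objects per directory, while a 256·256-way split leaves only 2^24 / 2^16 = 256. Because the first few characters of every block key are predictable ("/" plus the multihash prefix), a prefix length of 4 is what `flatfs.New` is given to approximate the uniform 256-way split.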
@ -268,7 +356,7 @@ func (r *FSRepo) Close() error {
return errors.New("repo is closed")
}

if err := r.ds.Close(); err != nil {
if err := r.leveldbDS.Close(); err != nil {
return err
}

@ -430,7 +518,7 @@ func isInitializedUnsynced(repoPath string) bool {
if !configIsInitialized(repoPath) {
return false
}
if !util.FileExists(path.Join(repoPath, defaultDataStoreDirectory)) {
if !util.FileExists(path.Join(repoPath, leveldbDirectory)) {
return false
}
return true

@ -8,9 +8,9 @@ import (
"github.com/ipfs/go-ipfs/util"
)

// LockFile is the filename of the daemon lock, relative to config dir
// LockFile is the filename of the repo lock, relative to config dir
// TODO rename repo lock and hide name
const LockFile = "daemon.lock"
const LockFile = "repo.lock"

func Lock(confdir string) (io.Closer, error) {
c, err := lock.Lock(path.Join(confdir, LockFile))
61
repo/fsrepo/migrations/mfsr.go
Normal file
61
repo/fsrepo/migrations/mfsr.go
Normal file
@ -0,0 +1,61 @@
package mfsr

import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
)

const VersionFile = "version"

type RepoPath string

func (rp RepoPath) VersionFile() string {
return path.Join(string(rp), VersionFile)
}

func (rp RepoPath) Version() (string, error) {
if rp == "" {
return "", fmt.Errorf("invalid repo path \"%s\"", rp)
}

fn := rp.VersionFile()
if _, err := os.Stat(fn); os.IsNotExist(err) {
return "", VersionFileNotFound(rp)
}

c, err := ioutil.ReadFile(fn)
if err != nil {
return "", err
}

s := string(c)
s = strings.TrimSpace(s)
return s, nil
}

func (rp RepoPath) CheckVersion(version string) error {
v, err := rp.Version()
if err != nil {
return err
}

if v != version {
return fmt.Errorf("versions differ (expected: %s, actual:%s)", version, v)
}

return nil
}

func (rp RepoPath) WriteVersion(version string) error {
fn := rp.VersionFile()
return ioutil.WriteFile(fn, []byte(version+"\n"), 0644)
}

type VersionFileNotFound string

func (v VersionFileNotFound) Error() string {
return "no version file in repo at " + string(v)
}
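The version file is plain text: a single version string plus a trailing newline, trimmed on read. A minimal sketch of the write/read/check round trip that `fsrepo.Init` and `fsrepo.open` perform above, using a throwaway directory in place of a real repo root:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	mfsr "github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
)

func main() {
	// A temporary directory stands in for an ipfs repo root.
	dir, err := ioutil.TempDir("", "repo")
	if err != nil {
		log.Fatal(err)
	}
	rp := mfsr.RepoPath(dir)

	// fsrepo.Init writes the expected version ("2" at this commit).
	if err := rp.WriteVersion("2"); err != nil {
		log.Fatal(err)
	}

	// fsrepo.open reads it back and compares it against RepoVersion.
	ver, err := rp.Version()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("repo version:", ver)

	// CheckVersion reports a mismatch as an error.
	fmt.Println(rp.CheckVersion("3"))
}
```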
@ -2,7 +2,7 @@ FROM zaqwsx_ipfs-test-img

RUN ipfs init -b=1024
ADD . /tmp/id
RUN mv -f /tmp/id/config /root/.go-ipfs/config
RUN mv -f /tmp/id/config /root/.ipfs/config
RUN ipfs id

ENV IPFS_PROF true

@ -5,7 +5,7 @@
},
"Datastore": {
"Type": "leveldb",
"Path": "/root/.go-ipfs/datastore"
"Path": "/root/.ipfs/datastore"
},
"Addresses": {
"Swarm": [
@ -30,7 +30,7 @@
"Last": ""
},
"Logs": {
"Filename": "/root/.go-ipfs/logs/events.log",
"Filename": "/root/.ipfs/logs/events.log",
"MaxSizeMB": 0,
"MaxBackups": 0,
"MaxAgeDays": 0

@ -2,7 +2,7 @@ FROM zaqwsx_ipfs-test-img

RUN ipfs init -b=1024
ADD . /tmp/id
RUN mv -f /tmp/id/config /root/.go-ipfs/config
RUN mv -f /tmp/id/config /root/.ipfs/config
RUN ipfs id

EXPOSE 4031 4032/udp

@ -8,7 +8,7 @@
"Bootstrap": [
],
"Datastore": {
"Path": "/root/.go-ipfs/datastore",
"Path": "/root/.ipfs/datastore",
"Type": "leveldb"
},
"Identity": {
@ -16,7 +16,7 @@
"PrivKey": "CAAS4AQwggJcAgEAAoGBANlJUjOCbPXgYUfo1Pr6nlIjJDPNwN81ACamhaoEZ9VRHXI3fPe7RVAaaXrWLHb892mRqFi1ScE2lcMTLc7WGfyc7dwPqBOZqkVvT0KpCx3Mg246+WvnG8I3HCbWyjSP9tJflOBQxVq6qT2yZSXjNTtDdO4skd4PsPqBco53guYTAgMBAAECgYEAtIcYhrdMNBSSfp5RpZxnwbJ0t52xK0HruDEOSK2UX0Ufg+/aIjEza1QmYupi0xFltg5QojMs7hyd3Q+oNXro5tKsYVeiqrLsUh9jMjaQofzSlV9Oc+bhkkl48YWvF6Y8qx88UYAX+oJqB627H4S1gxLdNEJhPjEAD6n/jql3zUECQQDmHP75wJ7nC4TlxT1SHim5syMAqWNs/SOHnvX8yLrFV9FrMRzsD5qMlIEGBrAjaESzEck6XpbqkyxB8KKGo7OjAkEA8brtEh/AMoQ/yoSWdYT2MRbJxCAn+KG2c6Hi9AMMmJ+K779HxywpUIDYIa22hzLKYumYIuRa1X++1glOAFGq0QJAPQgXwFoMSy9M8jwcBXmmi3AtqnFCw5doIwJQL9l1X/3ot0txZlLFJOAGUHjZoqp2/h+LhYWs9U5PgLW4BYnJjQJAPydY/J0y93+5ss1FCdr8/wI3IHhOORT2t+sZgiqxxcYY5F4TAKQ2/wNKdDIQN+47FfB1gNgsKw8+6mhv6oFroQJACBF2yssNVXiXa2Na/a9tKYutGvxbm3lXzOvmpkW3FukbsObKYS344J1vdg0nzM6EWQCaiBweSA5TQ27iNW6BzQ=="
},
"Logs": {
"Filename": "/root/.go-ipfs/logs/events.log",
"Filename": "/root/.ipfs/logs/events.log",
"MaxAgeDays": 0,
"MaxBackups": 0,
"MaxSizeMB": 0

@ -2,7 +2,7 @@ FROM zaqwsx_ipfs-test-img

RUN ipfs init -b=1024
ADD . /tmp/test
RUN mv -f /tmp/test/config /root/.go-ipfs/config
RUN mv -f /tmp/test/config /root/.ipfs/config
RUN ipfs id
RUN chmod +x /tmp/test/run.sh

@ -8,7 +8,7 @@
"Bootstrap": [
],
"Datastore": {
"Path": "/root/.go-ipfs/datastore",
"Path": "/root/.ipfs/datastore",
"Type": "leveldb"
},
"Identity": {
@ -16,7 +16,7 @@
"PrivKey": "CAAS4AQwggJcAgEAAoGBANW3mJMmDSJbdRyykO0Ze5t6WL6jeTtpOhklxePBIkJL/Uil78Va/tODx6Mvv3GMCkbGvzWslTZXpaHa9vBmjE3MVZSmd5fLRybKT0zZ3juABKcx+WIVNw8JlkpEORihJdwb+5tRUC5pUcMzxqHSmGX+d6e9KZqLnv7piNKg2+r7AgMBAAECgYAqc6+w+wv82SHoM2gqULeG6MScCajZLkvGFwS5+vEtLh7/wUZhc3PO3AxZ0/A5Q9H+wRfWN5PkGYDjJ7WJhzUzGfTbrQ821JV6B3IUR4UHo2IgJkZO4EUB5L9KBUqvYxDJigtGBopgQh0EeDSS+9X8vaGmit5l4zcAfi+UGYPgMQJBAOCJQU8N2HW5SawBo2QX0bnCAAnu5Ilk2QaqwDZbDQaM5JWFcpRpGnjBhsZihHwVWvKCbnq83JhAGRQvKAEepMUCQQDzqjvIyM+Au42nP7SFDHoMjEnHW8Nimvz8zPbyrSUEHe4l9/yS4+BeRPxpwI5xgzp8g1wEYfNeXt08buYwCsy/AkBXWg5mSuSjJ+pZWGnQTtPwiGCrfJy8NteXmGYev11Z5wYmhTwGML1zrRZZp4oTG9u97LA+X6sSMB2RlKbjiKBhAkEAgl/hoSshK+YugwCpHE9ytmgRyeOlhYscNj+NGofeOHezRwmLUSUwlgAfdo4bKU1n69t1TrsCNspXYdCMxcPhjQJAMNxkJ8t2tFMpucCQfWJ09wvFKZSHX1/iD9GKWL0Qk2FcMCg3NXiqei5NL3NYqCWpdC/IfjsAEGCJrTFwp/OoUw=="
},
"Logs": {
"Filename": "/root/.go-ipfs/logs/events.log",
"Filename": "/root/.ipfs/logs/events.log",
"MaxAgeDays": 0,
"MaxBackups": 0,
"MaxSizeMB": 0

@ -53,7 +53,7 @@ func benchmarkAdd(amount int64) (*testing.BenchmarkResult, error) {
defer os.RemoveAll(tmpDir)

env := append(
[]string{fmt.Sprintf("%s=%s", config.EnvDir, path.Join(tmpDir, ".go-ipfs"))}, // first in order to override
[]string{fmt.Sprintf("%s=%s", config.EnvDir, path.Join(tmpDir, config.DefaultPathName))}, // first in order to override
os.Environ()...,
)
setupCmd := func(cmd *exec.Cmd) {

@ -43,7 +43,7 @@ func benchmarkAdd(amount int64) (*testing.BenchmarkResult, error) {
}
defer os.RemoveAll(tmpDir)

env := append(os.Environ(), fmt.Sprintf("%s=%s", config.EnvDir, path.Join(tmpDir, ".go-ipfs")))
env := append(os.Environ(), fmt.Sprintf("%s=%s", config.EnvDir, path.Join(tmpDir, config.DefaultPathName)))
setupCmd := func(cmd *exec.Cmd) {
cmd.Env = env
}

@ -8,6 +8,6 @@ make clean
make test
make save_logs

docker cp 3nodetest_server_1:/root/.go-ipfs/logs/events.log $(PWD)/build/server-events.log
docker cp 3nodetest_bootstrap_1:/root/.go-ipfs/logs/events.log $(PWD)/build/bootstrap-events.log
docker cp 3nodetest_client_1:/root/.go-ipfs/logs/events.log $(PWD)/build/client-events.log
docker cp 3nodetest_server_1:/root/.ipfs/logs/events.log $(PWD)/build/server-events.log
docker cp 3nodetest_bootstrap_1:/root/.ipfs/logs/events.log $(PWD)/build/bootstrap-events.log
docker cp 3nodetest_client_1:/root/.ipfs/logs/events.log $(PWD)/build/client-events.log

@ -68,11 +68,11 @@ This means cating certain files, or running diagnostic commands.
For example:

```
test_expect_success ".go-ipfs/ has been created" '
test -d ".go-ipfs" &&
test -f ".go-ipfs/config" &&
test -d ".go-ipfs/datastore" ||
test_fsh ls -al .go-ipfs
test_expect_success ".ipfs/ has been created" '
test -d ".ipfs" &&
test -f ".ipfs/config" &&
test -d ".ipfs/datastore" ||
test_fsh ls -al .ipfs
'
```

@ -148,7 +148,7 @@ test_init_ipfs() {
# todo: in the future, use env?

test_expect_success "ipfs init succeeds" '
export IPFS_PATH="$(pwd)/.go-ipfs" &&
export IPFS_PATH="$(pwd)/.ipfs" &&
ipfs init -b=1024 > /dev/null
'

@ -9,16 +9,16 @@ test_description="Test init command"
. lib/test-lib.sh

test_expect_success "ipfs init succeeds" '
export IPFS_PATH="$(pwd)/.go-ipfs" &&
export IPFS_PATH="$(pwd)/.ipfs" &&
BITS="2048" &&
ipfs init --bits="$BITS" >actual_init
'

test_expect_success ".go-ipfs/ has been created" '
test -d ".go-ipfs" &&
test -f ".go-ipfs/config" &&
test -d ".go-ipfs/datastore" ||
test_fsh ls -al .go-ipfs
test_expect_success ".ipfs/ has been created" '
test -d ".ipfs" &&
test -f ".ipfs/config" &&
test -d ".ipfs/datastore" ||
test_fsh ls -al .ipfs
'

test_expect_success "ipfs config succeeds" '

@ -10,7 +10,7 @@ test_description="Test daemon command"

# this needs to be in a different test than "ipfs daemon --init" below
test_expect_success "setup IPFS_PATH" '
IPFS_PATH="$(pwd)/.go-ipfs"
IPFS_PATH="$(pwd)/.ipfs"
'

# NOTE: this should remove bootstrap peers (needs a flag)
@ -54,11 +54,11 @@ test_expect_failure "ipfs daemon output looks good" '
test_cmp_repeat_10_sec expected actual_daemon
'

test_expect_success ".go-ipfs/ has been created" '
test -d ".go-ipfs" &&
test -f ".go-ipfs/config" &&
test -d ".go-ipfs/datastore" ||
test_fsh ls .go-ipfs
test_expect_success ".ipfs/ has been created" '
test -d ".ipfs" &&
test -f ".ipfs/config" &&
test -d ".ipfs/datastore" ||
test_fsh ls .ipfs
'

# begin same as in t0010

2
test/supernode_client/.gitignore
vendored
2
test/supernode_client/.gitignore
vendored
@ -1 +1 @@
.go-ipfs/
.ipfs/

@ -63,7 +63,7 @@ func run() error {
if err != nil {
return err
}
repoPath := gopath.Join(cwd, ".go-ipfs")
repoPath := gopath.Join(cwd, config.DefaultPathName)
if err := ensureRepoInitialized(repoPath); err != nil {
}
repo, err := fsrepo.Open(repoPath)
15
util/datastore2/threadsafe.go
Normal file
15
util/datastore2/threadsafe.go
Normal file
@ -0,0 +1,15 @@
package datastore2

import (
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
)

// ClaimThreadSafe claims that a Datastore is threadsafe, even when
// its type does not guarantee this. Use carefully.
type ClaimThreadSafe struct {
datastore.Datastore
}

var _ datastore.ThreadSafeDatastore = ClaimThreadSafe{}

func (ClaimThreadSafe) IsThreadSafe() {}
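`ClaimThreadSafe` is what lets `openDatastore` above present the mount-composed datastore as a `ds.ThreadSafeDatastore`. A minimal sketch of the same pattern, assuming go-datastore's `NewMapDatastore` constructor; the wrapper only asserts thread safety, it adds no locking of its own:

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	ds2 "github.com/ipfs/go-ipfs/util/datastore2"
)

func main() {
	// Wrap a plain (unsynchronized) datastore so it satisfies
	// ds.ThreadSafeDatastore; callers remain responsible for safety.
	var tsds ds.ThreadSafeDatastore = ds2.ClaimThreadSafe{ds.NewMapDatastore()}

	if err := tsds.Put(ds.NewKey("/example"), "value"); err != nil {
		fmt.Println(err)
	}
}
```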